repository_name: stringlengths 7 to 107
function_path: stringlengths 4 to 190
function_identifier: stringlengths 1 to 236
language: stringclasses, 1 value
function: stringlengths 9 to 647k
docstring: stringlengths 5 to 488k
function_url: stringlengths 71 to 285
context: stringlengths 0 to 2.51M
license: stringclasses, 5 values
albertsuarez/covid19-bot
src/helper/env.py
get_twitter_access_token_key
python
def get_twitter_access_token_key(): return _get('TWITTER_ACCESS_TOKEN_KEY')
Retrieve the Twitter access token key as an environment variable. :return: Environment variable.
https://github.com/albertsuarez/covid19-bot/blob/73b8e7e069456d86820bfb5401dff66b67d2f98f/src/helper/env.py#L26-L31
import os

def _get(env_key):
    if env_key in os.environ:
        return os.environ[env_key]
    return None

def get_twitter_consumer_key():
    return _get('TWITTER_CONSUMER_KEY')

def get_twitter_consumer_secret():
    return _get('TWITTER_CONSUMER_SECRET')
MIT License
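A minimal usage sketch of the lookup pattern above, using only the standard library; the variable value is a placeholder, not a real credential.

import os

def _get(env_key):
    # Same lookup as in the record above: the value if set, otherwise None.
    if env_key in os.environ:
        return os.environ[env_key]
    return None

os.environ["TWITTER_ACCESS_TOKEN_KEY"] = "example-token-key"  # placeholder value
print(_get("TWITTER_ACCESS_TOKEN_KEY"))  # "example-token-key"
print(_get("TWITTER_CONSUMER_KEY"))      # None when the variable is unset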
microsoft/botbuilder-python
libraries/botbuilder-schema/botbuilder/schema/_models_py3.py
Activity.as_suggestion_activity
python
def as_suggestion_activity(self): return self if self.__is_activity(ActivityTypes.suggestion) else None
Returns this activity as a SuggestionActivity object; or None, if this is not that type of activity. :returns: This activity as a suggestion activity; or None.
https://github.com/microsoft/botbuilder-python/blob/41211de2d7854e27aca8e3d1eccb24352be7e915/libraries/botbuilder-schema/botbuilder/schema/_models_py3.py#L529-L536
from typing import List from botbuilder.schema._connector_client_enums import ActivityTypes from datetime import datetime from enum import Enum from msrest.serialization import Model from msrest.exceptions import HttpOperationError class ActivityEventNames(str, Enum): continue_conversation = "ContinueConversation" create_conversation = "CreateConversation" class ConversationReference(Model): _attribute_map = { "activity_id": {"key": "activityId", "type": "str"}, "user": {"key": "user", "type": "ChannelAccount"}, "bot": {"key": "bot", "type": "ChannelAccount"}, "conversation": {"key": "conversation", "type": "ConversationAccount"}, "channel_id": {"key": "channelId", "type": "str"}, "locale": {"key": "locale", "type": "str"}, "service_url": {"key": "serviceUrl", "type": "str"}, } def __init__( self, *, activity_id: str = None, user=None, bot=None, conversation=None, channel_id: str = None, locale: str = None, service_url: str = None, **kwargs ) -> None: super(ConversationReference, self).__init__(**kwargs) self.activity_id = activity_id self.user = user self.bot = bot self.conversation = conversation self.channel_id = channel_id self.locale = locale self.service_url = service_url class Mention(Model): _attribute_map = { "mentioned": {"key": "mentioned", "type": "ChannelAccount"}, "text": {"key": "text", "type": "str"}, "type": {"key": "type", "type": "str"}, } def __init__( self, *, mentioned=None, text: str = None, type: str = None, **kwargs ) -> None: super(Mention, self).__init__(**kwargs) self.mentioned = mentioned self.text = text self.type = type class ResourceResponse(Model): _attribute_map = {"id": {"key": "id", "type": "str"}} def __init__(self, *, id: str = None, **kwargs) -> None: super(ResourceResponse, self).__init__(**kwargs) self.id = id class Activity(Model): _attribute_map = { "type": {"key": "type", "type": "str"}, "id": {"key": "id", "type": "str"}, "timestamp": {"key": "timestamp", "type": "iso-8601"}, "local_timestamp": {"key": "localTimestamp", "type": "iso-8601"}, "local_timezone": {"key": "localTimezone", "type": "str"}, "service_url": {"key": "serviceUrl", "type": "str"}, "channel_id": {"key": "channelId", "type": "str"}, "from_property": {"key": "from", "type": "ChannelAccount"}, "conversation": {"key": "conversation", "type": "ConversationAccount"}, "recipient": {"key": "recipient", "type": "ChannelAccount"}, "text_format": {"key": "textFormat", "type": "str"}, "attachment_layout": {"key": "attachmentLayout", "type": "str"}, "members_added": {"key": "membersAdded", "type": "[ChannelAccount]"}, "members_removed": {"key": "membersRemoved", "type": "[ChannelAccount]"}, "reactions_added": {"key": "reactionsAdded", "type": "[MessageReaction]"}, "reactions_removed": {"key": "reactionsRemoved", "type": "[MessageReaction]"}, "topic_name": {"key": "topicName", "type": "str"}, "history_disclosed": {"key": "historyDisclosed", "type": "bool"}, "locale": {"key": "locale", "type": "str"}, "text": {"key": "text", "type": "str"}, "speak": {"key": "speak", "type": "str"}, "input_hint": {"key": "inputHint", "type": "str"}, "summary": {"key": "summary", "type": "str"}, "suggested_actions": {"key": "suggestedActions", "type": "SuggestedActions"}, "attachments": {"key": "attachments", "type": "[Attachment]"}, "entities": {"key": "entities", "type": "[Entity]"}, "channel_data": {"key": "channelData", "type": "object"}, "action": {"key": "action", "type": "str"}, "reply_to_id": {"key": "replyToId", "type": "str"}, "label": {"key": "label", "type": "str"}, "value_type": {"key": 
"valueType", "type": "str"}, "value": {"key": "value", "type": "object"}, "name": {"key": "name", "type": "str"}, "relates_to": {"key": "relatesTo", "type": "ConversationReference"}, "code": {"key": "code", "type": "str"}, "expiration": {"key": "expiration", "type": "iso-8601"}, "importance": {"key": "importance", "type": "str"}, "delivery_mode": {"key": "deliveryMode", "type": "str"}, "listen_for": {"key": "listenFor", "type": "[str]"}, "text_highlights": {"key": "textHighlights", "type": "[TextHighlight]"}, "semantic_action": {"key": "semanticAction", "type": "SemanticAction"}, "caller_id": {"key": "callerId", "type": "str"}, } def __init__( self, *, type=None, id: str = None, timestamp=None, local_timestamp=None, local_timezone: str = None, service_url: str = None, channel_id: str = None, from_property=None, conversation=None, recipient=None, text_format=None, attachment_layout=None, members_added=None, members_removed=None, reactions_added=None, reactions_removed=None, topic_name: str = None, history_disclosed: bool = None, locale: str = None, text: str = None, speak: str = None, input_hint=None, summary: str = None, suggested_actions=None, attachments=None, entities=None, channel_data=None, action: str = None, reply_to_id: str = None, label: str = None, value_type: str = None, value=None, name: str = None, relates_to=None, code=None, expiration=None, importance=None, delivery_mode=None, listen_for=None, text_highlights=None, semantic_action=None, caller_id: str = None, **kwargs ) -> None: super(Activity, self).__init__(**kwargs) self.type = type self.id = id self.timestamp = timestamp self.local_timestamp = local_timestamp self.local_timezone = local_timezone self.service_url = service_url self.channel_id = channel_id self.from_property = from_property self.conversation = conversation self.recipient = recipient self.text_format = text_format self.attachment_layout = attachment_layout self.members_added = members_added self.members_removed = members_removed self.reactions_added = reactions_added self.reactions_removed = reactions_removed self.topic_name = topic_name self.history_disclosed = history_disclosed self.locale = locale self.text = text self.speak = speak self.input_hint = input_hint self.summary = summary self.suggested_actions = suggested_actions self.attachments = attachments self.entities = entities self.channel_data = channel_data self.action = action self.reply_to_id = reply_to_id self.label = label self.value_type = value_type self.value = value self.name = name self.relates_to = relates_to self.code = code self.expiration = expiration self.importance = importance self.delivery_mode = delivery_mode self.listen_for = listen_for self.text_highlights = text_highlights self.semantic_action = semantic_action self.caller_id = caller_id def apply_conversation_reference( self, reference: ConversationReference, is_incoming: bool = False ): self.channel_id = reference.channel_id self.service_url = reference.service_url self.conversation = reference.conversation if reference.locale is not None: self.locale = reference.locale if is_incoming: self.from_property = reference.user self.recipient = reference.bot if reference.activity_id is not None: self.id = reference.activity_id else: self.from_property = reference.bot self.recipient = reference.user if reference.activity_id is not None: self.reply_to_id = reference.activity_id return self def as_contact_relation_update_activity(self): return ( self if self.__is_activity(ActivityTypes.contact_relation_update) else None ) def 
as_conversation_update_activity(self): return self if self.__is_activity(ActivityTypes.conversation_update) else None def as_end_of_conversation_activity(self): return self if self.__is_activity(ActivityTypes.end_of_conversation) else None def as_event_activity(self): return self if self.__is_activity(ActivityTypes.event) else None def as_handoff_activity(self): return self if self.__is_activity(ActivityTypes.handoff) else None def as_installation_update_activity(self): return self if self.__is_activity(ActivityTypes.installation_update) else None def as_invoke_activity(self): return self if self.__is_activity(ActivityTypes.invoke) else None def as_message_activity(self): return self if self.__is_activity(ActivityTypes.message) else None def as_message_delete_activity(self): return self if self.__is_activity(ActivityTypes.message_delete) else None def as_message_reaction_activity(self): return self if self.__is_activity(ActivityTypes.message_reaction) else None def as_message_update_activity(self): return self if self.__is_activity(ActivityTypes.message_update) else None
MIT License
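A hedged usage sketch for as_suggestion_activity(), assuming the botbuilder-schema package is installed and exposes Activity and ActivityTypes at the package level; the text values are illustrative.

from botbuilder.schema import Activity, ActivityTypes

message = Activity(type=ActivityTypes.message, text="hello")
suggestion = Activity(type=ActivityTypes.suggestion, text="try this instead")

print(message.as_suggestion_activity())     # None: not a suggestion activity
print(suggestion.as_suggestion_activity())  # returns the same Activity instance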
capitalone/particle-cloud-framework
pcf/particle/aws/ec2/ec2_instance.py
EC2Instance._set_unique_keys
python
def _set_unique_keys(self): self.unique_keys = EC2Instance.UNIQUE_KEYS
Logic that sets the keys from the state definition that are used to uniquely identify the EC2 instance.
https://github.com/capitalone/particle-cloud-framework/blob/a05713434572d9d528d724855097854c5a56d377/pcf/particle/aws/ec2/ec2_instance.py#L97-L102
from pcf.core.aws_resource import AWSResource from pcf.core import State from pcf.util import pcf_util from jinja2 import Template from pcf.core.pcf_exceptions import * import base64 import logging import pkg_resources from pcf.util.aws.tag_specifications import EC2InstanceTagSpecifications logger = logging.getLogger(__name__) class EC2Instance(AWSResource): flavor = "ec2_instance" state_lookup = { "running": State.running, "terminated": State.terminated, "stopped": State.stopped, "missing": State.terminated, "stopping": State.pending, "shutting-down": State.pending, "pending": State.pending } equivalent_states = {} START_PARAM_FILTER = { "BlockDeviceMappings", "ImageId", "InstanceType", "KeyName", "MaxCount", "MinCount", "UserData", "SecurityGroupIds", "SubnetId", "IamInstanceProfile", "InstanceInitiatedShutdownBehavior", "TagSpecifications" } STATE_PARAM_FILTER = { "ImageId", "InstanceType", "KeyName", "UserData", "SecurityGroupIds", "SubnetId", "IamInstanceProfile", "InstanceInitiatedShutdownBehavior", "TagSpecifications" } EBS_PARAM_CONVERSIONS = { "DeleteOnTermination": "", "SnapshotId": "", "VolumeSize": "Size", "VolumeType": "", "Iops": "" } UNIQUE_KEYS = ["aws_resource.custom_config.instance_name"] def __init__(self, particle_definition): super(EC2Instance, self).__init__(particle_definition, "ec2") self.instance_name = self.desired_state_definition.get("custom_config").get("instance_name") if not self.instance_name: tags = self.desired_state_definition.get("custom_config").get("tags") self.instance_name = tags.get("PCFName") if not self.instance_name: raise Exception("EC2Instance must have 'instance_name' defined") self._set_unique_keys()
Apache License 2.0
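An illustrative sketch (not the real particle lifecycle) of what _set_unique_keys() accomplishes: the class-level UNIQUE_KEYS paths are copied onto the instance so the framework can identify it.

class FakeEC2Instance:
    # Mirrors EC2Instance.UNIQUE_KEYS from the record above.
    UNIQUE_KEYS = ["aws_resource.custom_config.instance_name"]

    def __init__(self):
        self.unique_keys = None
        self._set_unique_keys()

    def _set_unique_keys(self):
        # Same one-line body as EC2Instance._set_unique_keys().
        self.unique_keys = FakeEC2Instance.UNIQUE_KEYS

print(FakeEC2Instance().unique_keys)  # ['aws_resource.custom_config.instance_name']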
indigo-dc/udocker
udocker/helper/elfpatcher.py
ElfPatcher.get_patch_last_time
python
def get_patch_last_time(self):
    last_time = FileUtil(self._container_patch_time).getdata('r').strip()
    try:
        return str(int(last_time))
    except ValueError:
        return "0"
Get the time in seconds of the last full patch of the container.
https://github.com/indigo-dc/udocker/blob/87fb41cb5bcdb211d70f2b7f067c8e33d8959a1f/udocker/helper/elfpatcher.py#L154-L160
import re import os import sys import time from udocker import is_genstr from udocker.msg import Msg from udocker.config import Config from udocker.utils.uprocess import Uprocess from udocker.utils.fileutil import FileUtil from udocker.helper.hostinfo import HostInfo class ElfPatcher(object): BIN = 1 LIB = 2 LOADER = 4 ABORT_ON_ERROR = 8 ONE_SUCCESS = 16 ONE_OUTPUT = 32 def __init__(self, localrepo, container_id): self.localrepo = localrepo self._container_dir = os.path.realpath(self.localrepo.cd_container(container_id)) if not self._container_dir: raise ValueError("invalid container id") self._container_root = self._container_dir + "/ROOT" self._container_ld_so_path = self._container_dir + "/ld.so.path" self._container_ld_so_orig = self._container_dir + "/ld.so.orig" self._container_ld_libdirs = self._container_dir + "/ld.lib.dirs" self._container_patch_time = self._container_dir + "/patch.time" self._container_patch_path = self._container_dir + "/patch.path" self._shlib = re.compile(r"^lib\S+\.so(\.\d+)*$") self._uid = HostInfo.uid def select_patchelf(self): arch = HostInfo().arch() image_list = list() if arch == "amd64": image_list = ["patchelf-x86_64", "patchelf"] elif arch == "i386": image_list = ["patchelf-x86", "patchelf"] elif arch == "arm64": image_list = ["patchelf-arm64", "patchelf"] elif arch == "arm": image_list = ["patchelf-arm", "patchelf"] f_util = FileUtil(self.localrepo.bindir) patchelf_exec = f_util.find_file_in_dir(image_list) if not os.path.exists(patchelf_exec): Msg().err("Error: patchelf executable not found") sys.exit(1) return patchelf_exec def _replace(self, cmd, path): cmd_out = [] for arg in cmd: if "#f" in arg: arg = arg.replace("#f", path) cmd_out.append(arg) return cmd_out def _walk_fs(self, cmd, root_path, action=BIN): status = "" for dir_path, dummy, files in os.walk(root_path): for f_name in files: try: f_path = dir_path + '/' + f_name if os.path.islink(f_path): continue if os.stat(f_path).st_uid != self._uid: if action & self.ABORT_ON_ERROR: return "" continue if ((action & self.BIN and os.access(f_path, os.X_OK)) or (action & self.LIB and self._shlib.match(f_name))): out = Uprocess().get_output(self._replace(cmd, f_path)) if out: status = out if action & self.ABORT_ON_ERROR and status is None: return "" if action & self.ONE_SUCCESS and status is not None: return status if action & self.ONE_OUTPUT and status: return status except OSError: pass return status def guess_elf_loader(self): patchelf_exec = self.select_patchelf() cmd = [patchelf_exec, "-q", "--print-interpreter", "#f"] for d_name in ("/bin", "/usr/bin", "/lib64"): elf_loader = self._walk_fs(cmd, self._container_root + d_name, self.ONE_OUTPUT | self.BIN) if elf_loader and ".so" in elf_loader: return elf_loader return "" def get_original_loader(self): if os.path.exists(self._container_ld_so_path): return FileUtil(self._container_ld_so_path).getdata('r').strip() elf_loader = self.guess_elf_loader() if elf_loader: FileUtil(self._container_ld_so_path).putdata(elf_loader, 'w') return elf_loader def get_container_loader(self): elf_loader = self.get_original_loader() if not elf_loader: return "" elf_loader = self._container_root + "/" + elf_loader return elf_loader if os.path.exists(elf_loader) else "" def get_patch_last_path(self): last_path = FileUtil(self._container_patch_path).getdata('r') if last_path and is_genstr(last_path): return last_path.strip() return "" def check_container_path(self): last_path = self.get_patch_last_path() if last_path and last_path != self._container_dir: return False 
return True
Apache License 2.0
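A stand-alone sketch of the parsing fallback inside get_patch_last_time(); the file I/O through FileUtil is replaced by a plain string argument here.

def parse_patch_time(raw):
    # Mirrors get_patch_last_time(): keep only a well-formed integer, else "0".
    try:
        return str(int(raw.strip()))
    except ValueError:
        return "0"

print(parse_patch_time("1597849937\n"))  # "1597849937"
print(parse_patch_time(""))              # "0" when the timestamp file is empty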
rebiocoder/bioforum
venv/Lib/site-packages/django/db/backends/base/operations.py
BaseDatabaseOperations.max_name_length
python
def max_name_length(self): return None
Return the maximum length of table and column names, or None if there is no limit.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/db/backends/base/operations.py#L250-L255
import datetime import decimal from importlib import import_module from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import NotSupportedError, transaction from django.db.backends import utils from django.utils import timezone from django.utils.dateparse import parse_duration from django.utils.encoding import force_text class BaseDatabaseOperations: compiler_module = "django.db.models.sql.compiler" integer_field_ranges = { 'SmallIntegerField': (-32768, 32767), 'IntegerField': (-2147483648, 2147483647), 'BigIntegerField': (-9223372036854775808, 9223372036854775807), 'PositiveSmallIntegerField': (0, 32767), 'PositiveIntegerField': (0, 2147483647), } set_operators = { 'union': 'UNION', 'intersection': 'INTERSECT', 'difference': 'EXCEPT', } cast_data_types = {} cast_char_field_without_max_length = None PRECEDING = 'PRECEDING' FOLLOWING = 'FOLLOWING' UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING CURRENT_ROW = 'CURRENT ROW' def __init__(self, connection): self.connection = connection self._cache = None def autoinc_sql(self, table, column): return None def bulk_batch_size(self, fields, objs): return len(objs) def cache_key_culling_sql(self): return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" def unification_cast_sql(self, output_field): return '%s' def date_extract_sql(self, lookup_type, field_name): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method') def date_interval_sql(self, timedelta): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method') def date_trunc_sql(self, lookup_type, field_name): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetrunc_sql() method') def datetime_cast_date_sql(self, field_name, tzname): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date() method') def datetime_cast_time_sql(self, field_name, tzname): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method') def datetime_extract_sql(self, lookup_type, field_name, tzname): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method') def datetime_trunc_sql(self, lookup_type, field_name, tzname): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method') def time_trunc_sql(self, lookup_type, field_name): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method') def time_extract_sql(self, lookup_type, field_name): return self.date_extract_sql(lookup_type, field_name) def deferrable_sql(self): return '' def distinct_sql(self, fields): if fields: raise NotImplementedError('DISTINCT ON fields is not supported by this database backend') else: return 'DISTINCT' def fetch_returned_insert_id(self, cursor): return cursor.fetchone()[0] def field_cast_sql(self, db_type, internal_type): return '%s' def force_no_ordering(self): return [] def for_update_sql(self, nowait=False, skip_locked=False, of=()): return 'FOR UPDATE%s%s%s' % ( ' OF %s' % ', '.join(of) if of else '', ' NOWAIT' if nowait else '', ' SKIP LOCKED' if skip_locked else '', ) def last_executed_query(self, cursor, sql, params): def to_string(s): return force_text(s, strings_only=True, errors='replace') if isinstance(params, (list, tuple)): u_params = tuple(to_string(val) for 
val in params) elif params is None: u_params = () else: u_params = {to_string(k): to_string(v) for k, v in params.items()} return "QUERY = %r - PARAMS = %r" % (sql, u_params) def last_insert_id(self, cursor, table_name, pk_name): return cursor.lastrowid def lookup_cast(self, lookup_type, internal_type=None): return "%s" def max_in_list_size(self): return None
MIT License
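A hedged sketch of how a concrete backend would override max_name_length(); the 63-character limit below is only an example of a backend-specific value, not taken from this record.

class ExampleOperations:
    def max_name_length(self):
        # Base behaviour: no limit on table and column name length.
        return None

class LimitedOperations(ExampleOperations):
    def max_name_length(self):
        # A backend with a 63-character identifier limit overrides the hook.
        return 63

print(ExampleOperations().max_name_length())  # None
print(LimitedOperations().max_name_length())  # 63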
cortex-lab/phy
phy/plot/gloo/framebuffer.py
FrameBuffer._create
python
def _create(self):
    log.debug("GPU: Create framebuffer")
    self._handle = gl.glGenFramebuffers(1)
Create framebuffer on GPU
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/plot/gloo/framebuffer.py#L340-L344
import logging import numpy as np from . import gl from .globject import GLObject from .texture import Texture2D log = logging.getLogger(__name__) class RenderBuffer(GLObject): def __init__(self, width=0, height=0, format=None): GLObject.__init__(self) self._width = width self._height = height self._target = gl.GL_RENDERBUFFER self._format = format self._need_resize = True @property def width(self): return self._width @property def height(self): return self._height def resize(self, width, height): if width != self._width or height != self._height: self._need_resize = True self._width = width self._height = height def _create(self): log.debug("GPU: Create render buffer") self._handle = gl.glGenRenderbuffers(1) def _delete(self): log.debug("GPU: Deleting render buffer") gl.glDeleteRenderbuffer(self._handle) def _activate(self): log.debug("GPU: Activate render buffer") gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self._handle) if self._need_resize: self._resize() self._need_resize = False def _deactivate(self): log.debug("GPU: Deactivate render buffer") gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, 0) def _resize(self): log.debug("GPU: Resize render buffer") gl.glRenderbufferStorage(self._target, self._format, self._width, self._height) class ColorBuffer(RenderBuffer): def __init__(self, width, height, format=gl.GL_RGBA): RenderBuffer.__init__(self, width, height, format) class DepthBuffer(RenderBuffer): def __init__(self, width, height, format=gl.GL_DEPTH_COMPONENT): RenderBuffer.__init__(self, width, height, format) class StencilBuffer(RenderBuffer): def __init__(self, width, height, format=gl.GL_STENCIL_INDEX8): RenderBuffer.__init__(self, width, height, format) class FrameBuffer(GLObject): def __init__(self, color=None, depth=None, stencil=None): GLObject.__init__(self) self._width = 0 self._height = 0 self._color = None self._depth = None self._stencil = None self._need_attach = True self._pending_attachments = [] if color is not None: self.color = color if depth is not None: self.depth = depth if stencil is not None: self.stencil = stencil @property def color(self): return self._color @color.setter def color(self, buffers): if not isinstance(buffers, list): buffers = [buffers] self._color = [] for i, buffer in enumerate(buffers): if self.width != 0 and self.width != buffer.width: raise ValueError("Buffer width does not match") elif self.height != 0 and self.height != buffer.height: raise ValueError("Buffer height does not match") self._width = buffer.width self._height = buffer.height target = gl.GL_COLOR_ATTACHMENT0 + i self._color.append(buffer) if isinstance(buffer, (ColorBuffer, Texture2D)) or buffer is None: self._pending_attachments.append((target, buffer)) else: raise ValueError( "Buffer must be a ColorBuffer, Texture2D or None") self._need_attach = True @property def depth(self): return self._depth @depth.setter def depth(self, buffer): if self.width != 0 and self.width != buffer.width: raise ValueError("Buffer width does not match") elif self.height != 0 and self.height != buffer.height: raise ValueError("Buffer height does not match") self._width = buffer.width self._height = buffer.height target = gl.GL_DEPTH_ATTACHMENT self._depth = buffer if isinstance(buffer, (DepthBuffer, Texture2D)) or buffer is None: self._pending_attachments.append((target, buffer)) else: raise ValueError( "Buffer must be a DepthBuffer, Texture2D or None") self._need_attach = True @property def stencil(self): return self._stencil @stencil.setter def stencil(self, buffer): if self.width != 0 and self.width != 
buffer.width: raise ValueError("Buffer width does not match") elif self.height != 0 and self.height != buffer.height: raise ValueError("Buffer height does not match") self._width = buffer.width self._height = buffer.height target = gl.GL_STENCIL_ATTACHMENT self._stencil = buffer if isinstance(buffer, StencilBuffer) or buffer is None: self._pending_attachments.append((target, buffer)) else: raise ValueError( "Buffer must be a StencilBuffer, Texture2D or None") self._need_attach = True @property def width(self): return self._width @property def height(self): return self._height def resize(self, width, height): self._width = width self._height = height for i, buffer in enumerate(self.color): if isinstance(buffer, ColorBuffer): buffer.resize(width, height) elif isinstance(buffer, Texture2D): newbuffer = np.resize(buffer, (height, width, buffer.shape[2])) newbuffer = newbuffer.view(buffer.__class__) self.color[i] = newbuffer buffer.delete() target = gl.GL_COLOR_ATTACHMENT0 + i self._pending_attachments.append((target, self.color[i])) self._need_attach = True if isinstance(self.depth, DepthBuffer): self.depth.resize(width, height) elif isinstance(self.depth, Texture2D): depth = np.resize(self.depth, (height, width, self.depth.shape[2])) depth = depth.view(self.depth.__class__) self.depth.delete() self.depth = depth target = gl.GL_DEPTH_ATTACHMENT self._pending_attachments.append((target, self.depth)) self._need_attach = True if isinstance(self.stencil, StencilBuffer): self.stencil.resize(width, height) elif isinstance(self.stencil, Texture2D): stencil = np.resize( self.stencil, (height, width, self.stencil.shape[2])) stencil = stencil.view(self.stencil.__class__) self.stencil.delete() self.stencil = stencil target = gl.GL_STENCIL_ATTACHMENT self._pending_attachments.append((target, self.stencil)) self._need_attach = True
BSD 3-Clause New or Revised License
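An illustrative, OpenGL-free sketch of the lazy-creation pattern around _create(): the GPU handle is generated once, on first activation, rather than in __init__.

class LazyHandle:
    def __init__(self):
        self._handle = None

    def _create(self):
        # In FrameBuffer._create() this is gl.glGenFramebuffers(1).
        self._handle = object()

    def activate(self):
        if self._handle is None:
            self._create()
        return self._handle

fb = LazyHandle()
print(fb.activate() is fb.activate())  # True: the handle is created once and reused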
daxm/fmcapi
fmcapi/api_objects/policy_services/defaultactions.py
DefaultActions.format_data
python
def format_data(self, filter_query=""):
    json_data = super().format_data()
    logging.debug("In format_data() for DefaultActions class.")
    if "action" in self.__dict__:
        if self.action in self.VALID_ACTION:
            json_data["action"] = self.action
        else:
            logging.warning(f"action variable must be one of: {self.VALID_ACTION}.")
    return json_data
Gather all the data in preparation for sending to the API in JSON format. :param filter_query: (str) 'all' or 'kwargs' :return: (dict) json_data
https://github.com/daxm/fmcapi/blob/fc4bad7ff733a6283e83970d7844c73e7e88a50c/fmcapi/api_objects/policy_services/defaultactions.py#L42-L56
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from .accesspolicies import AccessPolicies
import logging

class DefaultActions(APIClassTemplate):
    VALID_JSON_DATA = []
    VALID_FOR_KWARGS = VALID_JSON_DATA + [
        "acp_id",
        "acp_name",
        "device_id",
        "device_name",
        "fetchZeroHitCount",
        "limit",
        "action",
    ]
    PREFIX_URL = "/policy/accesspolicies"
    REQUIRED_FOR_PUT = ["acp_id", "id", "action"]
    REQUIRED_FOR_GET = ["acp_id"]
    VALID_ACTION = ["BLOCK", "TRUST", "PERMIT", "NETWORK_DISCOVERY"]

    def __init__(self, fmc, **kwargs):
        logging.debug("In __init__() for DefaultActions class.")
        super().__init__(fmc, **kwargs)
        self.parse_kwargs(**kwargs)
        self.type = "AccessPolicyDefaultAction"
        self.URL = f"{self.URL}{self.URL_SUFFIX}"
BSD 3-Clause New or Revised License
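A simplified sketch of the validation performed by DefaultActions.format_data(), with the class machinery stripped away; the rejected value is made up.

import logging

VALID_ACTION = ["BLOCK", "TRUST", "PERMIT", "NETWORK_DISCOVERY"]

def format_action(action):
    json_data = {}
    if action in VALID_ACTION:
        json_data["action"] = action
    else:
        logging.warning(f"action variable must be one of: {VALID_ACTION}.")
    return json_data

print(format_action("BLOCK"))  # {'action': 'BLOCK'}
print(format_action("ALLOW"))  # {} plus a warning, mirroring the original behaviour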
preferredai/cornac
cornac/models/mcf/recom_mcf.py
MCF.score
python
def score(self, user_idx, item_idx=None):
    if item_idx is None:
        if self.train_set.is_unk_user(user_idx):
            raise ScoreException(
                "Can't make score prediction for (user_id=%d)" % user_idx
            )
        known_item_scores = self.V.dot(self.U[user_idx, :])
        return known_item_scores
    else:
        if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
            raise ScoreException(
                "Can't make score prediction for (user_id=%d, item_id=%d)"
                % (user_idx, item_idx)
            )
        user_pred = self.V[item_idx, :].dot(self.U[user_idx, :])
        user_pred = sigmoid(user_pred)
        if self.train_set.min_rating == self.train_set.max_rating:
            user_pred = scale(user_pred, 0.0, self.train_set.max_rating, 0.0, 1.0)
        else:
            user_pred = scale(
                user_pred,
                self.train_set.min_rating,
                self.train_set.max_rating,
                0.0,
                1.0,
            )
        return user_pred
Predict the scores/ratings of a user for an item. Parameters ---------- user_idx: int, required The index of the user for whom to perform score prediction. item_idx: int, optional, default: None The index of the item for which to perform score prediction. If None, scores for all known items will be returned. Returns ------- res : A scalar or a Numpy array Relative scores that the user gives to the item or to all known items
https://github.com/preferredai/cornac/blob/2de81e17b83794a3e7cbc4f47d6bd9061694b5d1/cornac/models/mcf/recom_mcf.py#L198-L247
import numpy as np from ..recommender import Recommender from ...utils.common import sigmoid from ...utils.common import scale from ...exception import ScoreException class MCF(Recommender): def __init__( self, k=5, max_iter=100, learning_rate=0.001, gamma=0.9, lamda=0.001, name="MCF", trainable=True, verbose=False, init_params=None, seed=None, ): Recommender.__init__(self, name=name, trainable=trainable, verbose=verbose) self.k = k self.max_iter = max_iter self.learning_rate = learning_rate self.gamma = gamma self.lamda = lamda self.seed = seed self.ll = np.full(max_iter, 0) self.eps = 0.000000001 self.init_params = {} if init_params is None else init_params self.U = self.init_params.get("U", None) self.V = self.init_params.get("V", None) self.Z = self.init_params.get("Z", None) def fit(self, train_set, val_set=None): Recommender.fit(self, train_set, val_set) if self.trainable: (rat_uid, rat_iid, rat_val) = train_set.uir_tuple map_iid = train_set.item_indices (net_iid, net_jid, net_val) = train_set.item_graph.get_train_triplet( map_iid, map_iid ) if [self.train_set.min_rating, self.train_set.max_rating] != [0, 1]: if self.train_set.min_rating == self.train_set.max_rating: rat_val = scale(rat_val, 0.0, 1.0, 0.0, self.train_set.max_rating) else: rat_val = scale( rat_val, 0.0, 1.0, self.train_set.min_rating, self.train_set.max_rating, ) if [min(net_val), max(net_val)] != [0, 1]: if min(net_val) == max(net_val): net_val = scale(net_val, 0.0, 1.0, 0.0, max(net_val)) else: net_val = scale(net_val, 0.0, 1.0, min(net_val), max(net_val)) rat_val = np.array(rat_val, dtype="float32") rat_uid = np.array(rat_uid, dtype="int32") rat_iid = np.array(rat_iid, dtype="int32") net_val = np.array(net_val, dtype="float32") net_iid = np.array(net_iid, dtype="int32") net_jid = np.array(net_jid, dtype="int32") if self.verbose: print("Learning...") from cornac.models.mcf import mcf res = mcf.mcf( rat_uid, rat_iid, rat_val, net_iid, net_jid, net_val, k=self.k, n_users=train_set.num_users, n_items=train_set.num_items, n_ratings=len(rat_val), n_edges=len(net_val), n_epochs=self.max_iter, lamda=self.lamda, learning_rate=self.learning_rate, gamma=self.gamma, init_params={"U": self.U, "V": self.V, "Z": self.Z}, verbose=self.verbose, seed=self.seed, ) self.U = np.asarray(res["U"]) self.V = np.asarray(res["V"]) self.Z = np.asarray(res["Z"]) if self.verbose: print("Learning completed") elif self.verbose: print("%s is trained already (trainable = False)" % self.name) return self
Apache License 2.0
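A self-contained sketch of the scoring math in MCF.score() for a single (user, item) pair; sigmoid and scale below are simplified stand-ins for cornac's utilities, and the factor vectors are made up.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def scale(value, target_min, target_max, source_min=0.0, source_max=1.0):
    # Linearly map value from [source_min, source_max] onto [target_min, target_max].
    ratio = (value - source_min) / (source_max - source_min)
    return target_min + ratio * (target_max - target_min)

U_u = np.array([0.1, 0.4, -0.2])  # hypothetical user factors
V_i = np.array([0.3, 0.2, 0.5])   # hypothetical item factors
raw = V_i.dot(U_u)                # same dot product as in score()
print(scale(sigmoid(raw), 1.0, 5.0))  # sigmoid output mapped onto a 1-5 rating scale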
fxtd-odyssey/qbinder
research/pyqtConfig/config.py
QSettingsManager.reset
python
def reset(self):
    self.settings = QSettings()
    self.handlers = {}
    self.handler_callbacks = {}
    self.defaults = {}
    self.maps = {}
    self.eventhooks = {}
Reset the config manager to its initialised state. This initialises QSettings, unsets all defaults, and removes all handlers, maps, and hooks.
https://github.com/fxtd-odyssey/qbinder/blob/734fc2aaf80a495c1216b2c27530ab752279d103/research/pyqtConfig/config.py#L953-L964
from __future__ import unicode_literals from __future__ import print_function __author__ = 'timmyliang' __email__ = '820472580@qq.com' __date__ = '2020-03-09 16:19:41' from Qt.QtGui import * from Qt.QtCore import * from Qt.QtWidgets import * import os import sys import types from collections import defaultdict, OrderedDict import logging try: import xml.etree.cElementTree as et except ImportError: import xml.etree.ElementTree as et try: QVariant except NameError: QVariant = None RECALCULATE_ALL = 1 RECALCULATE_VIEW = 2 def types_MethodType(fn, handler): try: return types.MethodType(fn, handler, type(handler)) except TypeError: return types.MethodType(fn, handler) def _convert_list_type_from_XML(vs): vlist = vs.findall('ListItem') + vs.findall('ConfigListItem') l = [] for xconfig in vlist: v = xconfig.text if xconfig.get('type') in CONVERT_TYPE_FROM_XML: v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig) l.append(v) return l def _convert_list_type_to_XML(co, vs): for cv in vs: c = et.SubElement(co, "ListItem") t = type(cv).__name__ c.set("type", t) c = CONVERT_TYPE_TO_XML[t](c, cv) return co def _convert_dict_type_from_XML(vs): vlist = vs.findall('DictItem') d = {} for xconfig in vlist: v = xconfig.text if xconfig.get('type') in CONVERT_TYPE_FROM_XML: v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig) d[xconfig.get('key')] = v return d def _convert_dict_type_to_XML(co, vs): for k, v in vs.items(): c = et.SubElement(co, "DictItem") t = type(v).__name__ c.set("type", t) c.set("key", k) c = CONVERT_TYPE_TO_XML[t](c, v) return co def _apply_text_str(co, s): co.text = str(s) return co CONVERT_TYPE_TO_XML = { 'str': _apply_text_str, 'unicode': _apply_text_str, 'int': _apply_text_str, 'float': _apply_text_str, 'bool': _apply_text_str, 'list': _convert_list_type_to_XML, 'tuple': _convert_list_type_to_XML, 'dict': _convert_dict_type_to_XML, 'NoneType': _apply_text_str, } CONVERT_TYPE_FROM_XML = { 'str': lambda x: str(x.text), 'unicode': lambda x: str(x.text), 'int': lambda x: int(x.text), 'float': lambda x: float(x.text), 'bool': lambda x: bool(x.text.lower() == 'true'), 'list': _convert_list_type_from_XML, 'tuple': _convert_list_type_from_XML, 'dict': _convert_dict_type_from_XML, 'NoneType': lambda x: None, } def build_dict_mapper(mdict): rdict = {v: k for k, v in mdict.items()} return ( lambda x: mdict[x] if x in mdict else x, lambda x: rdict[x] if x in rdict else x, ) try: unicode except: def unicode(s): if isinstance(s, bytes): return s.decode('utf-8') else: return s try: basestring except: basestring = str def build_tuple_mapper(mlist): mdict = {k: v for k, v in mlist} rdict = {v: k for k, v in mlist} return ( lambda x: mdict[x] if x in mdict else x, lambda x: rdict[x] if x in rdict else x, ) def _get_QComboBox(self): return self._get_map(self.currentText()) def _set_QComboBox(self, v): self.setCurrentIndex(self.findText(unicode(self._set_map(v)))) def _event_QComboBox(self): return self.currentIndexChanged def _get_QCheckBox(self): return self.isChecked() def _set_QCheckBox(self, v): self.setChecked(v) def _event_QCheckBox(self): return self.stateChanged def _get_QAction(self): return self.isChecked() def _set_QAction(self, v): self.setChecked(v) def _event_QAction(self): return self.toggled def _get_QActionGroup(self): if self.checkedAction(): return self.actions().index(self.checkedAction()) else: return None def _set_QActionGroup(self, v): self.actions()[v].setChecked(True) def _event_QActionGroup(self): return self.triggered def _get_QPushButton(self): return self.isChecked() def 
_set_QPushButton(self, v): self.setChecked(v) def _event_QPushButton(self): return self.toggled def _get_QSpinBox(self): return self.value() def _set_QSpinBox(self, v): self.setValue(v) def _event_QSpinBox(self): return self.valueChanged def _get_QDoubleSpinBox(self): return self.value() def _set_QDoubleSpinBox(self, v): self.setValue(v) def _event_QDoubleSpinBox(self): return self.valueChanged def _get_QPlainTextEdit(self): return self.document().toPlainText() def _set_QPlainTextEdit(self, v): self.setPlainText(unicode(v)) def _event_QPlainTextEdit(self): return self.sourceChangesApplied def _get_QLineEdit(self): return self._get_map(self.text()) def _set_QLineEdit(self, v): self.setText(unicode(self._set_map(v))) def _event_QLineEdit(self): return self.textChanged def _get_CodeEditor(self): _get_QPlainTextEdit(self) def _set_CodeEditor(self, v): _set_QPlainTextEdit(self, unicode(v)) def _event_CodeEditor(self): return _event_QPlainTextEdit(self) def _get_QListWidget(self): return [self._get_map(s.text()) for s in self.selectedItems()] def _set_QListWidget(self, v): if v: for s in v: self.findItems(unicode(self._set_map(s)), Qt.MatchExactly)[0].setSelected(True) def _event_QListWidget(self): return self.itemSelectionChanged def _get_QListWidgetAddRemove(self): return [self._get_map(self.item(n).text()) for n in range(0, self.count())] def _set_QListWidgetAddRemove(self, v): block = self.blockSignals(True) self.clear() self.addItems([unicode(self._set_map(s)) for s in v]) self.blockSignals(block) self.itemAddedOrRemoved.emit() def _event_QListWidgetAddRemove(self): return self.itemAddedOrRemoved def _get_QColorButton(self): return self.color() def _set_QColorButton(self, v): self.setColor(v) def _event_QColorButton(self): return self.colorChanged def _get_QNoneDoubleSpinBox(self): return self.value() def _set_QNoneDoubleSpinBox(self, v): self.setValue(v) def _event_QNoneDoubleSpinBox(self): return self.valueChanged def _get_QCheckTreeWidget(self): return [self._get_map(s) for s in self._checked_item_cache] def _set_QCheckTreeWidget(self, v): if v: for s in v: f = self.findItems(unicode(self._set_map(s)), Qt.MatchExactly | Qt.MatchRecursive) if f: f[0].setCheckState(0, Qt.Checked) def _event_QCheckTreeWidget(self): return self.itemCheckedChanged def _get_QSlider(self): return self.value() def _set_QSlider(self, v): self.setValue(v) def _event_QSlider(self): return self.valueChanged def _get_QButtonGroup(self): return [(nr, btn.isChecked()) for nr, btn in enumerate(self.buttons())] def _set_QButtonGroup(self, v): for idx, state in v: self.buttons()[idx].setChecked(state) def _event_QButtonGroup(self): return self.buttonClicked def _get_QTabWidget(self): return self.currentIndex() def _set_QTabWidget(self, v): self.setCurrentIndex(v) def _event_QTabWidget(self): return self.currentChanged HOOKS = { QComboBox: (_get_QComboBox, _set_QComboBox, _event_QComboBox), QCheckBox: (_get_QCheckBox, _set_QCheckBox, _event_QCheckBox), QAction: (_get_QAction, _set_QAction, _event_QAction), QActionGroup: (_get_QActionGroup, _set_QActionGroup, _event_QActionGroup), QPushButton: (_get_QPushButton, _set_QPushButton, _event_QPushButton), QSpinBox: (_get_QSpinBox, _set_QSpinBox, _event_QSpinBox), QDoubleSpinBox: (_get_QDoubleSpinBox, _set_QDoubleSpinBox, _event_QDoubleSpinBox), QPlainTextEdit: (_get_QPlainTextEdit, _set_QPlainTextEdit, _event_QPlainTextEdit), QLineEdit: (_get_QLineEdit, _set_QLineEdit, _event_QLineEdit), QListWidget: (_get_QListWidget, _set_QListWidget, _event_QListWidget), QSlider: 
(_get_QSlider, _set_QSlider, _event_QSlider), QButtonGroup: (_get_QButtonGroup, _set_QButtonGroup, _event_QButtonGroup), QTabWidget: (_get_QTabWidget, _set_QTabWidget, _event_QTabWidget) } class ConfigManagerBase(QObject): updated = Signal(int) def __init__(self, defaults={}, *args, **kwargs): super(ConfigManagerBase, self).__init__(*args, **kwargs) self.mutex = QMutex() self.hooks = HOOKS self.reset() self.defaults = defaults def _get(self, key): with QMutexLocker(self.mutex): try: return self.config[key] except: return None def _get_default(self, key): with QMutexLocker(self.mutex): try: return self.defaults[key] except: return None def get(self, key): v = self._get(key) if v is not None: return v else: return self._get_default(key) def set(self, key, value, trigger_handler=True, trigger_update=True): old = self._get(key) if old is not None and old == value: return False self._set(key, value) if trigger_handler and key in self.handlers: getter = self.handlers[key].getter setter = self.handlers[key].setter if setter and getter() != self._get(key): setter(self._get(key)) if trigger_update: self.updated.emit(self.eventhooks[key] if key in self.eventhooks else RECALCULATE_ALL) return True def set_default(self, key, value, eventhook=RECALCULATE_ALL): self.defaults[key] = value self.eventhooks[key] = eventhook self.updated.emit(eventhook) def set_defaults(self, keyvalues, eventhook=RECALCULATE_ALL): for key, value in list(keyvalues.items()): self.defaults[key] = value self.eventhooks[key] = eventhook self.updated.emit(eventhook) def replace(self, keyvalues, trigger_update=True): self.config = [] self.set_many(keyvalues) def set_many(self, keyvalues, trigger_update=True): has_updated = False for k, v in list(keyvalues.items()): u = self.set(k, v, trigger_update=False) has_updated = has_updated or u if has_updated and trigger_update: self.updated.emit(RECALCULATE_ALL) return has_updated def add_handler(self, key, handler, mapper=(lambda x: x, lambda x: x), auto_set_default=True, default=None): if isinstance(mapper, (dict, OrderedDict)): mapper = build_dict_mapper(mapper) elif isinstance(mapper, list) and isinstance(mapper[0], tuple): mapper = build_tuple_mapper(mapper) handler._get_map, handler._set_map = mapper if key in self.handlers: return self.handlers[key] = handler cls = self._get_hook(handler) hookg, hooks, hooku = self.hooks[cls] handler.getter = types_MethodType(hookg, handler) handler.setter = types_MethodType(hooks, handler) handler.updater = types_MethodType(hooku, handler) logging.debug("Add handler %s for %s" % (type(handler).__name__, key)) handler_callback = lambda x = None: self.set(key, handler.getter(), trigger_handler=False) handler.updater().connect(handler_callback) self.handler_callbacks[key] = handler_callback if key not in self.defaults: if default is None: self.set_default(key, handler.getter()) else: self.set_default(key, default) if self._get(key) is not None: handler.setter(self._get(key)) elif key in self.defaults: handler.setter(self.defaults[key]) def _get_hook(self, handler): fst = lambda x: next(x, None) cls = fst(x for x in self.hooks.keys() if x == type(handler)) if cls is None: cls = fst(x for x in self.hooks.keys() if isinstance(handler, x)) if cls is None: raise TypeError("No handler-functions available for this widget " "type (%s)" % type(handler).__name__) return cls def add_handlers(self, keyhandlers): for key, handler in list(keyhandlers.items()): self.add_handler(key, handler) def remove_handler(self, key): if key in self.handlers: handler = 
self.handlers[key] handler.updater().disconnect(self.handler_callbacks[key]) del self.handlers[key] def add_hooks(self, key, hooks): self.hooks[key] = hooks def getXMLConfig(self, root): config = et.SubElement(root, "Config") for ck, cv in list(self.config.items()): co = et.SubElement(config, "ConfigSetting") co.set("id", ck) t = type(cv).__name__ co.set("type", type(cv).__name__) co = CONVERT_TYPE_TO_XML[t](co, cv) return root def setXMLConfig(self, root): config = {} for xconfig in root.findall('Config/ConfigSetting'): if xconfig.get('type') in CONVERT_TYPE_FROM_XML: v = CONVERT_TYPE_FROM_XML[xconfig.get('type')](xconfig) config[xconfig.get('id')] = v self.set_many(config, trigger_update=False) def as_dict(self): result_dict = {} for k, v in self.defaults.items(): result_dict[k] = self.get(k) return result_dict class ConfigManager(ConfigManagerBase): def reset(self): self.config = {} self.handlers = {} self.handler_callbacks = {} self.defaults = {} self.maps = {} self.eventhooks = {} def _get(self, key): with QMutexLocker(self.mutex): try: return self.config[key] except: return None def _set(self, key, value): with QMutexLocker(self.mutex): self.config[key] = value class QSettingsManager(ConfigManagerBase):
MIT License
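A minimal sketch of the reset contract: every registry the manager keeps is emptied. The QSettings store itself is omitted here because it needs a running Qt application.

class TinyManager:
    def __init__(self):
        self.reset()

    def reset(self):
        # QSettingsManager.reset() additionally re-creates self.settings = QSettings().
        self.handlers = {}
        self.handler_callbacks = {}
        self.defaults = {}
        self.maps = {}
        self.eventhooks = {}

m = TinyManager()
m.defaults["threshold"] = 0.5
m.reset()
print(m.defaults)  # {}: defaults are gone again after a reset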
matbarofex/pyrofex
src/pyRofex/service.py
set_default_environment
python
def set_default_environment(environment):
    _validate_environment(environment)
    globals.default_environment = environment
Set the default environment. The environment that is going to be used as the default when none is specified. Example: if we send an order using the send_order function and do not specify an Environment, the order is sent to the default one. :param environment: the environment that is going to be set as default. :type environment: Environment
https://github.com/matbarofex/pyrofex/blob/536dd896d7f45dd066fe8ca31986ed1da011a942/src/pyRofex/service.py#L47-L59
from inspect import getargspec

from .clients.rest_rfx import RestClient
from .clients.websocket_rfx import WebSocketClient
from .components import globals
from .components.exceptions import ApiException
from .components.enums import Environment
from .components.enums import MarketDataEntry
from .components.enums import TimeInForce
from .components.enums import Market

def initialize(user, password, account, environment, proxies=None):
    _validate_environment(environment)
    _set_environment_parameters(user, password, account, environment, proxies)
    globals.environment_config[environment]["rest_client"] = RestClient(environment)
    globals.environment_config[environment]["ws_client"] = WebSocketClient(environment)
    set_default_environment(environment)
MIT License
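A hedged usage sketch; the credentials are placeholders, and Environment.REMARKET plus the package-level exports are assumptions about the installed pyRofex package. Note that initialize() already calls set_default_environment() at the end (see the context above), so the explicit call here simply switches the default later.

import pyRofex

pyRofex.initialize(
    user="sample_user",          # placeholder credentials
    password="sample_password",
    account="sample_account",
    environment=pyRofex.Environment.REMARKET,
)

# Later, the default used by calls such as send_order() can be switched explicitly:
pyRofex.set_default_environment(pyRofex.Environment.REMARKET)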
guanshuoxu/rsna-str-pulmonary-embolism-detection
trainval/lung_localization/split2/albumentations/augmentations/bbox_utils.py
convert_bbox_to_albumentations
python
def convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity=False):
    if source_format not in {"coco", "pascal_voc", "yolo"}:
        raise ValueError(
            "Unknown source_format {}. Supported formats are: 'coco', 'pascal_voc' and 'yolo'".format(source_format)
        )
    if source_format == "coco":
        x_min, y_min, width, height = bbox[:4]
        x_max = x_min + width
        y_max = y_min + height
    elif source_format == "yolo":
        _bbox = np.array(bbox[:4])
        assert np.all((0 < _bbox) & (_bbox < 1)), "In YOLO format all labels must be float and in range (0, 1)"
        x, y, width, height = denormalize_bbox(_bbox, rows, cols)
        x_min = x - width / 2 + 1
        x_max = x_min + width
        y_min = y - height / 2 + 1
        y_max = y_min + height
    else:
        x_min, y_min, x_max, y_max = bbox[:4]
    bbox = [x_min, y_min, x_max, y_max] + list(bbox[4:])
    bbox = normalize_bbox(bbox, rows, cols)
    if check_validity:
        check_bbox(bbox)
    return bbox
Convert a bounding box from a format specified in `source_format` to the format used by albumentations: normalized coordinates of the top-left and bottom-right corners of the bounding box in the form `[x_min, y_min, x_max, y_max]`, e.g. `[0.15, 0.27, 0.67, 0.5]`. Args: bbox (list): bounding box source_format (str): format of the bounding box. Should be 'coco', 'pascal_voc', or 'yolo'. check_validity (bool): check if all boxes are valid boxes rows (int): image height cols (int): image width Note: The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200]. The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212]. The `yolo` format of a bounding box looks like `[x, y, width, height]`, e.g. [0.3, 0.1, 0.05, 0.07]; where `x`, `y` are the coordinates of the center of the box, and all values are normalized to (0, 1) by image height and width. Raises: ValueError: if `source_format` is not one of `coco`, `pascal_voc`, or `yolo`.
https://github.com/guanshuoxu/rsna-str-pulmonary-embolism-detection/blob/f5552d40ba9c32e7c16f36e1d2a03ed883ca7858/trainval/lung_localization/split2/albumentations/augmentations/bbox_utils.py#L129-L177
from __future__ import division from albumentations.core.utils import DataProcessor import numpy as np __all__ = [ "normalize_bbox", "denormalize_bbox", "normalize_bboxes", "denormalize_bboxes", "calculate_bbox_area", "filter_bboxes_by_visibility", "convert_bbox_to_albumentations", "convert_bbox_from_albumentations", "convert_bboxes_to_albumentations", "convert_bboxes_from_albumentations", "BboxProcessor", ] class BboxProcessor(DataProcessor): @property def default_data_name(self): return "bboxes" def ensure_data_valid(self, data): for data_name in self.data_fields: if data.get(data_name) and len(data[data_name][0]) < 5: if self.params.label_fields is None: raise ValueError( "Please specify 'label_fields' in 'bbox_params' or add labels to the end of bbox " "because bboxes must have labels" ) if self.params.label_fields: if not all(l in data.keys() for l in self.params.label_fields): raise ValueError("Your 'label_fields' are not valid - them must have same names as params in dict") def filter(self, data, rows, cols): return filter_bboxes( data, rows, cols, min_area=self.params.min_area, min_visibility=self.params.min_visibility ) def check(self, data, rows, cols): return check_bboxes(data) def convert_from_albumentations(self, data, rows, cols): return convert_bboxes_from_albumentations(data, self.params.format, rows, cols, check_validity=True) def convert_to_albumentations(self, data, rows, cols): return convert_bboxes_to_albumentations(data, self.params.format, rows, cols, check_validity=True) def normalize_bbox(bbox, rows, cols): if rows == 0: raise ValueError("Argument rows cannot be zero") if cols == 0: raise ValueError("Argument cols cannot be zero") x_min, y_min, x_max, y_max = bbox[:4] normalized_bbox = [x_min / cols, y_min / rows, x_max / cols, y_max / rows] return normalized_bbox + list(bbox[4:]) def denormalize_bbox(bbox, rows, cols): if rows == 0: raise ValueError("Argument rows cannot be zero") if cols == 0: raise ValueError("Argument cols cannot be zero") x_min, y_min, x_max, y_max = bbox[:4] denormalized_bbox = [x_min * cols, y_min * rows, x_max * cols, y_max * rows] return denormalized_bbox + list(bbox[4:]) def normalize_bboxes(bboxes, rows, cols): return [normalize_bbox(bbox, rows, cols) for bbox in bboxes] def denormalize_bboxes(bboxes, rows, cols): return [denormalize_bbox(bbox, rows, cols) for bbox in bboxes] def calculate_bbox_area(bbox, rows, cols): bbox = denormalize_bbox(bbox, rows, cols) x_min, y_min, x_max, y_max = bbox[:4] area = (x_max - x_min) * (y_max - y_min) return area def filter_bboxes_by_visibility( original_shape, bboxes, transformed_shape, transformed_bboxes, threshold=0.0, min_area=0.0 ): img_height, img_width = original_shape[:2] transformed_img_height, transformed_img_width = transformed_shape[:2] visible_bboxes = [] for bbox, transformed_bbox in zip(bboxes, transformed_bboxes): if not all(0.0 <= value <= 1.0 for value in transformed_bbox[:4]): continue bbox_area = calculate_bbox_area(bbox, img_height, img_width) transformed_bbox_area = calculate_bbox_area(transformed_bbox, transformed_img_height, transformed_img_width) if transformed_bbox_area < min_area: continue visibility = transformed_bbox_area / bbox_area if visibility >= threshold: visible_bboxes.append(transformed_bbox) return visible_bboxes
MIT License
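A usage sketch with made-up pixel coordinates on a 640x480 image; the import path is an assumption (in the repository above the module is a vendored copy under trainval/lung_localization/split2/albumentations/augmentations/bbox_utils.py).

from albumentations.augmentations.bbox_utils import convert_bbox_to_albumentations

coco_box = [97, 12, 150, 200]  # COCO format: [x_min, y_min, width, height] in pixels
alb_box = convert_bbox_to_albumentations(coco_box, "coco", rows=480, cols=640)
print(alb_box)  # ~[0.1516, 0.025, 0.3859, 0.4417]: normalized [x_min, y_min, x_max, y_max]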
wummel/dosage
dosagelib/plugins/s.py
SMBC.shouldSkipUrl
python
def shouldSkipUrl(self, url, data): return url in ( self.stripUrl % '2865', self.stripUrl % '2653', self.stripUrl % '2424', self.stripUrl % '2226', self.stripUrl % '2069', self.stripUrl % '1895', self.stripUrl % '1896', self.stripUrl % '1589', )
Skip promo or missing update pages.
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/plugins/s.py#L297-L308
from re import compile, escape, IGNORECASE, sub from os.path import splitext, basename from datetime import datetime from ..scraper import _BasicScraper, _ParserScraper from ..helpers import indirectStarter, bounceStarter from ..util import tagre, getPageContent class SabrinaOnline(_BasicScraper): url = 'http://sabrina-online.com/' imageSearch = compile(tagre("a", "href", r'(strips/[^"]*)')) prevSearch = compile(tagre("a", "href", r"(\d\d\d\d-\d\d.html)") + tagre("img", "src", "b_back.gif")) help = 'Index format: n (unpadded)' adult = True multipleImagesPerStrip = True @classmethod def starter(cls): archive = cls.url + 'archive.html' data = getPageContent(archive, cls.session) search = compile(tagre("a", "href", r"(\d\d\d\d-\d\d.html)")) archivepages = search.findall(data) return cls.url + archivepages[-1] class SafelyEndangered(_BasicScraper): url = 'http://www.safelyendangered.com/' stripUrl = url + 'comic/%s' firstStripUrl = stripUrl % 'ignored' imageSearch = compile(tagre("img", "src", r'(http://www\.safelyendangered\.com/wp-content/uploads/\d+/\d+/[^"]+\.[a-z]+).*')) prevSearch = compile(tagre("a", "href", r'([^"]+)', after="navi navi-prev")) textSearch = compile(tagre("img", "title", r'([^"]+)', before=r'http://www\.safelyendangered\.com/wp-content/uploads')) help = 'Index format: yyyy/mm/stripname' class SamAndFuzzy(_BasicScraper): url = 'http://www.samandfuzzy.com/' stripUrl = 'http://samandfuzzy.com/%s' firstStripUrl = stripUrl % '1' imageSearch = compile(r'(/comics/.+?)" alt') prevSearch = compile(r'"><a href="(.+?)"><img src="imgint/nav_prev.gif"') help = 'Index format: nnnn' class SandraOnTheRocks(_BasicScraper): url = 'http://www.sandraontherocks.com/' stripUrl = url + 'strips-sotr/%s' firstStripUrl = stripUrl % 'start_by_running' imageSearch = compile(tagre("img", "src", r'([^"]*/comics/[^"]+)')) prevSearch = compile(tagre("a", "href", r'([^"]*/strips-sotr/[^"]+)', before="cn[id]prev")) help = 'Index format: name' class ScandinaviaAndTheWorld(_ParserScraper): url = 'http://satwcomic.com/' stripUrl = url + '%s' firstStripUrl = stripUrl % 'sweden-denmark-and-norway' starter = indirectStarter(url, '//a[text()="View latest comic"]') imageSearch = '//img[@itemprop="image"]' prevSearch = '//a[@accesskey="p"]' textSearch = '//span[@itemprop="articleBody"]' help = 'Index format: stripname' class ScaryGoRound(_BasicScraper): url = 'http://www.scarygoround.com/' stripUrl = url + '?date=%s' firstStripUrl = stripUrl % '20090918' imageSearch = compile(tagre("img", "src", r'(strips/\d+\.png)')) prevSearch = compile(tagre("a", "href", r'(\?date=\d+)') + "Previous") help = 'Index format: n (unpadded)' class ScenesFromAMultiverse(_BasicScraper): url = 'http://amultiverse.com/' rurl = escape(url) stripUrl = url + '%s/' firstStripUrl = stripUrl % '2010/06/14/parenthood' imageSearch = ( compile(tagre("div", "id", "comic") + r"\s*" + tagre("img", "src", r'(.*amultiverse.com/wp-content/uploads/\d+/\d+/[^"]+)')), compile(tagre("div", "id", "comic") + r"\s*" + tagre("a", "href", r'[^"]*') + tagre("img", "src", r'(.*amultiverse.com/wp-content/uploads/\d+/\d+/[^"]+)')), ) prevSearch = compile(tagre("a", "href", r'(%scomic/\d+\d+/\d+/\d+/[^"]+)' % rurl, after="prev")) help = 'Index format: yyyy/mm/dd/stripname' class SchlockMercenary(_BasicScraper): url = 'http://www.schlockmercenary.com/' stripUrl = url + '%s' firstStripUrl = stripUrl % '2000-06-12' imageSearch = compile(tagre("img", "src", r'(http://static\.schlockmercenary\.com/comics/[^"]+)')) multipleImagesPerStrip = True prevSearch = 
compile(tagre("a", "href", r'(/\d+-\d+-\d+)', quote="'", after="nav-previous")) help = 'Index format: yyyy-mm-dd' class SchoolBites(_BasicScraper): url = 'http://schoolbites.net/' stripUrl = url + 'd/%s.html' imageSearch = compile(tagre("img", "src", r'(http://cdn\.schoolbites\.net/comics/[^"]+)')) prevSearch = compile(tagre("a", "href", r'(http://schoolbites\.net/d/\d+\.html)', after="prev")) help = 'Index format: yyyymmdd' class Schuelert(_BasicScraper): url = 'http://www.schuelert.de/' rurl = escape(url) stripUrl = url + 'index.php?paged=%s' firstStripUrl = stripUrl % '5' imageSearch = compile(tagre("img", "src", r"(%swp-content/[^']+)" % rurl, quote="'")) prevSearch = compile(tagre("a", "href", r'(%sindex\.php\?paged=\d+)' % rurl) + "&laquo;") multipleImagesPerStrip = True help = 'Index format: none' lang = 'de' class Science(_BasicScraper): url = 'http://sci-ence.org/' rurl = escape(url) stripUrl = url + '%s/' firstStripUrl = stripUrl % 'periodic-table-element-ass' prevSearch = compile(tagre("a", "href", r'(%s[^"]+/)' % rurl, after="prev")) imageSearch = compile(tagre("img", "src", r'(%scomics/\d+-\d+-\d+[^"]+)' % rurl)) help = 'Index format: stripname' class SequentialArt(_BasicScraper): url = 'http://www.collectedcurios.com/sequentialart.php' stripUrl = url + '?s=%s' firstStripUrl = stripUrl % '1' imageSearch = compile(tagre("img", "src", r'([^"]+)', before="strip")) prevSearch = compile(tagre("a", "href", r'(/sequentialart\.php\?s=\d+)') + tagre("img", "src", "Nav_BackOne\.gif")) help = 'Index format: name' class SexyLosers(_BasicScraper): adult = True url = 'http://www.sexylosers.com/' stripUrl = url + '%s.html' imageSearch = compile(r'<img src\s*=\s*"\s*(comics/[\w\.]+?)"', IGNORECASE) prevSearch = compile(r'<a href="(/\d{3}\.\w+?)"><font color = FFAAAA><<', IGNORECASE) help = 'Index format: nnn' starter = indirectStarter(url, compile(r'SEXY LOSERS <A HREF="(.+?)">Latest SL Comic \(#\d+\)</A>', IGNORECASE)) @classmethod def namer(cls, imageUrl, pageUrl): index = pageUrl.split('/')[-1].split('.')[0] title = imageUrl.split('/')[-1].split('.')[0] return index + '-' + title class Sheldon(_BasicScraper): url = 'http://www.sheldoncomics.com/' rurl = escape(url) stripUrl = url + 'archive/%s.html' firstStripUrl = stripUrl % '011130' imageSearch = compile(tagre("img", "src", r'(http://cdn\.sheldoncomics\.com/strips/[^"]+)')) prevSearch = compile(tagre("a", "href", r'(%sarchive/\d+\.html)' % rurl, after="sidenav-prev")) help = 'Index format: yymmdd' class ShermansLagoon(_BasicScraper): url = 'http://shermanslagoon.com/' stripUrl = url + 'comics/%s' firstStripUrl = stripUrl % '/december-29-2003/' imageSearch = compile(tagre("img", "src", r'(http://safr\.kingfeatures\.com/idn/etv/zone/xml/content\.php\?file=.+?)')) prevSearch = compile(r'id="previouscomic" class="button white"><a href="(%scomics/[a-z0-9-]+/)"' % url) help = 'Index format: monthname-day-year' @classmethod def namer(cls, imageUrl, pageUrl): name = pageUrl.rsplit('/', 3)[2] if name == "shermanslagoon.com": import datetime name = datetime.date.today().strftime("%B-%d-%Y").lower() month, day, year = name.split('-') return "%s-%s-%s" % (year, month, day) class Shivae(_BasicScraper): url = 'http://shivae.net/' rurl = escape(url) stripUrl = url + 'blog/%s/' firstStripUrl = stripUrl % '2007/09/21/09212007' imageSearch = compile(tagre("img", "src", r'(%swp-content/blogs\.dir/\d+/files/\d+/\d+/[^"]+)' % rurl)) prevSearch = compile(tagre("a", "href", r'(%sblog/[^"]+)' % rurl, after="navi-prev")) help = 'Index format: 
yyyy/mm/dd/stripname' class Shortpacked(_ParserScraper): url = 'http://www.shortpacked.com/index.php' stripUrl = url + '?id=%s' css = True imageSearch = 'img#comic' prevSearch = 'a.prev' help = 'Index format: nnn' class ShotgunShuffle(_BasicScraper): url = 'http://shotgunshuffle.com/' stripUrl = url + 'comic/%s' firstStripUrl = stripUrl % 'pilot/' imageSearch = compile(tagre("img", "src", r'(http://shotgunshuffle.com/wp-content/uploads/\d+/\d+/\d+-[^"]+)')) prevSearch = compile(tagre("a", "href", r'([^"]+)', after="navi navi-prev")) help = 'Index format: stripname' class SinFest(_BasicScraper): name = 'KeenSpot/SinFest' url = 'http://www.sinfest.net/' stripUrl = url + 'view.php?date=%s' imageSearch = compile(tagre("img","src", r'(btphp/comics/.+)', after="alt")) prevSearch = compile(tagre("a", "href", r'(view\.php\?date=.+)') + '\\s*' + tagre("img", "src", r'\.\./images/prev\.gif')) help = 'Index format: yyyy-mm-dd' class _Sketchesnatched(_BasicScraper): url = 'http://sketchesnatched.blogspot.com/' stripUrl = url + 'search?updated-max=%s%%2B01:00&max-results=1' firstStripUrl = stripUrl % '2011-01-27T08:32:00' imageSearch = compile(tagre("meta", "content", r"(http://\d+\.bp\.blogspot\.com/[^']+)", after=r'image_url', quote="'")) prevSearch = compile(tagre("a", "href", r"(http://sketchesnatched\.blogspot\.[a-z]+/search[^']+)", before=r"blog-pager-older-link", quote="'")) help = 'Index format: yyyy-mm-ddThh:mm:ss' class SkinDeep(_BasicScraper): url = 'http://www.skindeepcomic.com/' stripUrl = url + 'archive/%s/' imageSearch = compile(r'<span class="webcomic-object[^>]*><img src="([^"]*)"') prevSearch = compile(tagre("a", "href", r'([^"]+)', after="previous-webcomic-link")) help = 'Index format: custom' class SleeplessDomain(_ParserScraper): url = 'http://www.sleeplessdomain.com/' stripUrl = url + 'comic/%s' firstStripUrl = stripUrl % 'chapter-1-cover' css = True imageSearch = 'img#cc-comic' prevSearch = 'div.nav a.prev' starter = bounceStarter(url, 'div.nav a.next') help = 'Index format: chapter-X-page-Y (unpadded)' @classmethod def namer(cls, imageUrl, pageUrl): start = '' tsmatch = compile(r'/(\d+)-').search(imageUrl) if tsmatch: start = datetime.utcfromtimestamp(int(tsmatch.group(1))).strftime("%Y-%m-%d") else: start = '2015-04-11x' return start + "-" + pageUrl.rsplit('/', 1)[-1] class SluggyFreelance(_BasicScraper): url = 'http://www.sluggy.com/' stripUrl = url + 'comics/archives/daily/%s' imageSearch = compile(r'<img src="(/images/comics/.+?)"') prevSearch = compile(r'<a href="(.+?)"[^>]+?><span class="ui-icon ui-icon-seek-prev">') multipleImagesPerStrip = True help = 'Index format: yymmdd' class SMBC(_ParserScraper): url = 'http://www.smbc-comics.com/' rurl = escape(url) stripUrl = url + '?id=%s' firstStripUrl = stripUrl % '1' multipleImagesPerStrip = True imageSearch = ['//img[@id="comic"]', '//div[@id="aftercomic"]/img'] prevSearch = '//a[@class="prev"]' help = 'Index format: nnnn' textSearch = '//img[@id="comic"]/@title' @classmethod def namer(cls, imageUrl, pageUrl): return imageUrl.rsplit('-', 1)[-1]
MIT License
codeforamerica/pittsburgh-purchasing-suite
purchasing/scout/views.py
explore
python
def explore(): return dict(current_user=current_user, choices=Department.choices())
The landing page for scout. Renders the "big search" template. :status 200: Renders the appropriate landing page.
https://github.com/codeforamerica/pittsburgh-purchasing-suite/blob/9552eda6df396746feedc9ce45f35842a716de6a/purchasing/scout/views.py#L31-L36
import re from flask import ( render_template, current_app, request, abort, flash, redirect, url_for ) from flask_login import current_user from purchasing.database import db from purchasing.utils import SimplePagination from purchasing.decorators import wrap_form, requires_roles from purchasing.scout.forms import SearchForm, NoteForm from purchasing.users.models import Department from purchasing.data.companies import Company from purchasing.data.contracts import ContractBase, ContractNote, ContractType from purchasing.scout.util import ( build_filter, build_cases, feedback_handler, find_contract_metadata, return_all_contracts, FILTER_FIELDS ) from purchasing.scout import blueprint CRAZY_CHARS = re.compile('[^A-Za-z0-9 ]') @blueprint.route('/', methods=['GET']) @wrap_form(SearchForm, 'search_form', 'scout/explore.html')
BSD 3-Clause New or Revised License
jakestanger/rofi_mpd
rofi/rofi.py
Rofi.float_entry
python
def float_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs):
    if (min is not None) and (max is not None) and not (max > min):
        raise ValueError("Maximum limit has to be more than the minimum limit.")

    def float_validator(text):
        error = None
        try:
            value = float(text)
        except ValueError:
            return None, "Please enter a floating point value."
        if (min is not None) and (value < min):
            return None, "The minimum allowable value is {0}.".format(min)
        if (max is not None) and (value > max):
            return None, "The maximum allowable value is {0}.".format(max)
        return value, None

    return self.generic_entry(prompt, float_validator, message, rofi_args, **kwargs)
Prompt the user to enter a floating point number. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: float, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- float, or None if the dialog is cancelled.
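A minimal usage sketch (assumes rofi is installed and a display is available; the prompt text and bounds are invented, and the exact import path depends on how the module is packaged):

from rofi import Rofi

r = Rofi()
# Returns a float, or None if the user cancels the dialog.
value = r.float_entry('Temperature (C)', message='Enter a reading', min=-20.0, max=50.0)
if value is not None:
    print('Got', value)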
https://github.com/jakestanger/rofi_mpd/blob/e80be282ef8b9e3dce2b411a2c699c74c5e2bff8/rofi/rofi.py#L610-L648
import atexit from datetime import datetime from decimal import Decimal, InvalidOperation import signal import subprocess import time if hasattr(subprocess.Popen, '__exit__'): Popen = subprocess.Popen else: class ContextManagedPopen(subprocess.Popen): def __enter__(self): return self def __exit__(self, type, value, traceback): if self.stdout: self.stdout.close() if self.stderr: self.stderr.close() if self.stdin: self.stdin.close() self.wait() Popen = ContextManagedPopen class Rofi(object): def __init__(self, lines=None, fixed_lines=None, width=None, fullscreen=None, location=None, exit_hotkeys=('Alt+F4', 'Control+q'), rofi_args=None): self._process = None self.lines = lines self.fixed_lines = fixed_lines self.width = width self.fullscreen = fullscreen self.location = location self.exit_hotkeys = exit_hotkeys self.rofi_args = rofi_args or [] atexit.register(self.close) @classmethod def escape(self, string): return string.translate( {38: '&amp;'} ).translate({ 34: '&quot;', 39: '&apos;', 60: '&lt;', 62: '&gt;' }) def close(self): if self._process: self._process.send_signal(signal.SIGINT) if hasattr(subprocess, 'TimeoutExpired'): try: self._process.wait(timeout=1) except subprocess.TimeoutExpired: self._process.send_signal(signal.SIGKILL) else: count = 0 while count < 100: if self._process.poll() is not None: break time.sleep(0.01) if self._process.poll() is None: self._process.send_signal(signal.SIGKILL) self._process = None def _run_blocking(self, args, input=None): if self._process: self.close() kwargs = {} kwargs['stdout'] = subprocess.PIPE kwargs['universal_newlines'] = True if hasattr(subprocess, 'run'): result = subprocess.run(args, input=input, **kwargs) return result.returncode, result.stdout if input is not None: kwargs['stdin'] = subprocess.PIPE with Popen(args, **kwargs) as proc: stdout, stderr = proc.communicate(input) returncode = proc.poll() return returncode, stdout def _run_nonblocking(self, args, input=None): if self._process: self.close() self._process = subprocess.Popen(args, stdout=subprocess.PIPE) def _common_args(self, allow_fullscreen=True, **kwargs): args = [] lines = kwargs.get('lines', self.lines) if lines: args.extend(['-lines', str(lines)]) fixed_lines = kwargs.get('fixed_lines', self.fixed_lines) if fixed_lines: args.extend(['-fixed-num-lines', str(fixed_lines)]) width = kwargs.get('width', self.width) if width is not None: args.extend(['-width', str(width)]) fullscreen = kwargs.get('fullscreen', self.fullscreen) if allow_fullscreen and fullscreen: args.append('-fullscreen') location = kwargs.get('location', self.location) if location is not None: args.extend(['-location', str(location)]) args.extend(self.rofi_args) return args def error(self, message, rofi_args=None, **kwargs): rofi_args = rofi_args or [] args = ['rofi', '-e', message] args.extend(self._common_args(allow_fullscreen=False, **kwargs)) args.extend(rofi_args) self._run_blocking(args) def status(self, message, rofi_args=None, **kwargs): rofi_args = rofi_args or [] args = ['rofi', '-e', message] args.extend(self._common_args(allow_fullscreen=False, **kwargs)) args.extend(rofi_args) self._run_nonblocking(args) def select(self, prompt, options, rofi_args=None, message="", select=None, **kwargs): rofi_args = rofi_args or [] optionstr = '\n'.join(option.replace('\n', ' ') for option in options) args = ['rofi', '-dmenu', '-p', prompt, '-format', 'i'] if select is not None: args.extend(['-selected-row', str(select)]) display_bindings = [] user_keys = set() for k, v in kwargs.items(): if not 
k.startswith('key'): continue try: keynum = int(k[3:]) except ValueError: continue key, action = v user_keys.add(keynum) args.extend(['-kb-custom-{0:s}'.format(k[3:]), key]) if action: display_bindings.append("<b>{0:s}</b>: {1:s}".format(key, action)) exit_keys = set() next_key = 10 for key in self.exit_hotkeys: while next_key in user_keys: next_key += 1 exit_keys.add(next_key) args.extend(['-kb-custom-{0:d}'.format(next_key), key]) next_key += 1 message = message or "" if display_bindings: message += "\n" + " ".join(display_bindings) message = message.strip() if message: args.extend(['-mesg', message]) args.extend(self._common_args(**kwargs)) args.extend(rofi_args) returncode, stdout = self._run_blocking(args, input=optionstr) stdout = stdout.strip() index = int(stdout) if stdout else -1 if returncode == 0: key = 0 elif returncode == 1: key = -1 elif returncode > 9: key = returncode - 9 if key in exit_keys: raise SystemExit() else: self.exit_with_error("Unexpected rofi returncode {0:d}.".format(results.returncode)) return index, key def generic_entry(self, prompt, validator=None, message=None, rofi_args=None, **kwargs): error = "" rofi_args = rofi_args or [] while True: args = ['rofi', '-dmenu', '-p', prompt, '-format', 's'] msg = message or "" if error: msg = '<span color="#FF0000" font_weight="bold">{0:s}</span>\n{1:s}'.format(error, msg) msg = msg.rstrip('\n') if msg: args.extend(['-mesg', msg]) args.extend(self._common_args(**kwargs)) args.extend(rofi_args) returncode, stdout = self._run_blocking(args, input="") if returncode == 1: return None text = stdout.rstrip('\n') if validator: value, error = validator(text) if not error: return value else: return text def text_entry(self, prompt, message=None, allow_blank=False, strip=True, rofi_args=None, **kwargs): def text_validator(text): if strip: text = text.strip() if not allow_blank: if not text: return None, "A value is required." return text, None return self.generic_entry(prompt, text_validator, message, rofi_args, **kwargs) def integer_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs): if (min is not None) and (max is not None) and not (max > min): raise ValueError("Maximum limit has to be more than the minimum limit.") def integer_validator(text): error = None try: value = int(text) except ValueError: return None, "Please enter an integer value." if (min is not None) and (value < min): return None, "The minimum allowable value is {0:d}.".format(min) if (max is not None) and (value > max): return None, "The maximum allowable value is {0:d}.".format(max) return value, None return self.generic_entry(prompt, integer_validator, message, rofi_args, **kwargs)
MIT License
blacktear23/py-servicebus
servicebus/pika/adapters/twisted_connection.py
TwistedChannel.basic_publish
python
def basic_publish(self, *args, **kwargs):
    if self.__closed:
        return defer.fail(self.__closed)
    return defer.succeed(self.__channel.basic_publish(*args, **kwargs))
Make sure the channel is not closed and then publish. Return a Deferred that fires with the result of the channel's basic_publish.
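A rough usage sketch inside a Twisted callback chain (the exchange and routing key are hypothetical; assumes channel is an already-opened TwistedChannel):

from twisted.internet import defer

@defer.inlineCallbacks
def publish_heartbeat(channel):
    # Fires (or errbacks, if the channel is closed) once the publish is handed off.
    yield channel.basic_publish(exchange='', routing_key='heartbeat', body=b'ping')
    print('published')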
https://github.com/blacktear23/py-servicebus/blob/c3d6ccf0b2abf131ca1060d89f3c0d4ab08481e4/servicebus/pika/adapters/twisted_connection.py#L124-L131
import functools from twisted.internet import defer, error, reactor from twisted.python import log from servicebus.pika import exceptions from servicebus.pika.adapters import base_connection class ClosableDeferredQueue(defer.DeferredQueue): def __init__(self, size=None, backlog=None): self.closed = None super(ClosableDeferredQueue, self).__init__(size, backlog) def put(self, obj): if self.closed: return defer.fail(self.closed) return defer.DeferredQueue.put(self, obj) def get(self): if self.closed: return defer.fail(self.closed) return defer.DeferredQueue.get(self) def close(self, reason): self.closed = reason while self.waiting: self.waiting.pop().errback(reason) self.pending = [] class TwistedChannel(object): WRAPPED_METHODS = ('exchange_declare', 'exchange_delete', 'queue_declare', 'queue_bind', 'queue_purge', 'queue_unbind', 'basic_qos', 'basic_get', 'basic_recover', 'tx_select', 'tx_commit', 'tx_rollback', 'flow', 'basic_cancel') def __init__(self, channel): self.__channel = channel self.__closed = None self.__calls = set() self.__consumers = {} channel.add_on_close_callback(self.channel_closed) def channel_closed(self, channel, reply_code, reply_text): self.__closed = exceptions.ChannelClosed(reply_code, reply_text) for d in self.__calls: d.errback(self.__closed) for consumers in self.__consumers.values(): for c in consumers: c.close(self.__closed) self.__calls = set() self.__consumers = {} def basic_consume(self, *args, **kwargs): if self.__closed: return defer.fail(self.__closed) queue = ClosableDeferredQueue() queue_name = kwargs['queue'] kwargs['consumer_callback'] = lambda *args: queue.put(args) self.__consumers.setdefault(queue_name, set()).add(queue) try: consumer_tag = self.__channel.basic_consume(*args, **kwargs) except: return defer.fail() return defer.succeed((queue, consumer_tag)) def queue_delete(self, *args, **kwargs): wrapped = self.__wrap_channel_method('queue_delete') queue_name = kwargs['queue'] d = wrapped(*args, **kwargs) return d.addCallback(self.__clear_consumer, queue_name)
BSD 3-Clause New or Revised License
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/foundation/entities/device_update/campaign_statistics_events.py
CampaignStatisticsEvents.event_type
python
def event_type(self): return self._event_type.value
Type of the event. API example: 'UPD4_FAIL_101' :rtype: str
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/foundation/entities/device_update/campaign_statistics_events.py#L180-L188
from __future__ import unicode_literals from builtins import str from builtins import super from mbed_cloud.foundation.common.entity_base import Entity from mbed_cloud.foundation.common import fields from mbed_cloud.foundation import enums class CampaignStatisticsEvents(Entity): _api_fieldnames = [ "campaign_id", "count", "created_at", "description", "event_type", "id", "summary_status", "summary_status_id", ] _sdk_fieldnames = _api_fieldnames _renames = {} _renames_to_api = {} def __init__( self, _client=None, campaign_id=None, count=None, created_at=None, description=None, event_type=None, id=None, summary_status=None, summary_status_id=None, ): super().__init__(_client=_client) self._campaign_id = fields.StringField(value=campaign_id) self._count = fields.IntegerField(value=count) self._created_at = fields.DateTimeField(value=created_at) self._description = fields.StringField(value=description) self._event_type = fields.StringField(value=event_type) self._id = fields.StringField(value=id) self._summary_status = fields.StringField(value=summary_status) self._summary_status_id = fields.StringField(value=summary_status_id) @property def campaign_id(self): return self._campaign_id.value @campaign_id.setter def campaign_id(self, value): self._campaign_id.set(value) @property def count(self): return self._count.value @property def created_at(self): return self._created_at.value @property def description(self): return self._description.value @property
Apache License 2.0
maxjiang93/space_time_pde
src/model_utils.py
conv33
python
def conv33(in_channels, out_channels, stride=1, padding=1, bias=True, groups=1): return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=padding, bias=bias, groups=groups)
3x3 2D convolution layer with a kernel size of 3, a stride of 1, and a padding of 1.
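For illustration, a short sketch of how the helper is typically used (shapes chosen arbitrarily):

import torch

conv = conv33(in_channels=16, out_channels=32)
x = torch.randn(8, 16, 64, 64)   # (batch, channels, height, width)
y = conv(x)
print(y.shape)                   # padding=1 keeps H and W: torch.Size([8, 32, 64, 64])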
https://github.com/maxjiang93/space_time_pde/blob/5e355b0434baf1757d071ce993b84073c8426223/src/model_utils.py#L6-L12
import torch import torch.nn as nn import torch.nn.functional as F
MIT License
googleapis/python-aiplatform
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py
sample_create_artifact
python
def sample_create_artifact():
    client = aiplatform_v1beta1.MetadataServiceClient()
    request = aiplatform_v1beta1.CreateArtifactRequest(
        parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
    )
    response = client.create_artifact(request=request)
    print(response)
Snippet for create_artifact
https://github.com/googleapis/python-aiplatform/blob/c1c2326b2342ab1b6f4c4ce3852e63376eae740d/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py#L30-L45
from google.cloud import aiplatform_v1beta1
Apache License 2.0
pytorch/audio
test/torchaudio_unittest/example/souce_sepration/sdr_reference.py
batch_SDR_torch
python
def batch_SDR_torch(estimation, origin, mask=None):
    batch_size_est, nsource_est, nsample_est = estimation.size()
    batch_size_ori, nsource_ori, nsample_ori = origin.size()
    assert batch_size_est == batch_size_ori, "Estimation and original sources should have same shape."
    assert nsource_est == nsource_ori, "Estimation and original sources should have same shape."
    assert nsample_est == nsample_ori, "Estimation and original sources should have same shape."
    assert nsource_est < nsample_est, "Axis 1 should be the number of sources, and axis 2 should be the signal."

    batch_size = batch_size_est
    nsource = nsource_est
    nsample = nsample_est

    estimation = estimation - torch.mean(estimation, 2, keepdim=True).expand_as(estimation)
    origin = origin - torch.mean(origin, 2, keepdim=True).expand_as(estimation)

    perm = list(set(permutations(np.arange(nsource))))
    SDR = torch.zeros((batch_size, nsource, nsource)).type(estimation.type())
    for i in range(nsource):
        for j in range(nsource):
            SDR[:, i, j] = calc_sdr_torch(estimation[:, i], origin[:, j], mask)

    SDR_max = []
    SDR_perm = []
    for permute in perm:
        sdr = []
        for idx in range(len(permute)):
            sdr.append(SDR[:, idx, permute[idx]].view(batch_size, -1))
        sdr = torch.sum(torch.cat(sdr, 1), 1)
        SDR_perm.append(sdr.view(batch_size, 1))
    SDR_perm = torch.cat(SDR_perm, 1)
    SDR_max, _ = torch.max(SDR_perm, dim=1)

    return SDR_max / nsource
Batch-wise SDR calculation for multiple audio files.

estimation: (batch, nsource, nsample)
origin: (batch, nsource, nsample)
mask: optional, (batch, nsample), binary
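A small sanity-check sketch (random tensors; sizes are arbitrary):

import torch

batch, nsource, nsample = 4, 2, 16000
est = torch.randn(batch, nsource, nsample)
ref = torch.randn(batch, nsource, nsample)

# Permutation-invariant SDR per batch element, averaged over sources.
sdr = batch_SDR_torch(est, ref)
print(sdr.shape)   # torch.Size([4])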
https://github.com/pytorch/audio/blob/88ca1e05a3e5525d22cfd68c7320a003ad5c12e3/test/torchaudio_unittest/example/souce_sepration/sdr_reference.py#L52-L98
import numpy as np from itertools import permutations import torch def calc_sdr_torch(estimation, origin, mask=None): if mask is not None: origin = origin * mask estimation = estimation * mask origin_power = torch.pow(origin, 2).sum(1, keepdim=True) + 1e-8 scale = torch.sum(origin*estimation, 1, keepdim=True) / origin_power est_true = scale * origin est_res = estimation - est_true true_power = torch.pow(est_true, 2).sum(1) res_power = torch.pow(est_res, 2).sum(1) return 10*torch.log10(true_power) - 10*torch.log10(res_power)
BSD 2-Clause Simplified License
taverntesting/tavern
tavern/util/extfunctions.py
import_ext_function
python
def import_ext_function(entrypoint):
    logger = _getlogger()

    try:
        module, funcname = entrypoint.split(":")
    except ValueError as e:
        msg = "Expected entrypoint in the form module.submodule:function"
        logger.exception(msg)
        raise exceptions.InvalidExtFunctionError(msg) from e

    try:
        module = importlib.import_module(module)
    except ImportError as e:
        msg = "Error importing module {}".format(module)
        logger.exception(msg)
        raise exceptions.InvalidExtFunctionError(msg) from e

    try:
        function = getattr(module, funcname)
    except AttributeError as e:
        msg = "No function named {} in {}".format(funcname, module)
        logger.exception(msg)
        raise exceptions.InvalidExtFunctionError(msg) from e

    return function
Given a function name in the form of a setuptools entry point, try to dynamically load and return it Args: entrypoint (str): setuptools-style entrypoint in the form module.submodule:function Returns: function: function loaded from entrypoint Raises: InvalidExtFunctionError: If the module or function did not exist
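A usage sketch (the entrypoint string points at a standard-library function purely for illustration):

# "os.path:join" -> import os.path, then return its join function.
func = import_ext_function("os.path:join")
print(func("a", "b"))   # a/b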
https://github.com/taverntesting/tavern/blob/38234215f0f91d317721c23fdb5e5244c19c4229/tavern/util/extfunctions.py#L27-L64
import functools import importlib import logging from . import exceptions from .dict_util import deep_dict_merge def get_pykwalify_logger(module): return logging.getLogger(module) def _getlogger(): return get_pykwalify_logger("tavern.util.extfunctions")
MIT License
dalloriam/engel
engel/widgets/structure.py
Document.__init__
python
def __init__(self, id, view, classname=None, parent=None, **kwargs):
    super(Document, self).__init__(id, classname, parent, **kwargs)
    self.view = view
:param view: :class:`~.application.View` in which the document is declared.
https://github.com/dalloriam/engel/blob/ccd90004ac504dc3b42ca4f083706f3d5d4514b6/engel/widgets/structure.py#L12-L17
from .base import BaseContainer from .abstract import HeadLink class Document(BaseContainer): html_tag = "html"
MIT License
davexpro/pochunter
plugins/pocsuite/packages/requests/adapters.py
HTTPAdapter.send
python
def send(self, request, stream=False, timeout=None, verify=False, cert=None, proxies=None): conn = self.get_connection(request.url, proxies) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers(request) chunked = not (request.body is None or 'Content-Length' in request.headers) timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout ) else: if hasattr(conn, 'proxy_pool'): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=timeout) try: low_conn.putrequest(request.method, url, skip_accept_encoding=True) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode('utf-8')) low_conn.send(b'\r\n') low_conn.send(i) low_conn.send(b'\r\n') low_conn.send(b'0\r\n\r\n') r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False ) except: low_conn.close() raise else: conn._put_conn(low_conn) except socket.error as sockerr: raise ConnectionError(sockerr, request=request) except MaxRetryError as e: raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): raise SSLError(e, request=request) elif isinstance(e, TimeoutError): raise Timeout(e, request=request) else: raise return self.build_response(request, resp)
Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) The timeout on the request. :param verify: (optional) Whether to verify SSL certificates. :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request.
https://github.com/davexpro/pochunter/blob/6a0409600e8f454c7290604fe7d96bd376d5f919/plugins/pocsuite/packages/requests/adapters.py#L294-L388
import socket from .auth import _basic_auth_str from .compat import urlparse, basestring, urldefrag from .cookies import extract_cookies_to_jar from .exceptions import ConnectionError, Timeout, SSLError, ProxyError from .models import Response from .packages.urllib3.exceptions import HTTPError as _HTTPError from .packages.urllib3.exceptions import MaxRetryError from .packages.urllib3.exceptions import ProxyError as _ProxyError from .packages.urllib3.exceptions import SSLError as _SSLError from .packages.urllib3.exceptions import TimeoutError from .packages.urllib3.poolmanager import PoolManager, proxy_from_url from .packages.urllib3.response import HTTPResponse from .packages.urllib3.util import Timeout as TimeoutSauce from .structures import CaseInsensitiveDict from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, prepend_scheme_if_needed, get_auth_from_url) DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 class BaseAdapter(object): def __init__(self): super(BaseAdapter, self).__init__() def send(self): raise NotImplementedError def close(self): raise NotImplementedError class HTTPAdapter(BaseAdapter): __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', '_pool_block'] def __init__(self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK): self.max_retries = max_retries self.config = {} self.proxy_manager = {} super(HTTPAdapter, self).__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) def __setstate__(self, state): self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block) def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK): self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block) def cert_verify(self, conn, url, verify, cert): if url.lower().startswith('https') and verify: cert_loc = None if verify is not True: cert_loc = verify if not cert_loc: cert_loc = DEFAULT_CA_BUNDLE_PATH if not cert_loc: raise Exception("Could not find a suitable SSL CA certificate bundle.") conn.cert_reqs = 'CERT_REQUIRED' conn.ca_certs = cert_loc else: conn.cert_reqs = 'CERT_NONE' conn.ca_certs = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert def build_response(self, req, resp): response = Response() response.status_code = getattr(resp, 'status', None) response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode('utf-8') else: response.url = req.url extract_cookies_to_jar(response.cookies, req, resp) response.request = req response.connection = self return response def get_connection(self, url, proxies=None): proxies = proxies or {} proxy = proxies.get(urlparse(url.lower()).scheme) if proxy: proxy = prepend_scheme_if_needed(proxy, 'http') proxy_headers = self.proxy_headers(proxy) if not proxy in self.proxy_manager: 
self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block) conn = self.proxy_manager[proxy].connection_from_url(url) else: parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): self.poolmanager.clear() def request_url(self, request, proxies): proxies = proxies or {} scheme = urlparse(request.url).scheme proxy = proxies.get(scheme) if proxy and scheme != 'https': url, _ = urldefrag(request.url) else: url = request.path_url return url def add_headers(self, request, **kwargs): pass def proxy_headers(self, proxy): headers = {} username, password = get_auth_from_url(proxy) if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return headers
MIT License
rethinkrobotics/intera_sdk
intera_interface/src/intera_motion_interface/motion_waypoint_options.py
MotionWaypointOptions.set_max_joint_speed_ratio
python
def set_max_joint_speed_ratio(self, speed_ratio = None):
    if speed_ratio is None:
        speed_ratio = self.default_speed_ratio
    speed_ratio = clamp_float_warn(0.05, speed_ratio, 1.0, 'speed_ratio')
    if speed_ratio is None:
        return
    self._data.max_joint_speed_ratio = speed_ratio
The waypoint max joint speed is set to a fraction of the physical max joint speed. For Cartesian paths this should be set to 1.0. @param speed_ratio: ratio [0.0, 1.0]
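A usage sketch (assumes a ROS workspace where intera_motion_interface is importable; the ratio value is arbitrary):

from intera_motion_interface import MotionWaypointOptions

opts = MotionWaypointOptions()
# The setter validates the ratio against the [0.05, 1.0] range before storing it.
opts.set_max_joint_speed_ratio(0.4)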
https://github.com/rethinkrobotics/intera_sdk/blob/6614dec1c5c2e7a74db1af6d01811d1332801785/intera_interface/src/intera_motion_interface/motion_waypoint_options.py#L115-L126
import rospy from intera_motion_msgs.msg import WaypointOptions from copy import deepcopy from .utility_functions import ( ensure_path_to_file_exists, clamp_float_warn ) from rospy_message_converter import message_converter import yaml class MotionWaypointOptions(object): default_speed_ratio = 0.7 default_joint_tolerance = 0.05 default_max_linear_speed = 0.6 default_max_linear_accel = 0.6 default_max_rot_speed = 1.57 default_max_rot_accel = 1.57 @staticmethod def get_accel_preset(accel_name): if accel_name == 'slow': return [1.5, 1.5, 3.0, 3.0, 3.0, 3.0, 3.0] if accel_name == 'medium': return [3.5, 2.5, 5.0, 5.0, 5.0, 5.0, 5.0] if accel_name == 'fast': return [7.0, 5.0, 8.0, 8.0, 8.0, 8.0, 8.0] if accel_name == 'express': return [10.0, 8.0, 10.0, 10.0, 12.0, 12.0, 12.0] rospy.logwarn('Did not recognize accel preset. Skipping.') return None @staticmethod def get_speed_ratio_preset(speed_name): if speed_name == 'slow': return 0.25 if speed_name == 'medium': return 0.6 if speed_name == 'fast': return 0.9 if speed_name == 'express': return 1.0 rospy.logwarn('Did not recognize speed ratio preset. Skipping.') return None def __init__(self, n_dim = 0, label = "default", max_joint_speed_ratio = None, joint_tolerances = None, max_joint_accel = None, max_linear_speed = None, max_linear_accel = None, max_rotational_speed = None, max_rotational_accel= None, corner_distance = 0.0): self._n_dim = n_dim self._data = WaypointOptions() self.set_max_joint_speed_ratio(max_joint_speed_ratio) self.set_joint_tolerances(joint_tolerances) self.set_max_linear_speed(max_linear_speed) self.set_max_linear_accel(max_linear_accel) self.set_max_rotational_speed(max_rotational_speed) self.set_max_rotational_accel(max_rotational_accel) self.set_max_joint_accel(max_joint_accel) self.set_label(label) self.set_corner_distance(corner_distance)
Apache License 2.0
huntrar/scrape
scrape/utils.py
re_filter
python
def re_filter(text, regexps):
    if not regexps:
        return text

    matched_text = []
    compiled_regexps = [re.compile(x) for x in regexps]
    for line in text:
        if line in matched_text:
            continue

        for regexp in compiled_regexps:
            found = regexp.search(line)
            if found and found.group():
                matched_text.append(line)

    return matched_text or text
Filter text using regular expressions.
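A small usage sketch (the sample lines are invented):

lines = ['error: disk full', 'all good', 'warning: low memory']
print(re_filter(lines, [r'error', r'warning']))
# ['error: disk full', 'warning: low memory']
print(re_filter(lines, []))   # no patterns -> original input returned unchanged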
https://github.com/huntrar/scrape/blob/411e5f04018fd6fc2316ea032431c75f96a75692/scrape/utils.py#L151-L167
from __future__ import print_function import glob import hashlib import os import random import re import shutil import string import sys import time import lxml.html as lh try: import pdfkit as pk except ImportError: pass import requests from requests.exceptions import MissingSchema from six import PY2 from six.moves import input, xrange as range from six.moves.urllib.parse import urlparse, urljoin from six.moves.urllib.request import getproxies import tldextract if PY2: from cgi import escape else: from html import escape USER_AGENTS = ( "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) " "Gecko/20100101 Firefox/11.0", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) " "Gecko/20100 101 Firefox/22.0", "Mozilla/5.0 (Windows NT 6.1; rv:11.0) " "Gecko/20100101 Firefox/11.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) " "AppleWebKit/536.5 (KHTML, like Gecko) " "Chrome/19.0.1084.46 Safari/536.5", "Mozilla/5.0 (Windows; Windows NT 6.1) " "AppleWebKit/536.5 (KHTML, like Gecko) " "Chrome/19.0.1084.46 Safari/536.5", ) XDG_CACHE_DIR = os.environ.get( "XDG_CACHE_HOME", os.path.join(os.path.expanduser("~"), ".cache") ) CACHE_DIR = os.path.join(XDG_CACHE_DIR, "scrape") CACHE_FILE = os.path.join(CACHE_DIR, "cache{0}".format("" if PY2 else "3")) def get_proxies(): proxies = getproxies() filtered_proxies = {} for key, value in proxies.items(): if key.startswith("http://"): if not value.startswith("http://"): filtered_proxies[key] = "http://{0}".format(value) else: filtered_proxies[key] = value return filtered_proxies def get_resp(url): try: headers = {"User-Agent": random.choice(USER_AGENTS)} try: request = requests.get(url, headers=headers, proxies=get_proxies()) except MissingSchema: url = add_protocol(url) request = requests.get(url, headers=headers, proxies=get_proxies()) return lh.fromstring(request.text.encode("utf-8") if PY2 else request.text) except Exception: sys.stderr.write("Failed to retrieve {0}.\n".format(url)) raise def get_raw_resp(url): try: headers = {"User-Agent": random.choice(USER_AGENTS)} try: request = requests.get(url, headers=headers, proxies=get_proxies()) except MissingSchema: url = add_protocol(url) request = requests.get(url, headers=headers, proxies=get_proxies()) return request.text.encode("utf-8") if PY2 else request.text except Exception: sys.stderr.write("Failed to retrieve {0} as str.\n".format(url)) raise def enable_cache(): try: import requests_cache except ImportError as err: sys.stderr.write("Failed to enable cache: {0}\n".format(str(err))) return if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR) requests_cache.install_cache(CACHE_FILE) def clear_cache(): for cache in glob.glob("{0}*".format(CACHE_FILE)): os.remove(cache) def hash_text(text): md5 = hashlib.md5() md5.update(text.encode("utf-8")) return md5.hexdigest() def cache_page(page_cache, page_hash, cache_size): page_cache.append(page_hash) if len(page_cache) > cache_size: page_cache.pop(0)
MIT License
alirezamika/autoscraper
autoscraper/auto_scraper.py
AutoScraper.load
python
def load(self, file_path):
    with open(file_path, "r") as f:
        data = json.load(f)

    if isinstance(data, list):
        self.stack_list = data
        return

    self.stack_list = data["stack_list"]
De-serializes the JSON representation of the stack_list and loads it back. Parameters ---------- file_path: str Path of the JSON file to load stack_list from. Returns ------- None
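A usage sketch (file path and URL are placeholders):

from autoscraper import AutoScraper

scraper = AutoScraper()
scraper.load('my_scraper.json')   # restore previously saved scraping rules
results = scraper.get_result_similar('https://example.org/some-page')
print(results)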
https://github.com/alirezamika/autoscraper/blob/973ba6abed840d16907a556bc0192e2bf4806c6d/autoscraper/auto_scraper.py#L72-L94
import hashlib import json from collections import defaultdict from html import unescape from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup from autoscraper.utils import ( FuzzyText, ResultItem, get_non_rec_text, get_random_str, normalize, text_match, unique_hashable, unique_stack_list, ) class AutoScraper(object): request_headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 \ (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36" } def __init__(self, stack_list=None): self.stack_list = stack_list or [] def save(self, file_path): data = dict(stack_list=self.stack_list) with open(file_path, "w") as f: json.dump(data, f)
MIT License
zalando-nakadi/bubuku
bubuku/features/rebalance/change.py
OptimizedRebalanceChange._remove_replica_copies
python
def _remove_replica_copies(self):
    for broker in self.broker_distribution.values():
        for topic_partition in broker.list_replica_copies():
            targets = [b for b in self._list_active_brokers_with_skip(broker.broker_id)]
            moved_to = broker.move_replica(topic_partition,
                                           [t for t in targets if t.has_free_replica_slots()])
            if not moved_to:
                moved_to = broker.move_replica(topic_partition, targets)
                if not moved_to:
                    raise Exception('Failed to move replica ' + str(topic_partition) + ', not enough replicas')
            self.action_queue.append((topic_partition, broker.broker_id, moved_to.broker_id))
During leadership rebalance it may happen that the leader for a replica is placed on a broker that already holds this partition. Before starting the real rebalance, such partitions need to be moved to different brokers, even if this adds some extra steps to the rebalance.
https://github.com/zalando-nakadi/bubuku/blob/5738cc9309ed46e86fcad41b6fb580ddd69af8fd/bubuku/features/rebalance/change.py#L219-L233
import logging from bubuku.features.rebalance import BaseRebalanceChange from bubuku.features.rebalance.broker import BrokerDescription from bubuku.zookeeper import BukuExhibitor, RebalanceThrottleManager _LOG = logging.getLogger('bubuku.features.rebalance') def distribute(amount: int, items: list, weight_key): if not items: return [] items = sorted(items, key=weight_key, reverse=True) ceil_amount = int(amount / len(items)) amounts = [ceil_amount for _ in range(0, len(items))] for idx in range(0, min(amount - sum(amounts), len(items))): amounts[idx] += 1 return amounts class DistributionMap(object): __slots__ = [ '_candidates', '_candidates_cardinality' ] def __init__(self, brokers: iter): self._candidates = {} self._candidates_cardinality = {broker: broker.calculate_topic_cardinality() for broker in brokers} for source_broker in brokers: if not source_broker.have_extra_leaders(): continue for target_broker in brokers: if not target_broker.have_less_leaders(): continue if not source_broker._rack_id == target_broker._rack_id: continue weight_list = [] for topic in self._candidates_cardinality[source_broker].keys(): delta = self._candidates_cardinality[source_broker][topic] - self._candidates_cardinality[target_broker].get(topic, 0) weight_list.append((topic, delta)) if weight_list: self._candidates[(source_broker, target_broker)] = sorted( weight_list, key=lambda x: x[1], reverse=True) def take_move_pair(self) -> tuple: top_candidate = None for broker_pair, weight_list in self._candidates.items(): if not top_candidate: top_candidate = broker_pair else: if weight_list[0][1] > self._candidates[top_candidate][0][1]: top_candidate = broker_pair topic, _ = self._candidates[top_candidate][0] self._candidates_cardinality[top_candidate[0]][topic] -= 1 topic_exhausted = self._candidates_cardinality[top_candidate[0]][topic] == 0 if topic_exhausted: for broker_pair, weight_list in self._candidates.items(): if top_candidate == broker_pair: DistributionMap._rearrange_topic_weights(weight_list, topic, None) elif top_candidate[0] == broker_pair[0]: DistributionMap._rearrange_topic_weights(weight_list, topic, None) elif top_candidate[1] == broker_pair[1]: DistributionMap._rearrange_topic_weights(weight_list, topic, -1) else: for broker_pair, weight_list in self._candidates.items(): if top_candidate == broker_pair: DistributionMap._rearrange_topic_weights(weight_list, topic, -2) elif top_candidate[0] == broker_pair[0] or top_candidate[1] == broker_pair[1]: DistributionMap._rearrange_topic_weights(weight_list, topic, -1) return top_candidate[0], top_candidate[1], topic @staticmethod def _rearrange_topic_weights(array: list, topic: str, cardinality_change): old_cardinality = None for item in array: if item[0] == topic: old_cardinality = item[1] array.remove(item) break if old_cardinality is None: return if cardinality_change is None: return new_cardinality = old_cardinality + cardinality_change idx = 0 for i in range(len(array) - 1, -1, -1): if array[i][1] >= new_cardinality: idx = i + 1 break array.insert(idx, (topic, new_cardinality)) def cleanup(self): for bp in [bp for bp, wl in self._candidates.items() if not bp[0].have_extra_leaders() or not bp[1].have_less_leaders()]: del self._candidates[bp] class OptimizedRebalanceChange(BaseRebalanceChange): _LOAD_STATE = 'load_state' _COMPUTE_LEADERS = 'compute_leaders' _COMPUTE_REPLICAS = 'compute_replicas' _SORT_ACTIONS = 'sort_actions' _BALANCE = 'balance' def __init__(self, zk: BukuExhibitor, broker_ids: list, empty_brokers: list, exclude_topics: list, 
throttle: int = 100000000, parallelism: int = 1): self.zk = zk self.all_broker_ids = sorted(int(id_) for id_ in broker_ids) self.broker_ids = sorted(int(id_) for id_ in broker_ids if id_ not in empty_brokers) self.broker_racks = zk.get_broker_racks() self.exclude_topics = exclude_topics self.broker_distribution = None self.source_distribution = None self.action_queue = [] self.state = OptimizedRebalanceChange._LOAD_STATE self.parallelism = parallelism self.throttle_manager = RebalanceThrottleManager(self.zk, throttle) def __str__(self): return 'OptimizedRebalance state={}, queue_size={}, parallelism={}'.format( self.state, len(self.action_queue) if self.action_queue is not None else None, self.parallelism) def run(self, current_actions) -> bool: if self.should_be_paused(current_actions): _LOG.warning("Rebalance paused, because other blocking events running: {}".format(current_actions)) return True if self.zk.is_rebalancing(): return True new_broker_ids = sorted(int(id_) for id_ in self.zk.get_broker_ids()) if new_broker_ids != self.all_broker_ids: _LOG.warning("Rebalance stopped because of broker list change from {} to {}".format( self.all_broker_ids, new_broker_ids)) return False if self.state == OptimizedRebalanceChange._LOAD_STATE: self._load_data() self.state = OptimizedRebalanceChange._COMPUTE_LEADERS elif self.state == OptimizedRebalanceChange._COMPUTE_LEADERS: self._rebalance_leaders() self.state = OptimizedRebalanceChange._COMPUTE_REPLICAS elif self.state == OptimizedRebalanceChange._COMPUTE_REPLICAS: self._rebalance_replicas() self.state = OptimizedRebalanceChange._SORT_ACTIONS elif self.state == OptimizedRebalanceChange._SORT_ACTIONS: self.action_queue = self._sort_actions() self.state = OptimizedRebalanceChange._BALANCE elif self.state == OptimizedRebalanceChange._BALANCE: return not self._balance() return True def _balance(self): items = [] self.throttle_manager.remove_old_throttle_configurations() while self.action_queue and len(items) < self.parallelism: items.append(self.action_queue.popitem()) if not items: return True data_to_rebalance = [(key[0], key[1], replicas) for key, replicas in items] self.throttle_manager.apply_throttle(data_to_rebalance) if not self.zk.reallocate_partitions(data_to_rebalance): for key, replicas in items: self.action_queue[key] = replicas return False def _rebalance_replicas(self): self._remove_replica_copies() if not self._rebalance_replicas_template(False): self._rebalance_replicas_template(True) if not self._rebalance_replicas_template(False): _LOG.error('Failed to rebalance replicas. Probably because of replication factor problems. ' 'Will just stop the process') raise Exception('Failed to perform replica rebalance {}, {}, {}'.format( self.broker_distribution, self.broker_ids, self.action_queue)) def _sort_actions(self): result = {} for topic_partition, source_broker_id, target_broker_id in self.action_queue: if topic_partition not in result: result[topic_partition] = list(self.source_distribution[topic_partition]) tmp_result = result[topic_partition] for i in range(len(tmp_result) - 1, -1, -1): if tmp_result[i] == source_broker_id: tmp_result[i] = target_broker_id break return result def _list_active_brokers_with_skip(self, skip_id: int) -> list: return [b for b in self.broker_distribution.values() if b.broker_id != skip_id and b.broker_id in self.broker_ids]
MIT License
rucio/rucio
lib/rucio/daemons/auditor/srmdumps.py
generate_url
python
def generate_url(rse, config):
    site = rse.split('_')[0]
    if site not in config.sections():
        base_url = ddmendpoint_url(rse) + 'dumps'
        url_pattern = 'dump_%Y%m%d'
    else:
        url_components = config.get(site, rse).split('/')
        pattern_index = next(idx for idx, comp in enumerate(url_components) if '%m' in comp)
        base_url = '/'.join(url_components[:pattern_index])
        url_pattern = '/'.join(url_components[pattern_index:])

    return base_url, url_pattern
:param rse: Name of the endpoint.
:param config: RawConfigParser instance which may have configuration related to the endpoint.
:returns: Tuple with the URL where the links can be queried to find new dumps and the pattern used to parse the date of the dump of the files/directories listed.
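A sketch of how a configured endpoint maps to the returned tuple (the section name and URL are made up; with no matching section the function falls back to ddmendpoint_url):

config = Parser({'disabled': False})
config.add_section('CERN-PROD')
config.set('CERN-PROD', 'CERN-PROD_DATADISK',
           'https://example.cern.ch/dumps/%Y-%m-%d/dump_%Y%m%d')

base_url, pattern = generate_url('CERN-PROD_DATADISK', config)
# base_url == 'https://example.cern.ch/dumps'
# pattern  == '%Y-%m-%d/dump_%Y%m%d'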
https://github.com/rucio/rucio/blob/6a6092798bb8220dec07328d0e3f7f42d1b931cd/lib/rucio/daemons/auditor/srmdumps.py#L279-L299
from rucio.common.config import get_config_dirs from rucio.common.dumper import DUMPS_CACHE_DIR from rucio.common.dumper import http_download_to_file, gfal_download_to_file, ddmendpoint_url, temp_file try: import ConfigParser except ImportError: import configparser as ConfigParser try: import HTMLParser except ImportError: import html.parser as HTMLParser import datetime import glob import hashlib import logging import operator import os import re import requests import gfal2 CHUNK_SIZE = 10485760 __DUMPERCONFIGDIRS = (os.path.join(confdir, 'auditor') for confdir in get_config_dirs()) __DUMPERCONFIGDIRS = list(filter(os.path.exists, __DUMPERCONFIGDIRS)) class Parser(ConfigParser.RawConfigParser, object): remove_quotes_re = re.compile(r"^'(.+)'$") remove_double_quotes_re = re.compile(r'^"(.+)"$') def optionxform(self, optionstr): return optionstr def get(self, section, option): value = super(Parser, self).get(section, option) if isinstance(value, str): value = self.remove_quotes_re.sub(r'\1', value) value = self.remove_double_quotes_re.sub(r'\1', value) return value def items(self, section): return [(name, self.get(section, name)) for name in self.options(section)] def mkdir(dir_): try: os.mkdir(dir_) except OSError as e: assert e.errno == 17 def get_newest(base_url, url_pattern, links): logger = logging.getLogger('auditor.srmdumps') times = [] pattern_components = url_pattern.split('/') date_pattern = '{0}/{1}'.format(base_url, pattern_components[0]) if len(pattern_components) > 1: postfix = '/' + '/'.join(pattern_components[1:]) else: postfix = '' for link in links: try: time = datetime.datetime.strptime(link, date_pattern) except ValueError: pass else: times.append((str(link) + postfix, time)) if not times: msg = 'No links found matching the pattern {0} in {1}'.format(date_pattern, links) logger.error(msg) raise RuntimeError(msg) return max(times, key=operator.itemgetter(1)) def gfal_links(base_url): ctxt = gfal2.creat_context() return ['/'.join((base_url, f)) for f in ctxt.listdir(str(base_url))] class _LinkCollector(HTMLParser.HTMLParser, object): def __init__(self): super(_LinkCollector, self).__init__() self.links = [] def handle_starttag(self, tag, attrs): if tag == 'a': self.links.append( next(value for key, value in attrs if key == 'href') ) def http_links(base_url): html = requests.get(base_url).text link_collector = _LinkCollector() link_collector.feed(html) links = [] for link in link_collector.links: if not link.startswith('http://') and not link.startswith('https://'): links.append('{0}/{1}'.format(base_url, link)) else: links.append(link) return links protocol_funcs = { 'davs': { 'links': gfal_links, 'download': gfal_download_to_file, }, 'gsiftp': { 'links': gfal_links, 'download': gfal_download_to_file, }, 'root': { 'links': gfal_links, 'download': gfal_download_to_file, }, 'srm': { 'links': gfal_links, 'download': gfal_download_to_file, }, 'http': { 'links': http_links, 'download': http_download_to_file, }, 'https': { 'links': http_links, 'download': http_download_to_file, }, } def protocol(url): proto = url.split('://')[0] if proto not in protocol_funcs: raise RuntimeError('Protocol {0} not supported'.format(proto)) return proto def get_links(base_url): return protocol_funcs[protocol(base_url)]['links'](base_url) def download(url, filename): return protocol_funcs[protocol(url)]['download'](url, filename) def parse_configuration(conf_dirs=__DUMPERCONFIGDIRS): logger = logging.getLogger('auditor.srmdumps') if len(conf_dirs) == 0: logger.error('No configuration directory 
given to load SRM dumps paths') raise Exception('No configuration directory given to load SRM dumps paths') configuration = Parser({ 'disabled': False, }) for conf_dir in conf_dirs: configuration.read(glob.glob(conf_dir + '/*.cfg')) return configuration def download_rse_dump(rse, configuration, date='latest', destdir=DUMPS_CACHE_DIR): logger = logging.getLogger('auditor.srmdumps') base_url, url_pattern = generate_url(rse, configuration) if date == 'latest': logger.debug('Looking for site dumps in: "%s"', base_url) links = get_links(base_url) url, date = get_newest(base_url, url_pattern, links) else: url = '{0}/{1}'.format(base_url, date.strftime(url_pattern)) if not os.path.isdir(destdir): os.mkdir(destdir) filename = '{0}_{1}_{2}_{3}'.format( 'ddmendpoint', rse, date.strftime('%d-%m-%Y'), hashlib.sha1(url.encode()).hexdigest() ) filename = re.sub(r'\W', '-', filename) path = os.path.join(destdir, filename) if not os.path.exists(path): logger.debug('Trying to download: "%s"', url) with temp_file(destdir, final_name=filename) as (f, _): download(url, f) return (path, date)
Apache License 2.0
drexly/openhgsenti
lib/django/contrib/gis/gdal/prototypes/errcheck.py
check_envelope
python
def check_envelope(result, func, cargs, offset=-1):
    env = ptr_byref(cargs, offset)
    return env
Checks a function that returns an OGR Envelope by reference.
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/gis/gdal/prototypes/errcheck.py#L71-L74
from ctypes import c_void_p, string_at from django.contrib.gis.gdal.error import ( GDALException, SRSException, check_err, ) from django.contrib.gis.gdal.libgdal import lgdal from django.utils import six def arg_byref(args, offset=-1): return args[offset]._obj.value def ptr_byref(args, offset=-1): return args[offset]._obj def check_const_string(result, func, cargs, offset=None, cpl=False): if offset: check_err(result, cpl=cpl) ptr = ptr_byref(cargs, offset) return ptr.value else: return result def check_string(result, func, cargs, offset=-1, str_result=False): if str_result: ptr = result if not ptr: s = None else: s = string_at(result) else: check_err(result) ptr = ptr_byref(cargs, offset) s = ptr.value if ptr: lgdal.VSIFree(ptr) return s
Apache License 2.0
learnables/learn2learn
tests/unit/utils_test.py
ref_clone_module
python
def ref_clone_module(module):
    clone = copy.deepcopy(module)

    if hasattr(clone, '_parameters'):
        for param_key in module._parameters:
            if module._parameters[param_key] is not None:
                cloned = module._parameters[param_key].clone()
                clone._parameters[param_key] = cloned

    if hasattr(clone, '_buffers'):
        for buffer_key in module._buffers:
            if clone._buffers[buffer_key] is not None and clone._buffers[buffer_key].requires_grad:
                clone._buffers[buffer_key] = module._buffers[buffer_key].clone()

    if hasattr(clone, '_modules'):
        for module_key in clone._modules:
            clone._modules[module_key] = ref_clone_module(module._modules[module_key])

    return clone
Note: This implementation does not work for RNNs. It requires calling learner.rnn._apply(lambda x: x) before each forward call. See this issue for more details: https://github.com/learnables/learn2learn/issues/139 Note: This implementation also does not work for Modules that re-use parameters from another Module. See this issue for more details: https://github.com/learnables/learn2learn/issues/174
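A usage sketch with a toy module; the cloned parameters stay connected to the original ones in the autograd graph:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
clone = ref_clone_module(model)

loss = clone(torch.randn(3, 4)).sum()
loss.backward()
# Gradients computed through the clone accumulate on the original parameters.
print(model.weight.grad is not None)   # True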
https://github.com/learnables/learn2learn/blob/e0705a22953f06e609f7e2c4157bcab3bb2ed53b/tests/unit/utils_test.py#L11-L45
import unittest import copy import torch import learn2learn as l2l EPSILON = 1e-8
MIT License
hass-emulated-hue/core
emulated_hue/__init__.py
HueEmulator.hass
python
def hass(self) -> Optional[HomeAssistantClient]: return self._hass
Return the Home Assistant instance.
https://github.com/hass-emulated-hue/core/blob/7dbc4dfe6a39f67695e888179c8e02e7bf005570/emulated_hue/__init__.py#L42-L44
import asyncio import logging from typing import Optional from hass_client import HomeAssistantClient from .api import HueApi from .config import Config from .discovery import async_setup_discovery LOGGER = logging.getLogger(__name__) class HueEmulator: def __init__( self, data_path: str, hass_url: str, hass_token: str, http_port: int, https_port: int, use_default_ports: bool, ) -> None: self._loop = None self._config = Config(self, data_path, http_port, https_port, use_default_ports) self._hass: Optional[HomeAssistantClient] = None self._hass_url = hass_url self._hass_token = hass_token self._api = HueApi(self) @property def config(self) -> Config: return self._config @property
Apache License 2.0
jerrylingjiemei/adept-dataset-release
phys_sim/objects.py
ObjectManager.add_occluder
python
def add_occluder(self, shape="cube", joint="revolute", mass=1, init_pos=(0, 0, 0), init_orn=(0, 0, 0), scale=(.2, 4., 2.), joint_pattern=None, **kwargs): obj_path = os.path.join(self.obj_dir, "shapes", '%s.obj' % shape) init_orn_quat = p.getQuaternionFromEuler(deg2rad(init_orn)) col_id = p.createCollisionShape(p.GEOM_MESH, fileName=obj_path, meshScale=scale, collisionFramePosition=(-scale[0], 0, scale[2])) self.occluder_info["linkMasses"].append(mass) self.occluder_info["linkCollisionShapeIndices"].append(col_id) self.occluder_info["linkVisualShapeIndices"].append(col_id) self.occluder_info["linkPositions"].append(init_pos) self.occluder_info["linkOrientations"].append(init_orn_quat) self.occluder_info["linkInertialFramePositions"].append((-scale[0], 0, scale[2])) self.occluder_info["linkInertialFrameOrientations"].append((0, 0, 0, 1)) self.occluder_info["linkParentIndices"].append(0) self.occluder_info["linkJointAxis"].append((0, 1, 0)) if joint == "revolute": self.occluder_info["linkJointTypes"].append(p.JOINT_REVOLUTE) if joint_pattern is None: self.joint_patterns.append(np.zeros(self.num_steps)) else: self.joint_patterns.append(convert_rot_patterns(joint_pattern)) elif joint == "prismatic": self.occluder_info["linkJointTypes"].append(p.JOINT_PRISMATIC) if joint_pattern is None: self.joint_patterns.append(np.zeros(self.num_steps)) else: self.joint_patterns.append(convert_trans_patterns(joint_pattern)) else: raise NotImplementedError("Joint type not supported") self.num_link += 1
Add an occluder with physical properties
https://github.com/jerrylingjiemei/adept-dataset-release/blob/900d9d5c7e780a5daa06f0484d91539a4ca92ff8/phys_sim/objects.py#L82-L112
import pybullet as p import re import os from phys_sim.convert_pattern import * from utils.constants import OCCLUDER_HALF_WIDTH from utils.shape_net import SHAPE_DIMENSIONS class ObjectManager(object): def __init__(self, config, obj_dir, num_steps): self.obj_dir = obj_dir self.config = config self.num_steps = num_steps self.plane_id, self.plane_visual_id = self.add_plane() self.object_ids = [] self.desk_ids = [] self.disappear_time = [] self.appear_time = [] self.init_positions = [] for obj_params in self.config["objects"]: self.object_ids.append(self.add_object(**obj_params)) self.num_link = 0 self.joint_patterns = [] self.occluder_info = self.add_occluders_start() if "occluders" in self.config: for occluder_params in self.config["occluders"]: self.add_occluder(**occluder_params) if "desks" in self.config: for desk_params in self.config["desks"]: self.add_desk(**desk_params) self.ground_id = self.add_occluders_end() def add_plane(self): plane_id = p.createCollisionShape(p.GEOM_MESH, fileName="plane.obj", meshScale=[100, 100, 100]) plane_visual_id = p.createVisualShape(p.GEOM_MESH, fileName="plane.obj", rgbaColor=(1, 1, 1, 1)) return plane_id, plane_visual_id def add_object(self, shape, mass=1, init_pos=(0, 0, 1), init_orn=(0, 0, 0), scale=(1, 1, 1), init_v=(0, 0, 0), lat_fric=0., restitution=.9, lin_damp=0, angular_damp=0, disappear_time=100000, appear_time=0, **kwargs): scale = [x * y for x, y in zip(scale, SHAPE_DIMENSIONS[shape])] shape = "cube" obj_path = os.path.join(self.obj_dir, "shapes", '%s.obj' % shape) init_orn_quat = p.getQuaternionFromEuler(deg2rad(init_orn)) col_id = p.createCollisionShape(p.GEOM_MESH, fileName=obj_path, meshScale=scale) obj_id = p.createMultiBody(mass, col_id, basePosition=init_pos, baseOrientation=init_orn_quat) p.resetBaseVelocity(obj_id, linearVelocity=init_v) p.changeDynamics(obj_id, -1, lateralFriction=lat_fric, restitution=restitution, linearDamping=lin_damp, angularDamping=angular_damp) self.init_positions.append(init_pos) self.disappear_time.append(disappear_time) self.appear_time.append(appear_time) return obj_id def add_occluders_start(self): occluders_info = dict(baseCollisionShapeIndex=self.plane_id, baseVisualShapeIndex=self.plane_visual_id, basePosition=(0, 0, 0), linkMasses=[], linkCollisionShapeIndices=[], linkVisualShapeIndices=[], linkPositions=[], linkOrientations=[], linkInertialFramePositions=[], linkInertialFrameOrientations=[], linkParentIndices=[], linkJointTypes=[], linkJointAxis=[]) return occluders_info
MIT License
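A hedged sketch of the config entry that drives the method above: `ObjectManager.__init__` (see the context) expands each element of `config["occluders"]` as `self.add_occluder(**occluder_params)`. The key names mirror the keyword arguments; all values are placeholders.

occluder_params = {
    "shape": "cube",            # mesh name resolved under <obj_dir>/shapes/
    "joint": "revolute",        # or "prismatic"
    "mass": 1,
    "init_pos": (0, 2.0, 0),
    "init_orn": (0, 0, 90),     # degrees; converted to a quaternion internally
    "scale": (.2, 4., 2.),
    "joint_pattern": None,      # placeholder; None keeps the joint target at zero
}
# Inside ObjectManager.__init__ this entry is applied as:
#     self.add_occluder(**occluder_params)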
adw0rd/instagrapi
instagrapi/mixins/hashtag.py
HashtagMixin.hashtag_info_gql
python
def hashtag_info_gql(
    self, name: str, amount: int = 12, end_cursor: str = None
) -> Hashtag:
    variables = {"tag_name": name, "show_ranked": False, "first": int(amount)}
    if end_cursor:
        variables["after"] = end_cursor
    data = self.public_graphql_request(
        variables, query_hash="f92f56d47dc7a55b606908374b43a314"
    )
    if not data.get("hashtag"):
        raise HashtagNotFound(name=name, **data)
    return extract_hashtag_gql(data["hashtag"])
Get information about a hashtag by Public Graphql API

Parameters
----------
name: str
    Name of the hashtag
amount: int, optional
    Maximum number of media to return, default is 12
end_cursor: str, optional
    End Cursor, default value is None

Returns
-------
Hashtag
    An object of Hashtag
https://github.com/adw0rd/instagrapi/blob/759533a285747105c25858e738f1b3bc1cef6953/instagrapi/mixins/hashtag.py#L42-L72
from typing import List, Tuple

from instagrapi.exceptions import ClientError, HashtagNotFound
from instagrapi.extractors import (
    extract_hashtag_gql,
    extract_hashtag_v1,
    extract_media_gql,
    extract_media_v1,
)
from instagrapi.types import Hashtag, Media
from instagrapi.utils import dumps


class HashtagMixin:
    def hashtag_info_a1(self, name: str, max_id: str = None) -> Hashtag:
        params = {"max_id": max_id} if max_id else None
        data = self.public_a1_request(f"/explore/tags/{name}/", params=params)
        if not data.get("hashtag"):
            raise HashtagNotFound(name=name, **data)
        return extract_hashtag_gql(data["hashtag"])
MIT License
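A minimal usage sketch for `hashtag_info_gql`, assuming it is reached through instagrapi's `Client` (which includes `HashtagMixin`) and that the returned `Hashtag` model exposes `name` and `media_count`; the credentials and hashtag are placeholders.

from instagrapi import Client

cl = Client()
cl.login("my_username", "my_password")   # placeholder credentials

# Query public hashtag info via the GraphQL endpoint wrapped above.
tag = cl.hashtag_info_gql("sunset", amount=12)
print(tag.name, tag.media_count)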
webrecorder/warcio
warcio/timeutils.py
iso_date_to_datetime
python
def iso_date_to_datetime(string):
    nums = DATE_TIMESPLIT.split(string)
    if nums[-1] == '':
        nums = nums[:-1]

    if len(nums) == 7:
        nums[6] = nums[6][:6]
        nums[6] += PAD_MICRO[len(nums[6]):]

    the_datetime = datetime.datetime(*(int(num) for num in nums))
    return the_datetime
>>> iso_date_to_datetime('2013-12-26T10:11:12Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)

>>> iso_date_to_datetime('2013-12-26T10:11:12.456789Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 456789)

>>> iso_date_to_datetime('2013-12-26T10:11:12.30Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 300000)

>>> iso_date_to_datetime('2013-12-26T10:11:12.00001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 10)

>>> iso_date_to_datetime('2013-12-26T10:11:12.000001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12, 1)

>>> iso_date_to_datetime('2013-12-26T10:11:12.0000001Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)

>>> iso_date_to_datetime('2013-12-26T10:11:12.000000Z')
datetime.datetime(2013, 12, 26, 10, 11, 12)
https://github.com/webrecorder/warcio/blob/aa702cb321621b233c6e5d2a4780151282a778be/warcio/timeutils.py#L28-L61
import re
import time
import datetime
import calendar

from email.utils import parsedate, formatdate

DATE_TIMESPLIT = re.compile(r'[^\d]')

TIMESTAMP_14 = '%Y%m%d%H%M%S'
ISO_DT = '%Y-%m-%dT%H:%M:%SZ'

PAD_14_DOWN = '10000101000000'
PAD_14_UP = '29991231235959'
PAD_6_UP = '299912'

PAD_MICRO = '000000'
Apache License 2.0
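A short, self-contained usage sketch of `iso_date_to_datetime` as shipped by warcio; it simply exercises the microsecond-padding behaviour shown in the doctests above.

from warcio.timeutils import iso_date_to_datetime

dt = iso_date_to_datetime('2013-12-26T10:11:12.30Z')
print(dt)               # 2013-12-26 10:11:12.300000
print(dt.microsecond)   # 300000 -- the '.30' fraction is padded to six digits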
pycord-development/pycord
discord/ui/button.py
Button.url
python
def url(self) -> Optional[str]:
    return self._underlying.url
Optional[:class:`str`]: The URL this button sends you to.
https://github.com/pycord-development/pycord/blob/2d747f8fc7d452d54dd0fc2b7e0cc2eaa322b0b1/discord/ui/button.py#L154-L156
from __future__ import annotations from typing import Callable, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union import inspect import os from .item import Item, ItemCallbackType from ..enums import ButtonStyle, ComponentType from ..partial_emoji import PartialEmoji, _EmojiTag from ..components import Button as ButtonComponent __all__ = ( 'Button', 'button', ) if TYPE_CHECKING: from .view import View from ..emoji import Emoji B = TypeVar('B', bound='Button') V = TypeVar('V', bound='View', covariant=True) class Button(Item[V]): __item_repr_attributes__: Tuple[str, ...] = ( 'style', 'url', 'disabled', 'label', 'emoji', 'row', ) def __init__( self, *, style: ButtonStyle = ButtonStyle.secondary, label: Optional[str] = None, disabled: bool = False, custom_id: Optional[str] = None, url: Optional[str] = None, emoji: Optional[Union[str, Emoji, PartialEmoji]] = None, row: Optional[int] = None, ): super().__init__() if custom_id is not None and url is not None: raise TypeError('cannot mix both url and custom_id with Button') self._provided_custom_id = custom_id is not None if url is None and custom_id is None: custom_id = os.urandom(16).hex() if url is not None: style = ButtonStyle.link if emoji is not None: if isinstance(emoji, str): emoji = PartialEmoji.from_str(emoji) elif isinstance(emoji, _EmojiTag): emoji = emoji._to_partial() else: raise TypeError(f'expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}') self._underlying = ButtonComponent._raw_construct( type=ComponentType.button, custom_id=custom_id, url=url, disabled=disabled, label=label, style=style, emoji=emoji, ) self.row = row @property def style(self) -> ButtonStyle: return self._underlying.style @style.setter def style(self, value: ButtonStyle): self._underlying.style = value @property def custom_id(self) -> Optional[str]: return self._underlying.custom_id @custom_id.setter def custom_id(self, value: Optional[str]): if value is not None and not isinstance(value, str): raise TypeError('custom_id must be None or str') self._underlying.custom_id = value @property
MIT License
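A minimal sketch of how the `url` property above is typically read, assuming a plain py-cord `View`; the label and target URL are placeholders. Per the constructor shown in the context, supplying `url` forces the link button style.

from discord.ui import Button, View

# Link-style buttons carry a URL instead of a custom_id.
button = Button(label="Docs", url="https://example.com/docs")
print(button.url)     # https://example.com/docs
print(button.style)   # ButtonStyle.link (forced because a url was given)

view = View()
view.add_item(button)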
chalasr/flask-p2p
venv/lib/python2.7/site-packages/gunicorn/app/pasterapp.py
paste_server
python
def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
    util.warn("""This command is deprecated.

    You should now use the `--paste` option. Ex.:

        gunicorn --paste development.ini
    """)

    from gunicorn.app.pasterapp import PasterServerApplication
    PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args,
                            **kwargs).run()
A paster server.

The entry point in your paster ini file should look like this:

[server:main]
use = egg:gunicorn#main
host = 127.0.0.1
port = 5000
https://github.com/chalasr/flask-p2p/blob/eb9bf3997a82f837beb957721e4b6d7973018a1f/venv/lib/python2.7/site-packages/gunicorn/app/pasterapp.py#L191-L212
from __future__ import print_function import os import pkg_resources import sys try: import configparser as ConfigParser except ImportError: import ConfigParser from paste.deploy import loadapp, loadwsgi SERVER = loadwsgi.SERVER from gunicorn.app.base import Application from gunicorn.config import Config, get_default_config_file from gunicorn import util def _has_logging_config(paste_file): cfg_parser = ConfigParser.ConfigParser() cfg_parser.read([paste_file]) return cfg_parser.has_section('loggers') def paste_config(gconfig, config_url, relative_to, global_conf=None): sys.path.insert(0, relative_to) pkg_resources.working_set.add_entry(relative_to) config_url = config_url.split('#')[0] cx = loadwsgi.loadcontext(SERVER, config_url, relative_to=relative_to, global_conf=global_conf) gc, lc = cx.global_conf.copy(), cx.local_conf.copy() cfg = {} host, port = lc.pop('host', ''), lc.pop('port', '') if host and port: cfg['bind'] = '%s:%s' % (host, port) elif host: cfg['bind'] = host.split(',') cfg['workers'] = int(lc.get('workers', 1)) cfg['umask'] = int(lc.get('umask', 0)) cfg['default_proc_name'] = gc.get('__file__') config_file = config_url.split(':')[1] if _has_logging_config(config_file): cfg.setdefault('logconfig', config_file) for k, v in gc.items(): if k not in gconfig.settings: continue cfg[k] = v for k, v in lc.items(): if k not in gconfig.settings: continue cfg[k] = v return cfg def load_pasteapp(config_url, relative_to, global_conf=None): return loadapp(config_url, relative_to=relative_to, global_conf=global_conf) class PasterBaseApplication(Application): gcfg = None def app_config(self): return paste_config(self.cfg, self.cfgurl, self.relpath, global_conf=self.gcfg) def load_config(self): super(PasterBaseApplication, self).load_config() if hasattr(self, "cfgfname"): parser = ConfigParser.ConfigParser() parser.read([self.cfgfname]) if parser.has_section('loggers'): from logging.config import fileConfig config_file = os.path.abspath(self.cfgfname) fileConfig(config_file, dict(__file__=config_file, here=os.path.dirname(config_file))) class PasterApplication(PasterBaseApplication): def init(self, parser, opts, args): if len(args) != 1: parser.error("No application name specified.") cwd = util.getcwd() cfgfname = os.path.normpath(os.path.join(cwd, args[0])) cfgfname = os.path.abspath(cfgfname) if not os.path.exists(cfgfname): parser.error("Config file not found: %s" % cfgfname) self.cfgurl = 'config:%s' % cfgfname self.relpath = os.path.dirname(cfgfname) self.cfgfname = cfgfname sys.path.insert(0, self.relpath) pkg_resources.working_set.add_entry(self.relpath) return self.app_config() def load(self): os.chdir(self.cfg.chdir) return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.gcfg) class PasterServerApplication(PasterBaseApplication): def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs): self.cfg = Config() self.gcfg = gcfg self.app = app self.callable = None gcfg = gcfg or {} cfgfname = gcfg.get("__file__") if cfgfname is not None: self.cfgurl = 'config:%s' % cfgfname self.relpath = os.path.dirname(cfgfname) self.cfgfname = cfgfname cfg = kwargs.copy() if port and not host.startswith("unix:"): bind = "%s:%s" % (host, port) else: bind = host cfg["bind"] = bind.split(',') if gcfg: for k, v in gcfg.items(): cfg[k] = v cfg["default_proc_name"] = cfg['__file__'] try: for k, v in cfg.items(): if k.lower() in self.cfg.settings and v is not None: self.cfg.set(k.lower(), v) except Exception as e: print("\nConfig error: %s" % str(e), file=sys.stderr) 
sys.stderr.flush() sys.exit(1) if cfg.get("config"): self.load_config_from_file(cfg["config"]) else: default_config = get_default_config_file() if default_config is not None: self.load_config_from_file(default_config) def load(self): os.chdir(self.cfg.chdir) return self.app def run(): util.warn("""This command is deprecated. You should now use the `--paste` option. Ex.: gunicorn --paste development.ini """) from gunicorn.app.pasterapp import PasterApplication PasterApplication("%(prog)s [OPTIONS] pasteconfig.ini").run()
MIT License
nvbn/thefuck
thefuck/shells/tcsh.py
Tcsh._get_version
python
def _get_version(self):
    proc = Popen(['tcsh', '--version'], stdout=PIPE, stderr=DEVNULL)
    return proc.stdout.read().decode('utf-8').split()[1]
Returns the version of the current shell
https://github.com/nvbn/thefuck/blob/c719712b6256f4add4e65e8d4369b36d73342b48/thefuck/shells/tcsh.py#L41-L44
from subprocess import Popen, PIPE from time import time import os from ..utils import DEVNULL, memoize from .generic import Generic class Tcsh(Generic): friendly_name = 'Tcsh' def app_alias(self, alias_name): return ("alias {0} 'setenv TF_SHELL tcsh && setenv TF_ALIAS {0} && " "set fucked_cmd=`history -h 2 | head -n 1` && " "eval `thefuck ${{fucked_cmd}}`'").format(alias_name) def _parse_alias(self, alias): name, value = alias.split("\t", 1) return name, value @memoize def get_aliases(self): proc = Popen(['tcsh', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL) return dict( self._parse_alias(alias) for alias in proc.stdout.read().decode('utf-8').split('\n') if alias and '\t' in alias) def _get_history_file_name(self): return os.environ.get("HISTFILE", os.path.expanduser('~/.history')) def _get_history_line(self, command_script): return u'#+{}\n{}\n'.format(int(time()), command_script) def how_to_configure(self): return self._create_shell_configuration( content=u'eval `thefuck --alias`', path='~/.tcshrc', reload='tcsh')
MIT License
costapt/vess2ret
test.py
join_and_create_dir
python
def join_and_create_dir(*paths):
    path = os.path.join(*paths)
    mkdir(path)
    return path
Join the paths provided as arguments, create the directory and return the path.
https://github.com/costapt/vess2ret/blob/5702175bcd9ecde34d4fedab45a7cd2878a0184c/test.py#L32-L37
import os import sys import getopt import numpy as np import models as m import matplotlib.pyplot as plt import util.util as u from util.data import TwoImageIterator from util.util import MyDict, load_params, load_weights_of, compose_imgs, convert_to_rgb, mkdir, get_log_dir def print_help(): print "Usage:" print "test.py [--help] [--results_dir] [--log_dir] [--base_dir] [--train_dir] [--val_dir] " "[--test_dir] [--load_to_memory] [--expt_name] [--target_size] [--N]" print "--results_dir: Directory where to save the results." print "--log_dir': Directory where the experiment was logged." print "--base_dir: Directory that contains the data." print "--train_dir: Directory inside base_dir that contains training data." print "--val_dir: Directory inside base_dir that contains validation data." print "--test_dir: Directory inside base_dir that contains test data." print "--load_to_memory: Whether to load the images into memory." print "--expt_name: The name of the experiment to test." print "--target_size: The size of the images loaded by the iterator." print "--N: The number of samples to generate."
MIT License
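A usage sketch for `join_and_create_dir`. Because the helper lives in the repository's top-level `test.py` rather than an installable package, the import below is illustrative only and assumes the repository root is on `sys.path`.

# Illustrative import; assumes the vess2ret repository root is on sys.path.
from test import join_and_create_dir

out_dir = join_and_create_dir("results", "expt_1", "images")
print(out_dir)   # results/expt_1/images -- created on disk if it did not exist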
dials/dials
algorithms/scaling/scaler_factory.py
NullScalerFactory.create
python
def create(cls, params, experiment, reflection_table):
    logger.info("Preprocessing target dataset for scaling. \n")
    reflection_table = cls.filter_bad_reflections(reflection_table)
    variance_mask = reflection_table["variance"] <= 0.0
    reflection_table.set_flags(
        variance_mask, reflection_table.flags.excluded_for_scaling
    )
    logger.info(
        "%s reflections not suitable for scaling\n",
        reflection_table.get_flags(
            reflection_table.flags.excluded_for_scaling
        ).count(True),
    )
    cls.ensure_experiment_identifier(experiment, reflection_table)
    return NullScaler(params, experiment, reflection_table)
Return Null Scaler.
https://github.com/dials/dials/blob/a2cb71bf410e179b92554bcce2e21388e1dc25d1/algorithms/scaling/scaler_factory.py#L206-L222
import logging from libtbx import Auto from dials.algorithms.scaling.scaler import ( MultiScaler, NullScaler, SingleScaler, TargetScaler, ) from dials.algorithms.scaling.scaling_library import choose_initial_scaling_intensities from dials.algorithms.scaling.scaling_utilities import ( BadDatasetForScalingException, Reasons, align_axis_along_z, calc_crystal_frame_vectors, quasi_normalisation, ) from dials.array_family import flex from dials.util.filter_reflections import ( filter_reflection_table_selection, sum_partial_reflections, ) logger = logging.getLogger("dials") def create_scaler(params, experiments, reflections): if not reflections: raise ValueError("No reflection tables provided as input") if len(reflections) == 1: scaler = SingleScalerFactory.create(params, experiments[0], reflections[0]) else: is_scaled_list = [expt.scaling_model.is_scaled for expt in experiments] if params.scaling_options.target_mtz or params.scaling_options.target_model: scaler = TargetScalerFactory.create_for_target_against_reference( params, experiments, reflections ) elif ( params.scaling_options.target_cycle and len(set(is_scaled_list)) == 2 ): scaler = TargetScalerFactory.create(params, experiments, reflections) else: scaler = MultiScalerFactory.create(params, experiments, reflections) return scaler class ScalerFactory: @staticmethod def filter_bad_reflections( reflections, partiality_cutoff=0.4, min_isigi=-5.0, intensity_choice="combine" ): logger.info( "Applying filter of min_isigi > %s, partiality > %s", min_isigi, partiality_cutoff, ) logger.disabled = True if intensity_choice == "combine": if "intensity.sum.value" not in reflections: if "intensity.prf.value" not in reflections: intensity_choice = None else: intensity_choice = ["profile"] elif "intensity.prf.value" not in reflections: intensity_choice = ["sum"] else: intensity_choice = ["sum | profile"] else: intensity_choice = [intensity_choice] if intensity_choice: good = filter_reflection_table_selection( reflections, intensity_choice=intensity_choice, combine_partials=False, partiality_threshold=partiality_cutoff, min_isigi=min_isigi, ) mask = ~good reflections.set_flags(mask, reflections.flags.excluded_for_scaling) logger.disabled = False return reflections @staticmethod def ensure_experiment_identifier(experiment, reflection_table): id_vals = list(reflection_table.experiment_identifiers().values()) assert experiment.identifier in id_vals, (experiment.identifier, list(id_vals)) assert len(id_vals) == 1, list(id_vals) logger.info( "The experiment id for this dataset is %s.", reflection_table.experiment_identifiers().keys()[0], ) class SingleScalerFactory(ScalerFactory): @classmethod def create(cls, params, experiment, reflection_table, for_multi=False): cls.ensure_experiment_identifier(experiment, reflection_table) logger.info( "The scaling model type being applied is %s. 
\n", experiment.scaling_model.id_, ) try: reflection_table = cls.filter_bad_reflections( reflection_table, partiality_cutoff=params.cut_data.partiality_cutoff, min_isigi=params.cut_data.min_isigi, intensity_choice=params.reflection_selection.intensity_choice, ) except ValueError: raise BadDatasetForScalingException reflection_table = sum_partial_reflections(reflection_table) if "inverse_scale_factor" not in reflection_table: reflection_table["inverse_scale_factor"] = flex.double( reflection_table.size(), 1.0 ) elif ( reflection_table["inverse_scale_factor"].count(0.0) == reflection_table.size() ): reflection_table["inverse_scale_factor"] = flex.double( reflection_table.size(), 1.0 ) reflection_table = choose_initial_scaling_intensities( reflection_table, params.reflection_selection.intensity_choice ) excluded_for_scaling = reflection_table.get_flags( reflection_table.flags.excluded_for_scaling ) user_excluded = reflection_table.get_flags( reflection_table.flags.user_excluded_in_scaling ) reasons = Reasons() reasons.add_reason("user excluded", user_excluded.count(True)) reasons.add_reason("excluded for scaling", excluded_for_scaling.count(True)) n_excluded = (excluded_for_scaling | user_excluded).count(True) if n_excluded == reflection_table.size(): logger.info("All reflections were determined to be unsuitable for scaling.") logger.info(reasons) raise BadDatasetForScalingException( """Unable to use this dataset for scaling""" ) else: logger.info( "Excluding %s/%s reflections\n%s", n_excluded, reflection_table.size(), reasons, ) if params.reflection_selection.method == "intensity_ranges": reflection_table = quasi_normalisation(reflection_table, experiment) if ( params.reflection_selection.method in (None, Auto, "auto", "quasi_random") ) or ( experiment.scaling_model.id_ == "physical" and "absorption" in experiment.scaling_model.components ): if experiment.scan: reflection_table = calc_crystal_frame_vectors( reflection_table, experiment ) alignment_axis = (1.0, 0.0, 0.0) reflection_table["s0c"] = align_axis_along_z( alignment_axis, reflection_table["s0c"] ) reflection_table["s1c"] = align_axis_along_z( alignment_axis, reflection_table["s1c"] ) try: scaler = SingleScaler(params, experiment, reflection_table, for_multi) except BadDatasetForScalingException as e: raise ValueError(e) else: return scaler class NullScalerFactory(ScalerFactory): @classmethod
BSD 3-Clause New or Revised License
tnychn/instascrape
instascrape/structures.py
Post.url
python
def url(self) -> str:
    return "https://instagram.com/p/" + self.shortcode
Returns the URL of this post.
https://github.com/tnychn/instascrape/blob/7aaf3c1a1786bbe80059ed6e0d93442a19a6f475/instascrape/structures.py#L312-L314
import os import sys import json import logging import traceback from typing import * from io import BytesIO from collections import namedtuple, OrderedDict import requests from instascrape.constants import * from instascrape.exceptions import * from instascrape.group import * from instascrape.utils import get_username_from_userid, set_mtime, get_biggest_media, verify_file, to_datetime __all__ = ("Post", "IGTV", "Profile", "Hashtag", "Explore") logger = logging.getLogger("instascrape") CommentItem = namedtuple("CommentItem", "author text created_time") class DataGetterMixin: @property def raw_data(self) -> dict: if self._full_data is None: self._obtain_full_data() return self._full_data def _find_or_get(self, *keys: str, data: dict = None, i: int = None): i = 0 if i is None else i key = keys[i] if data is not None: if key in data: return data[key] else: self._obtain_full_data() d = self._full_data[keys[0]] for k in keys[1:]: d = d[k] return d else: if key in self._init_data: d = self._init_data[key] elif self._full_data is not None and key in self._full_data: d = self._full_data[key] else: self._obtain_full_data() d = self._full_data[key] i += 1 return self._find_or_get(*keys, data=d, i=i) if len(keys) > 1 else d class AsDictMixin: info_vars = () def as_dict(self, *, extra: bool = False) -> OrderedDict: assert len(self.info_vars) > 0, "'AsDictMixin' should not be used in this class if 'info_vars' is intended to be empty" dictionary = OrderedDict({"_struct": self.__class__.__name__} if extra else {}) for attr in self.info_vars: dictionary[attr] = getattr(self, attr) return dictionary class MediaItem(AsDictMixin): info_vars = ("typename", "src", "width", "height", "is_video") @classmethod def compose_items(cls, data: dict) -> List["MediaItem"]: def make(node: dict) -> "MediaItem": typename = node["__typename"] if typename == "GraphImage": item = get_biggest_media(node["display_resources"]) elif typename == "GraphVideo": item = {"src": node["video_url"]} return cls(typename, item.get("src"), item.get("config_width"), item.get("config_height")) typename = data["__typename"] if typename in ("GraphImage", "GraphVideo"): items = [make(data)] elif typename == "GraphSidecar": items = [] data = data["edge_sidecar_to_children"]["edges"] for node in data: items.append(make(node["node"])) else: raise AssertionError("unrecognized typename: '{}'".format(typename)) return items def __init__(self, typename: str, src: str, width: int, height: int): self.typename = typename self.src = src self.width = width self.height = height def __repr__(self) -> str: return "MediaItem(typename='{}', src='{}', width={}, height={})".format(self.typename, self.src, self.width, self.height) def __eq__(self, other) -> bool: return isinstance(other, MediaItem) and self.src == other.src def __hash__(self) -> int: return hash(self.src) @property def is_video(self) -> bool: return self.typename == "GraphStoryVideo" def download(self, dest: str, filename: str, *, write: bool = True, verify: bool = True) -> Optional[str]: try: f = None logger.debug("Downloading file {0} -> {1}".format(self.src, dest)) r = requests.get(self.src, stream=True, timeout=30) mime = r.headers["Content-Type"] bytesize = int(r.headers["Content-Length"]) size = int(bytesize / 1024) if mime == "video/mp4": ext = ".mp4" elif mime == "image/jpeg": ext = ".jpg" else: raise DownloadError("Unsupported MIME type: {0}".format(mime), self.src) finish_filename = filename + ext finish_path = os.path.join(dest, finish_filename) part_filename = filename + ext + ".part" 
part_path = os.path.join(dest, part_filename) if os.path.isfile(finish_path): if verify and verify_file(r.content, finish_path): logger.debug("~> [{0}] {1} [skip] (already downloaded)".format(mime, finish_filename)) return None if os.stat(finish_path).st_size == bytesize: logger.debug("~> [{0}] {1} [skip] (already downloaded)".format(mime, finish_filename)) return None f = open(part_path, "wb+") if write else BytesIO() for chunk in r.iter_content(1024): if chunk: f.write(chunk) logger.debug("=> [{0}] {1} [{2}x{3}] ({4} kB)".format(mime, finish_filename, self.width or "?", self.height or "?", size)) except Exception as e: raise DownloadError(str(e), self.src) from e else: if f: f.close() os.rename(part_path, finish_path) return finish_path finally: if f and not f.closed: f.close() class ReelItem(MediaItem): info_vars = ("typename", "src", "width", "height", "is_video", "id", "owner_username", "owner_id", "owner_profile_picture_url", "created_time", "expire_time", "cta_url") @classmethod def compose_items(cls, data: dict) -> List["ReelItem"]: def make(node: dict) -> "ReelItem": typename = node["__typename"] if typename == "GraphStoryImage": item = get_biggest_media(node["display_resources"]) elif typename == "GraphStoryVideo": item = get_biggest_media(node["video_resources"]) return cls(typename, item.get("src"), item.get("config_width"), item.get("config_height"), node) items = [] data = data["items"] for node in data: items.append(make(node)) return items def __init__(self, typename: str, src: str, width: int, height: int, data: dict): super().__init__(typename, src, width, height) self.data = data def __repr__(self) -> str: return "ReelItem(typename='{}', src='{}', width={}, height={})".format(self.typename, self.src, self.width, self.height) def __eq__(self, other) -> bool: return isinstance(other, ReelItem) and self.src == other.src and self.id == other.id def __hash__(self) -> int: return hash(self.id) @property def is_video(self) -> bool: return self.typename == "GraphStoryVideo" @property def id(self) -> str: return self.data["id"] @property def owner_username(self) -> str: return self.data["owner"]["username"] @property def owner_id(self) -> str: return self.data["owner"]["id"] @property def owner_profile_picture_url(self) -> str: return self.data["owner"]["profile_pic_url"] def owner_profile_picture(self) -> MediaItem: return MediaItem("GraphImage", self.owner_profile_picture_url, 150, 150) @property def created_time(self) -> int: return int(self.data["taken_at_timestamp"]) @property def expire_time(self) -> int: return int(self.data["expiring_at_timestamp"]) @property def cta_url(self) -> Optional[str]: return self.data["story_cta_url"] class Post(AsDictMixin, DataGetterMixin): info_vars = ("shortcode", "url", "typename", "id", "owner_username", "owner_id", "owner_profile_picture_url", "created_time", "caption", "media_count", "likes_count", "comments_count") @classmethod def from_shortcode(cls, insta, shortcode: str): post = cls(insta, {"shortcode": shortcode}) post._obtain_full_data() return post def __init__(self, insta, data: dict): self._insta = insta self._init_data = data self._full_data = None self.shortcode = data["shortcode"] def _obtain_full_data(self): if self._full_data is None: logger.debug("Fetching initial json data of Post(shortcode='{}')...".format(self.shortcode)) self._full_data = self._insta._fetch_json_data(POST_URL.format(shortcode=self.shortcode))["shortcode_media"] def __repr__(self) -> str: return "Post(shortcode='{0}', 
typename='{1}')".format(self.shortcode, self.typename) def __eq__(self, other) -> bool: return isinstance(other, Post) and self.shortcode == other.shortcode and self.id == other.id def __hash__(self) -> int: return hash(self.shortcode) def __len__(self) -> int: return self.media_count def __getitem__(self, index: int) -> MediaItem: return self.media_items()[index] def __iter__(self) -> MediaItem: for media in self.media_items(): yield media @property
MIT License
monarch-initiative/dipper
dipper/utils/CurieUtil.py
CurieUtil.get_curie_prefix
python
def get_curie_prefix(self, uri):
    for key, value in self.uri_map.items():
        if uri.startswith(key):
            return value
    return None
Return the CURIE prefix registered for the given URI, or None if no known base IRI matches.
https://github.com/monarch-initiative/dipper/blob/2a5fad1223b5dfc75311e1927fd56e2943253bc7/dipper/utils/CurieUtil.py#L39-L44
import logging

__author__ = 'condit@sdsc.edu'

LOG = logging.getLogger(__name__)


class CurieUtil(object):

    def __init__(self, curie_map):
        self.curie_map = curie_map
        if curie_map is not None:
            if len(set(curie_map.keys())) != len(set(curie_map.values())):
                LOG.warning("Curie map is NOT one to one!")
                LOG.warning(
                    "`get_curie_prefix(IRI)` "
                    "may return the same prefix for different base IRI")
        self.uri_map = {}
        for key, value in curie_map.items():
            self.uri_map[value] = key
        return

    def get_curie(self, uri):
        prefix = self.get_curie_prefix(uri)
        if prefix is not None:
            key = self.curie_map[prefix]
            return f'{prefix}:{uri[len(key):len(uri)]}'
        return None
BSD 3-Clause New or Revised License
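A small, self-contained sketch of `get_curie_prefix` and `get_curie`, using a made-up two-entry curie map; the import path follows the record's function_path and is an assumption.

from dipper.utils.CurieUtil import CurieUtil   # path assumed from the record above

# Hypothetical two-entry curie map for illustration.
curie_map = {
    'HP': 'http://purl.obolibrary.org/obo/HP_',
    'OMIM': 'http://omim.org/entry/',
}
cu = CurieUtil(curie_map)

print(cu.get_curie_prefix('http://purl.obolibrary.org/obo/HP_0000001'))   # HP
print(cu.get_curie('http://purl.obolibrary.org/obo/HP_0000001'))          # HP:0000001
print(cu.get_curie_prefix('http://example.org/unknown/123'))              # None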
slawek87/yql-finance
yql/api.py
YQL.__init__
python
def __init__(self, symbol, start_data, end_data):
    self.start_date = self.to_date(start_data)
    self.end_date = self.to_date(end_data)
    self.symbol = symbol
    self.data = self.fetch_data()
Sets up the basic instance (self) variables.
https://github.com/slawek87/yql-finance/blob/52b1ac6720db09c4d8a9864b171506e90a8d3964/yql/api.py#L27-L33
import datetime

from dateutil.relativedelta import relativedelta

from yql.request import Request
from yql import const


class YQL(object):
    request = Request()

    def __repr__(self):
        return '<YQL Object: symbol %s start_date / %s end_date %s>' % (self.symbol, self.start_date, self.end_date)
BSD 3-Clause New or Revised License
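A usage sketch based on the constructor above; the symbol and date range are placeholders, and `to_date`/`fetch_data` (defined elsewhere in the class) perform the actual parsing and Yahoo query. The import path mirrors the record's function_path and is an assumption.

from yql.api import YQL   # path assumed from the record above

prices = YQL('AAPL', '2014-01-01', '2014-01-10')   # placeholder symbol and dates
print(prices.symbol, prices.start_date, prices.end_date)
print(prices.data)   # whatever fetch_data() returned for the range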
openxaiproject/eeg-explanation-model
3rd Party/modules/render.py
enlarge_image
python
def enlarge_image(img, scaling = 3):
    if scaling < 1 or not isinstance(scaling,int):
        print('scaling factor needs to be an int >= 1')

    if len(img.shape) == 2:
        H,W = img.shape
        out = np.zeros((scaling*H, scaling*W))
        for h in range(H):
            fh = scaling*h
            for w in range(W):
                fw = scaling*w
                out[fh:fh+scaling, fw:fw+scaling] = img[h,w]
    elif len(img.shape) == 3:
        H,W,D = img.shape
        out = np.zeros((scaling*H, scaling*W,D))
        for h in range(H):
            fh = scaling*h
            for w in range(W):
                fw = scaling*w
                out[fh:fh+scaling, fw:fw+scaling,:] = img[h,w,:]

    return out
Enlarges a given input matrix by replicating each pixel value scaling times in horizontal and vertical direction.

Parameters
----------
img : numpy.ndarray
    array of shape [H x W] OR [H x W x D]
scaling : int
    positive integer value > 0

Returns
-------
out : numpy.ndarray
    two-dimensional array of shape [scaling*H x scaling*W]
    OR
    three-dimensional array of shape [scaling*H x scaling*W x D]
    depending on the dimensionality of the input
https://github.com/openxaiproject/eeg-explanation-model/blob/d14b7875747a621f7688c42dd72442c72161b919/3rd Party/modules/render.py#L48-L94
import numpy as np
import matplotlib.cm
from matplotlib.cm import ScalarMappable


def vec2im(V, shape = () ):
    if len(shape) < 2:
        shape = [np.sqrt(V.size)]*2
        shape = list(map(int, shape))

    return np.reshape(V, shape)
Apache License 2.0
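A self-contained check of `enlarge_image`, assuming the function has been imported from the render module above; the 2x2 input array is arbitrary.

import numpy as np
# from modules.render import enlarge_image   # illustrative import path

img = np.array([[0., 1.],
                [2., 3.]])
out = enlarge_image(img, scaling=3)
print(out.shape)       # (6, 6)
print(out[0:3, 3:6])   # a 3x3 block filled with the original pixel value 1.0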
gofrendiasgard/kokoropy
kokoropy/packages/sqlalchemy/sql/dml.py
UpdateBase.params
python
def params(self, *arg, **kw):
    raise NotImplementedError(
        "params() is not supported for INSERT/UPDATE/DELETE statements."
        " To set the values for an INSERT or UPDATE statement, use"
        " stmt.values(**parameters).")
Set the parameters for the statement. This method raises ``NotImplementedError`` on the base class, and is overridden by :class:`.ValuesBase` to provide the SET/VALUES clause of UPDATE and INSERT.
https://github.com/gofrendiasgard/kokoropy/blob/49c8ca4b7dd2a084f2ced33fc5987b8a8b62c995/kokoropy/packages/sqlalchemy/sql/dml.py#L53-L64
from .base import Executable, _generative, _from_objects, DialectKWArgs from .elements import ClauseElement, _literal_as_text, Null, and_, _clone from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes from .. import util from .. import exc class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement): __visit_name__ = 'update_base' _execution_options = Executable._execution_options.union({'autocommit': True}) _hints = util.immutabledict() _prefixes = () def _process_colparams(self, parameters): def process_single(p): if isinstance(p, (list, tuple)): return dict( (c.key, pval) for c, pval in zip(self.table.c, p) ) else: return p if (isinstance(parameters, (list, tuple)) and parameters and isinstance(parameters[0], (list, tuple, dict))): if not self._supports_multi_parameters: raise exc.InvalidRequestError( "This construct does not support " "multiple parameter sets.") return [process_single(p) for p in parameters], True else: return process_single(parameters), False
MIT License
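A sketch of the behaviour the docstring describes: calling `params()` on an INSERT construct raises, while `values()` is the supported way to populate the VALUES clause. Table and column names are placeholders.

from sqlalchemy import Table, Column, Integer, String, MetaData

metadata = MetaData()
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))

stmt = users.insert()
try:
    stmt.params(name='alice')        # not supported for INSERT/UPDATE/DELETE
except NotImplementedError as exc:
    print(exc)

stmt = users.insert().values(name='alice')   # the supported spelling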
gdikov/adversarial-variational-bayes
third_party/ite/cost/meta_a.py
MASpearmanLT.estimation
python
def estimation(self, y, ds=None):
    if ds is None:
        ds = ones(y.shape[1], dtype='int')
    self.verification_compatible_subspace_dimensions(y, ds)
    self.verification_one_dimensional_subspaces(ds)

    num_of_samples = y.shape[0]
    k = int(floor(sqrt(num_of_samples)))
    self.spearman_cond_lt_co.p = k / num_of_samples
    a = self.spearman_cond_lt_co.estimation(y, ds)

    return a
Estimate lower tail dependence.

Parameters
----------
y : (number of samples, dimension)-ndarray
    One row of y corresponds to one sample.
ds : int vector, vector of ones
    ds[i] = 1 (for all i): the i^th subspace is one-dimensional. If ds is not
    given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')]
    is emulated inside the function.

Returns
-------
a : float
    Estimated lower tail dependence.

References
----------
Friedrich Schmid and Rafael Schmidt. Multivariate conditional versions of
Spearman's rho and related measures of tail dependence. Journal of
Multivariate Analysis, 98:1123-1140, 2007.

C. Spearman. The proof and measurement of association between two things.
The American Journal of Psychology, 15:72-101, 1904.

Examples
--------
a = co.estimation(y,ds)
https://github.com/gdikov/adversarial-variational-bayes/blob/ebd692c70349f34bcb3a2086269bd814cafce96f/third_party/ite/cost/meta_a.py#L58-L104
from numpy import sqrt, floor, ones

from ite.cost.x_initialization import InitX
from ite.cost.x_verification import VerOneDSubspaces, VerCompSubspaceDims
from ite.cost.x_factory import co_factory


class MASpearmanLT(InitX, VerOneDSubspaces, VerCompSubspaceDims):

    def __init__(self, mult=True, spearman_cond_lt_co_name='BASpearmanCondLT',
                 spearman_cond_lt_co_pars=None):
        spearman_cond_lt_co_pars = spearman_cond_lt_co_pars or {}
        super().__init__(mult=mult)

        spearman_cond_lt_co_pars['mult'] = mult
        self.spearman_cond_lt_co = co_factory(spearman_cond_lt_co_name,
                                              **spearman_cond_lt_co_pars)
MIT License
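A sketch following the docstring's `a = co.estimation(y,ds)` example, with random sample data; the import path assumes the vendored `ite` package is importable as shown in the context.

import numpy as np
from ite.cost.meta_a import MASpearmanLT   # import path is an assumption

co = MASpearmanLT()
y = np.random.rand(1000, 3)   # 1000 samples, three one-dimensional subspaces
a = co.estimation(y)          # ds defaults to a vector of ones
print(a)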
joke2k/faker
faker/providers/barcode/en_US/__init__.py
Provider.upc_a
python
def upc_a(
    self,
    upc_ae_mode: bool = False,
    base: Optional[str] = None,
    number_system_digit: Optional[int] = None,
) -> str:
    if upc_ae_mode is True:
        return self._upc_ae(base=base, number_system_digit=number_system_digit)
    else:
        ean13 = self.ean13(leading_zero=True)
        return ean13[1:]
Generate a 12-digit UPC-A barcode.

The value of ``upc_ae_mode`` controls how barcodes will be generated. If ``False`` (default), barcodes are not guaranteed to have a UPC-E equivalent. In this mode, the method uses |EnUsBarcodeProvider.ean13| under the hood, and the values of ``base`` and ``number_system_digit`` will be ignored.

If ``upc_ae_mode`` is ``True``, the resulting barcodes are guaranteed to have a UPC-E equivalent, and the values of ``base`` and ``number_system_digit`` will be used to control what is generated.

Under this mode, ``base`` is expected to have a 6-digit string value. If any other value is supplied, a random 6-digit string will be used instead. As for ``number_system_digit``, the expected value is a ``0`` or a ``1``. If any other value is provided, this method will randomly choose from the two.

.. important::
   When ``upc_ae_mode`` is enabled, you might encounter instances where different values of ``base`` (e.g. ``'120003'`` and ``'120004'``) produce the same UPC-A barcode. This is normal, and the reason lies within the whole conversion process. To learn more about this and what ``base`` and ``number_system_digit`` actually represent, please refer to |EnUsBarcodeProvider.upc_e|.

:sample:
:sample: upc_ae_mode=True, number_system_digit=0
:sample: upc_ae_mode=True, number_system_digit=1
:sample: upc_ae_mode=True, base='123456', number_system_digit=0
:sample: upc_ae_mode=True, base='120003', number_system_digit=0
:sample: upc_ae_mode=True, base='120004', number_system_digit=0
https://github.com/joke2k/faker/blob/3818045332f4cb2911e5ac18f69e385bf0c51af0/faker/providers/barcode/en_US/__init__.py#L149-L192
import re from itertools import product from typing import Dict, Optional, Pattern from .. import PrefixType from .. import Provider as BarcodeProvider class Provider(BarcodeProvider): local_prefixes = ( *product((0,), range(10)), *product((1,), range(4)), ) upc_e_base_pattern: Pattern = re.compile(r"^\d{6}$") upc_ae_pattern1: Pattern = re.compile( r"^(?P<number_system_digit>[01])" r"(?=\d{11}$)" r"(?P<mfr_code>\d{2})" r"(?:(?P<extra>[012])0{4})" r"(?P<product_code>\d{3})" r"(?P<check_digit>\d)$", ) upc_ae_pattern2: Pattern = re.compile( r"^(?P<number_system_digit>[01])" r"(?=\d{11}$)" r"(?P<mfr_code>\d{3,4}?)" r"(?:0{5})" r"(?P<product_code>\d{1,2})" r"(?P<check_digit>\d)$", ) upc_ae_pattern3: Pattern = re.compile( r"^(?P<number_system_digit>[01])" r"(?=\d{11}$)" r"(?P<mfr_code>\d{5})" r"(?:0{4}(?P<extra>[5-9]))" r"(?P<check_digit>\d)$", ) def ean13(self, prefixes: PrefixType = (), leading_zero: Optional[bool] = None) -> str: if not prefixes: if leading_zero is True: prefixes = ((0,),) elif leading_zero is False: prefixes = ((self.random_int(1, 9),),) return super().ean13(prefixes=prefixes) def _convert_upc_a2e(self, upc_a: str) -> str: if not isinstance(upc_a, str): raise TypeError("`upc_a` is not a string") m1 = self.upc_ae_pattern1.match(upc_a) m2 = self.upc_ae_pattern2.match(upc_a) m3 = self.upc_ae_pattern3.match(upc_a) if not any([m1, m2, m3]): raise ValueError("`upc_a` has an invalid value") upc_e_template = "{number_system_digit}{mfr_code}{product_code}{extra}{check_digit}" if m1: upc_e = upc_e_template.format(**m1.groupdict()) elif m2: groupdict: Dict[str, str] = m2.groupdict() mfr_code = groupdict.get("mfr_code") or "" groupdict["extra"] = str(len(mfr_code)) upc_e = upc_e_template.format(**groupdict) elif m3: groupdict = m3.groupdict() groupdict["product_code"] = "" upc_e = upc_e_template.format(**groupdict) return upc_e def _upc_ae(self, base: Optional[str] = None, number_system_digit: Optional[int] = None) -> str: base_ = ( [int(x) for x in base] if isinstance(base, str) and self.upc_e_base_pattern.match(base) else [self.random_int(0, 9) for _ in range(6)] ) if number_system_digit not in [0, 1]: number_system_digit = self.random_int(0, 1) if base_[-1] <= 2: code = base_[:2] + base_[-1:] + [0] * 4 + base_[2:-1] elif base_[-1] <= 4: code = base_[: base_[-1]] + [0] * 5 + base_[base_[-1] : -1] else: code = base_[:5] + [0] * 4 + base_[-1:] code.insert(0, number_system_digit) weights = [3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3] weighted_sum = sum(x * y for x, y in zip(code, weights)) check_digit = (10 - weighted_sum % 10) % 10 code.append(check_digit) return "".join(str(x) for x in code)
MIT License
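A usage sketch through Faker's provider mechanism: the method above is reached from a `Faker` instance created with the en_US locale. Output values are illustrative.

from faker import Faker

fake = Faker('en_US')
print(fake.upc_a())                                          # e.g. '041345657395'
print(fake.upc_a(upc_ae_mode=True, number_system_digit=0))   # has a UPC-E equivalent
print(fake.upc_a(upc_ae_mode=True, base='123456', number_system_digit=0))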
common-workflow-language/cwltool
cwltool/provenance_profile.py
ProvenanceProfile.declare_artefact
python
def declare_artefact(self, value: Any) -> ProvEntity:
    if value is None:
        return self.document.entity(CWLPROV["None"], {PROV_LABEL: "None"})

    if isinstance(value, (bool, int, float)):
        entity = self.document.entity(uuid.uuid4().urn, {PROV_VALUE: value})
        self.research_object.add_uri(entity.identifier.uri)
        return entity

    if isinstance(value, (str, str)):
        (entity, _) = self.declare_string(value)
        return entity

    if isinstance(value, bytes):
        byte_s = BytesIO(value)
        data_file = self.research_object.add_data_file(byte_s)
        data_id = "data:%s" % PurePosixPath(data_file).stem
        return self.document.entity(
            data_id,
            {PROV_TYPE: WFPROV["Artifact"], PROV_VALUE: str(value)},
        )

    if isinstance(value, MutableMapping):
        if "@id" in value:
            entities = self.document.get_record(value["@id"])
            if entities:
                return entities[0]

        if value.get("class") == "File":
            (entity, _, _) = self.declare_file(value)
            value["@id"] = entity.identifier.uri
            return entity

        if value.get("class") == "Directory":
            entity = self.declare_directory(value)
            value["@id"] = entity.identifier.uri
            return entity

        coll_id = value.setdefault("@id", uuid.uuid4().urn)
        coll = self.document.entity(
            coll_id,
            [
                (PROV_TYPE, WFPROV["Artifact"]),
                (PROV_TYPE, PROV["Collection"]),
                (PROV_TYPE, PROV["Dictionary"]),
            ],
        )

        if value.get("class"):
            _logger.warning("Unknown data class %s.", value["class"])
            coll.add_asserted_type(CWLPROV[value["class"]])

        coll_attribs = []
        for (key, val) in value.items():
            v_ent = self.declare_artefact(val)
            self.document.membership(coll, v_ent)
            m_entity = self.document.entity(uuid.uuid4().urn)
            m_entity.add_asserted_type(PROV["KeyEntityPair"])
            m_entity.add_attributes(
                {PROV["pairKey"]: str(key), PROV["pairEntity"]: v_ent}
            )
            coll_attribs.append((PROV["hadDictionaryMember"], m_entity))
        coll.add_attributes(coll_attribs)
        self.research_object.add_uri(coll.identifier.uri)
        return coll

    try:
        members = []
        for each_input_obj in iter(value):
            e = self.declare_artefact(each_input_obj)
            members.append(e)

        coll = self.document.entity(
            uuid.uuid4().urn,
            [
                (PROV_TYPE, WFPROV["Artifact"]),
                (PROV_TYPE, PROV["Collection"]),
            ],
        )
        if not members:
            coll.add_asserted_type(PROV["EmptyCollection"])
        else:
            for member in members:
                self.document.membership(coll, member)
        self.research_object.add_uri(coll.identifier.uri)
        return coll
    except TypeError:
        _logger.warning("Unrecognized type %s of %r", type(value), value)
        entity = self.document.entity(uuid.uuid4().urn, {PROV_LABEL: repr(value)})
        self.research_object.add_uri(entity.identifier.uri)
        return entity
Create data artefact entities for all file objects.
https://github.com/common-workflow-language/cwltool/blob/910616d937f01ffc3784a8011dcabbbb5c618a05/cwltool/provenance_profile.py#L491-L609
import copy import datetime import logging import urllib import uuid from io import BytesIO from pathlib import PurePath, PurePosixPath from socket import getfqdn from typing import ( Any, List, MutableMapping, MutableSequence, Optional, Tuple, Union, cast, ) from prov.identifier import Identifier from prov.model import PROV, PROV_LABEL, PROV_TYPE, PROV_VALUE, ProvDocument, ProvEntity from schema_salad.sourceline import SourceLine from typing_extensions import TYPE_CHECKING from .errors import WorkflowException from .job import CommandLineJob, JobBase from .loghandler import _logger from .process import Process, shortname from .provenance_constants import ( ACCOUNT_UUID, CWLPROV, ENCODING, FOAF, METADATA, ORE, PROVENANCE, RO, SCHEMA, SHA1, SHA256, TEXT_PLAIN, UUID, WF4EVER, WFDESC, WFPROV, ) from .stdfsaccess import StdFsAccess from .utils import CWLObjectType, JobsType, get_listing, posix_path, versionstring from .workflow_job import WorkflowJob if TYPE_CHECKING: from .provenance import ResearchObject def copy_job_order( job: Union[Process, JobsType], job_order_object: CWLObjectType ) -> CWLObjectType: if not isinstance(job, WorkflowJob): return job_order_object customised_job = {} debug = _logger.isEnabledFor(logging.DEBUG) for each, i in enumerate(job.tool["inputs"]): with SourceLine(job.tool["inputs"], each, WorkflowException, debug): iid = shortname(i["id"]) if iid in job_order_object: customised_job[iid] = copy.deepcopy(job_order_object[iid]) elif "default" in i: customised_job[iid] = copy.deepcopy(i["default"]) else: pass return customised_job class ProvenanceProfile: def __init__( self, research_object: "ResearchObject", full_name: str, host_provenance: bool, user_provenance: bool, orcid: str, fsaccess: StdFsAccess, run_uuid: Optional[uuid.UUID] = None, ) -> None: self.fsaccess = fsaccess self.orcid = orcid self.research_object = research_object self.folder = self.research_object.folder self.document = ProvDocument() self.host_provenance = host_provenance self.user_provenance = user_provenance self.engine_uuid = research_object.engine_uuid self.add_to_manifest = self.research_object.add_to_manifest if self.orcid: _logger.debug("[provenance] Creator ORCID: %s", self.orcid) self.full_name = full_name if self.full_name: _logger.debug("[provenance] Creator Full name: %s", self.full_name) self.workflow_run_uuid = run_uuid or uuid.uuid4() self.workflow_run_uri = self.workflow_run_uuid.urn self.generate_prov_doc() def __str__(self) -> str: return "ProvenanceProfile <{}> in <{}>".format( self.workflow_run_uri, self.research_object, ) def generate_prov_doc(self) -> Tuple[str, ProvDocument]: def host_provenance(document: ProvDocument) -> None: document.add_namespace(CWLPROV) document.add_namespace(UUID) document.add_namespace(FOAF) hostname = getfqdn() document.agent( ACCOUNT_UUID, { PROV_TYPE: FOAF["OnlineAccount"], "prov:location": hostname, CWLPROV["hostname"]: hostname, }, ) self.cwltool_version = "cwltool %s" % versionstring().split()[-1] self.document.add_namespace("wfprov", "http://purl.org/wf4ever/wfprov#") self.document.add_namespace("wfdesc", "http://purl.org/wf4ever/wfdesc#") self.document.add_namespace("cwlprov", "https://w3id.org/cwl/prov#") self.document.add_namespace("foaf", "http://xmlns.com/foaf/0.1/") self.document.add_namespace("schema", "http://schema.org/") self.document.add_namespace("orcid", "https://orcid.org/") self.document.add_namespace("id", "urn:uuid:") self.document.add_namespace("data", "urn:hash::sha1:") self.document.add_namespace(SHA256, "nih:sha-256;") 
self.document.add_namespace("researchobject", self.research_object.base_uri) self.metadata_ns = self.document.add_namespace( "metadata", self.research_object.base_uri + METADATA + "/" ) self.provenance_ns = self.document.add_namespace( "provenance", self.research_object.base_uri + posix_path(PROVENANCE) + "/" ) ro_identifier_workflow = self.research_object.base_uri + "workflow/packed.cwl#" self.wf_ns = self.document.add_namespace("wf", ro_identifier_workflow) ro_identifier_input = ( self.research_object.base_uri + "workflow/primary-job.json#" ) self.document.add_namespace("input", ro_identifier_input) account = self.document.agent(ACCOUNT_UUID) if self.orcid or self.full_name: person = {PROV_TYPE: PROV["Person"], "prov:type": SCHEMA["Person"]} if self.full_name: person["prov:label"] = self.full_name person["foaf:name"] = self.full_name person["schema:name"] = self.full_name else: pass agent = self.document.agent(self.orcid or uuid.uuid4().urn, person) self.document.actedOnBehalfOf(account, agent) else: if self.host_provenance: host_provenance(self.document) if self.user_provenance: self.research_object.user_provenance(self.document) wfengine = self.document.agent( self.engine_uuid, { PROV_TYPE: PROV["SoftwareAgent"], "prov:type": WFPROV["WorkflowEngine"], "prov:label": self.cwltool_version, }, ) self.document.wasStartedBy(wfengine, None, account, datetime.datetime.now()) self.document.activity( self.workflow_run_uri, datetime.datetime.now(), None, { PROV_TYPE: WFPROV["WorkflowRun"], "prov:label": "Run of workflow/packed.cwl#main", }, ) main_workflow = "wf:main" self.document.wasAssociatedWith( self.workflow_run_uri, self.engine_uuid, main_workflow ) self.document.wasStartedBy( self.workflow_run_uri, None, self.engine_uuid, datetime.datetime.now() ) return (self.workflow_run_uri, self.document) def evaluate( self, process: Process, job: JobsType, job_order_object: CWLObjectType, research_obj: "ResearchObject", ) -> None: if not hasattr(process, "steps"): self.prospective_prov(job) customised_job = copy_job_order(job, job_order_object) self.used_artefacts(customised_job, self.workflow_run_uri) research_obj.create_job(customised_job) elif hasattr(job, "workflow"): self.prospective_prov(job) customised_job = copy_job_order(job, job_order_object) self.used_artefacts(customised_job, self.workflow_run_uri) def record_process_start( self, process: Process, job: JobsType, process_run_id: Optional[str] = None ) -> Optional[str]: if not hasattr(process, "steps"): process_run_id = self.workflow_run_uri elif not hasattr(job, "workflow"): name = "" if isinstance(job, (CommandLineJob, JobBase, WorkflowJob)): name = job.name process_name = urllib.parse.quote(name, safe=":/,#") process_run_id = self.start_process(process_name, datetime.datetime.now()) return process_run_id def start_process( self, process_name: str, when: datetime.datetime, process_run_id: Optional[str] = None, ) -> str: if process_run_id is None: process_run_id = uuid.uuid4().urn prov_label = "Run of workflow/packed.cwl#main/" + process_name self.document.activity( process_run_id, None, None, {PROV_TYPE: WFPROV["ProcessRun"], PROV_LABEL: prov_label}, ) self.document.wasAssociatedWith( process_run_id, self.engine_uuid, str("wf:main/" + process_name) ) self.document.wasStartedBy( process_run_id, None, self.workflow_run_uri, when, None, None ) return process_run_id def record_process_end( self, process_name: str, process_run_id: str, outputs: Union[CWLObjectType, MutableSequence[CWLObjectType], None], when: datetime.datetime, ) -> None: 
self.generate_output_prov(outputs, process_run_id, process_name) self.document.wasEndedBy(process_run_id, None, self.workflow_run_uri, when) def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, str]: if value["class"] != "File": raise ValueError("Must have class:File: %s" % value) entity = None checksum = None if "checksum" in value: csum = cast(str, value["checksum"]) (method, checksum) = csum.split("$", 1) if method == SHA1 and self.research_object.has_data_file(checksum): entity = self.document.entity("data:" + checksum) if not entity and "location" in value: location = str(value["location"]) with self.fsaccess.open(location, "rb") as fhandle: relative_path = self.research_object.add_data_file(fhandle) checksum = PurePath(relative_path).name entity = self.document.entity( "data:" + checksum, {PROV_TYPE: WFPROV["Artifact"]} ) if "checksum" not in value: value["checksum"] = f"{SHA1}${checksum}" if not entity and "contents" in value: entity, checksum = self.declare_string(cast(str, value["contents"])) if not entity or not checksum: raise ValueError( "class:File but missing checksum/location/content: %r" % value ) file_id = value.setdefault("@id", uuid.uuid4().urn) file_entity = self.document.entity( file_id, [(PROV_TYPE, WFPROV["Artifact"]), (PROV_TYPE, WF4EVER["File"])], ) if "basename" in value: file_entity.add_attributes({CWLPROV["basename"]: value["basename"]}) if "nameroot" in value: file_entity.add_attributes({CWLPROV["nameroot"]: value["nameroot"]}) if "nameext" in value: file_entity.add_attributes({CWLPROV["nameext"]: value["nameext"]}) self.document.specializationOf(file_entity, entity) for sec in cast( MutableSequence[CWLObjectType], value.get("secondaryFiles", []) ): if sec["class"] == "File": (sec_entity, _, _) = self.declare_file(sec) elif sec["class"] == "Directory": sec_entity = self.declare_directory(sec) else: raise ValueError(f"Got unexpected secondaryFiles value: {sec}") self.document.derivation( sec_entity, file_entity, other_attributes={PROV["type"]: CWLPROV["SecondaryFile"]}, ) return file_entity, entity, checksum def declare_directory(self, value: CWLObjectType) -> ProvEntity: dir_id = cast(str, value.setdefault("@id", uuid.uuid4().urn)) ore_doc_fn = dir_id.replace("urn:uuid:", "directory-") + ".ttl" dir_bundle = self.document.bundle(self.metadata_ns[ore_doc_fn]) coll = self.document.entity( dir_id, [ (PROV_TYPE, WFPROV["Artifact"]), (PROV_TYPE, PROV["Collection"]), (PROV_TYPE, PROV["Dictionary"]), (PROV_TYPE, RO["Folder"]), ], ) coll_b = dir_bundle.entity( dir_id, [(PROV_TYPE, RO["Folder"]), (PROV_TYPE, ORE["Aggregation"])], ) self.document.mentionOf(dir_id + "#ore", dir_id, dir_bundle.identifier) coll_attribs = [(ORE["isDescribedBy"], dir_bundle.identifier)] coll_b_attribs = [] is_empty = True if "listing" not in value: get_listing(self.fsaccess, value) for entry in cast(MutableSequence[CWLObjectType], value.get("listing", [])): is_empty = False entity = self.declare_artefact(entry) self.document.membership(coll, entity) m_id = uuid.uuid4().urn m_entity = self.document.entity(m_id) m_b = dir_bundle.entity(m_id) m_entity.add_asserted_type(PROV["KeyEntityPair"]) m_entity.add_attributes( { PROV["pairKey"]: entry["basename"], PROV["pairEntity"]: entity, } ) m_b.add_asserted_type(RO["FolderEntry"]) m_b.add_asserted_type(ORE["Proxy"]) m_b.add_attributes( { RO["entryName"]: entry["basename"], ORE["proxyIn"]: coll, ORE["proxyFor"]: entity, } ) coll_attribs.append((PROV["hadDictionaryMember"], m_entity)) coll_b_attribs.append((ORE["aggregates"], 
m_b)) coll.add_attributes(coll_attribs) coll_b.add_attributes(coll_b_attribs) ore_doc = ProvDocument() ore_doc.add_namespace(ORE) ore_doc.add_namespace(RO) ore_doc.add_namespace(UUID) ore_doc.add_bundle(dir_bundle) ore_doc = ore_doc.flattened() ore_doc_path = str(PurePosixPath(METADATA, ore_doc_fn)) with self.research_object.write_bag_file(ore_doc_path) as provenance_file: ore_doc.serialize(provenance_file, format="rdf", rdf_format="turtle") self.research_object.add_annotation( dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri ) if is_empty: coll.add_asserted_type(PROV["EmptyCollection"]) coll.add_asserted_type(PROV["EmptyDictionary"]) self.research_object.add_uri(coll.identifier.uri) return coll def declare_string(self, value: str) -> Tuple[ProvEntity, str]: byte_s = BytesIO(str(value).encode(ENCODING)) data_file = self.research_object.add_data_file(byte_s, content_type=TEXT_PLAIN) checksum = PurePosixPath(data_file).name data_id = "data:%s" % PurePosixPath(data_file).stem entity = self.document.entity( data_id, {PROV_TYPE: WFPROV["Artifact"], PROV_VALUE: str(value)} ) return entity, checksum
Apache License 2.0
tugstugi/pytorch-saltnet
models/basenet.py
create_basenet
python
def create_basenet(name, pretrained):
    if name.startswith('vgg'):
        layers, bn, n_pretrained = vgg(name, pretrained)
    elif name.startswith('resnet'):
        layers, bn, n_pretrained = resnet(name, pretrained)
    elif name.startswith('resnext'):
        layers, bn, n_pretrained = resnext(name, pretrained)
    elif name.startswith('se'):
        layers, bn, n_pretrained = se_net(name, pretrained)
    elif name == 'darknet':
        layers, bn, n_pretrained = darknet(pretrained)
    else:
        raise NotImplementedError(name)  # unsupported base network name

    if pretrained in ('coco', 'oid'):
        load_pretrained_weights(layers, name, pretrained)
        n_pretrained = len(layers)

    return layers, bn, n_pretrained
Parameters
----------
name: model name
pretrained: dataset name

Returns
-------
list of modules, is_batchnorm, num_of_pretrained_module
https://github.com/tugstugi/pytorch-saltnet/blob/a3e63b357f975924e75b5db24ee528c5797f5efb/models/basenet.py#L341-L369
from collections import OrderedDict import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo import torchvision from .inplace_abn import ActivatedBatchNorm BASENET_CHOICES = ('vgg11', 'vgg13', 'vgg16', 'vgg19', 'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext101_32x4d', 'resnext101_64x4d', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'senet154', 'darknet') MODEL_ZOO_URL = 'https://drontheimerstr.synology.me/model_zoo/' MODEL_URLS = { 'resnet50': { 'voc': MODEL_ZOO_URL + 'SSDretina_resnet50_c21-1c85a349.pth', 'coco': MODEL_ZOO_URL + 'SSDretina_resnet50_c81-a584ead7.pth', 'oid': MODEL_ZOO_URL + 'SSDretina_resnet50_c501-06095077.pth'}, 'resnext101_32x4d': {'coco': MODEL_ZOO_URL + 'SSDretina_resnext101_32x4d_c81-fdb37546.pth'} } def conv(*args, **kwargs): return lambda last_layer: nn.Conv2d(last_layer.out_channels, *args, **kwargs) def get_out_channels(layers): if hasattr(layers, 'out_channels'): return layers.out_channels elif isinstance(layers, int): return layers else: for i in range(len(layers) - 1, -1, -1): layer = layers[i] if hasattr(layer, 'out_channels'): return layer.out_channels elif isinstance(layer, nn.Sequential): return get_out_channels(layer) raise RuntimeError("cant get_out_channels from {}".format(layers)) def Sequential(*args): f = nn.Sequential(*args) f.out_channels = get_out_channels(args) return f def sequential(*args): def create_sequential(last_layer): layers = [] for a in args: layers.append(a(last_layer)) last_layer = layers[-1] return Sequential(*layers) return create_sequential def ConvBnRelu(*args, **kwargs): c = nn.Conv2d(*args, **kwargs) return Sequential(c, nn.BatchNorm2d(c.out_channels), nn.ReLU(inplace=True)) def conv_bn_relu(*args, **kwargs): return lambda last_layer: ConvBnRelu(get_out_channels(last_layer), *args, **kwargs) def ConvRelu(*args, **kwargs): return Sequential(nn.Conv2d(*args, **kwargs), nn.ReLU(inplace=True)) def conv_relu(*args, **kwargs): return lambda last_layer: ConvRelu(get_out_channels(last_layer), *args, **kwargs) def ReluConv(*args, **kwargs): return Sequential(nn.ReLU(inplace=True), nn.Conv2d(*args, **kwargs)) def relu_conv(*args, **kwargs): return lambda last_layer: ReluConv(get_out_channels(last_layer), *args, **kwargs) def BnReluConv(*args, **kwargs): c = nn.Conv2d(*args, **kwargs) return Sequential(nn.BatchNorm2d(c.in_channels), nn.ReLU(inplace=True), c) def bn_relu_conv(*args, **kwargs): return lambda last_layer: BnReluConv(get_out_channels(last_layer), *args, **kwargs) def vgg_base_extra(bn): pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) block = ConvBnRelu if bn else ConvRelu conv6 = block(512, 1024, kernel_size=3, padding=6, dilation=6) conv7 = block(1024, 1024, kernel_size=1) return [pool5, conv6, conv7] def vgg(name, pretrained): if name == 'vgg11': net_class = torchvision.models.vgg11 elif name == 'vgg13': net_class = torchvision.models.vgg13 elif name == 'vgg16': net_class = torchvision.models.vgg16 elif name == 'vgg19': net_class = torchvision.models.vgg19 elif name == 'vgg11_bn': net_class = torchvision.models.vgg11_bn elif name == 'vgg13_bn': net_class = torchvision.models.vgg13_bn elif name == 'vgg16_bn': net_class = torchvision.models.vgg16_bn elif name == 'vgg19_bn': net_class = torchvision.models.vgg19_bn else: raise RuntimeError("unknown model {}".format(name)) imagenet_pretrained = pretrained == 'imagenet' vgg = net_class(pretrained=imagenet_pretrained) if name == 
'vgg16': vgg.features[16].ceil_mode = True bn = name.endswith('bn') layers = [] l = [] for i in range(len(vgg.features) - 1): if isinstance(vgg.features[i], nn.MaxPool2d): layers.append(l) l = [] l.append(vgg.features[i]) l += vgg_base_extra(bn=bn) layers.append(l) block = ConvBnRelu if bn else ConvRelu layer5 = [block(1024, 256, 1, 1, 0), block(256, 512, 3, 2, 1)] layers.append(layer5) layers = [Sequential(*l) for l in layers] n_pretrained = 4 if imagenet_pretrained else 0 return layers, bn, n_pretrained def resnet(name, pretrained): if name == 'resnet18': net_class = torchvision.models.resnet18 elif name == 'resnet34': net_class = torchvision.models.resnet34 elif name == 'resnet50': net_class = torchvision.models.resnet50 elif name == 'resnet101': net_class = torchvision.models.resnet101 elif name == 'resnet152': net_class = torchvision.models.resnet152 imagenet_pretrained = pretrained == 'imagenet' resnet = net_class(pretrained=imagenet_pretrained) layer0 = Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) layer0[-1].out_channels = resnet.bn1.num_features def get_out_channels_from_resnet_block(layer): block = layer[-1] if isinstance(block, torchvision.models.resnet.BasicBlock): return block.conv2.out_channels elif isinstance(block, torchvision.models.resnet.Bottleneck): return block.conv3.out_channels raise RuntimeError("unknown resnet block: {}".format(block)) resnet.layer1.out_channels = resnet.layer1[-1].out_channels = get_out_channels_from_resnet_block(resnet.layer1) resnet.layer2.out_channels = resnet.layer2[-1].out_channels = get_out_channels_from_resnet_block(resnet.layer2) resnet.layer3.out_channels = resnet.layer3[-1].out_channels = get_out_channels_from_resnet_block(resnet.layer3) resnet.layer4.out_channels = resnet.layer4[-1].out_channels = get_out_channels_from_resnet_block(resnet.layer4) n_pretrained = 5 if imagenet_pretrained else 0 return [layer0, resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4], True, n_pretrained def resnext(name, pretrained): import pretrainedmodels if name in ['resnext101_32x4d', 'resnext101_64x4d']: imagenet_pretrained = 'imagenet' if pretrained == 'imagenet' else None resnext = pretrainedmodels.__dict__[name](num_classes=1000, pretrained=imagenet_pretrained) else: return NotImplemented resnext_features = resnext.features layer0 = [resnext_features[i] for i in range(4)] layer0 = nn.Sequential(*layer0) layer0.out_channels = layer0[-1].out_channels = 64 layer1 = resnext_features[4] layer1.out_channels = layer1[-1].out_channels = 256 layer2 = resnext_features[5] layer2.out_channels = layer2[-1].out_channels = 512 layer3 = resnext_features[6] layer3.out_channels = layer3[-1].out_channels = 1024 layer4 = resnext_features[7] layer4.out_channels = layer4[-1].out_channels = 2048 n_pretrained = 5 if imagenet_pretrained else 0 return [layer0, layer1, layer2, layer3, layer4], True, n_pretrained def replace_bn(bn, act=None): slop = 0.01 if isinstance(act, nn.ReLU): activation = 'leaky_relu' elif isinstance(act, nn.LeakyReLU): activation = 'leaky_relu' slope = act.negative_slope elif isinstance(act, nn.ELU): activation = 'elu' else: activation = 'none' abn = ActivatedBatchNorm(num_features=bn.num_features, eps=bn.eps, momentum=bn.momentum, affine=bn.affine, track_running_stats=bn.track_running_stats, activation=activation, slope=slop) abn.load_state_dict(bn.state_dict()) return abn def replace_bn_in_sequential(layer0, block=None): layer0_modules = [] last_bn = None for n, m in layer0.named_children(): if isinstance(m, nn.BatchNorm2d): 
last_bn = (n, m) else: activation = 'none' if last_bn: abn = replace_bn(last_bn[1], m) activation = abn.activation layer0_modules.append((last_bn[0], abn)) last_bn = None if activation == 'none': if block and isinstance(m, block): m = replace_bn_in_block(m) elif isinstance(m, nn.Sequential): m = replace_bn_in_sequential(m, block) layer0_modules.append((n, m)) if last_bn: abn = replace_bn(last_bn[1]) layer0_modules.append((last_bn[0], abn)) return nn.Sequential(OrderedDict(layer0_modules)) class DummyModule(nn.Module): def forward(self, x): return x def replace_bn_in_block(block): block.bn1 = replace_bn(block.bn1, block.relu) block.bn2 = replace_bn(block.bn2, block.relu) block.bn3 = replace_bn(block.bn3) block.relu = DummyModule() if block.downsample: block.downsample = replace_bn_in_sequential(block.downsample) return nn.Sequential(block, nn.ReLU(inplace=True)) def se_net(name, pretrained): import pretrainedmodels if name in ['se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'senet154']: imagenet_pretrained = 'imagenet' if pretrained == 'imagenet' else None senet = pretrainedmodels.__dict__[name](num_classes=1000, pretrained=imagenet_pretrained) else: return NotImplemented layer0 = replace_bn_in_sequential(senet.layer0) block = senet.layer1[0].__class__ layer1 = replace_bn_in_sequential(senet.layer1, block=block) layer1.out_channels = layer1[-1].out_channels = senet.layer1[-1].conv3.out_channels layer0.out_channels = layer0[-1].out_channels = senet.layer1[0].conv1.in_channels layer2 = replace_bn_in_sequential(senet.layer2, block=block) layer2.out_channels = layer2[-1].out_channels = senet.layer2[-1].conv3.out_channels layer3 = replace_bn_in_sequential(senet.layer3, block=block) layer3.out_channels = layer3[-1].out_channels = senet.layer3[-1].conv3.out_channels layer4 = replace_bn_in_sequential(senet.layer4, block=block) layer4.out_channels = layer4[-1].out_channels = senet.layer4[-1].conv3.out_channels n_pretrained = 5 if imagenet_pretrained else 0 return [layer0, layer1, layer2, layer3, layer4], True, n_pretrained def darknet(pretrained): from .darknet import KitModel as DarkNet net = DarkNet() if pretrained: state_dict = torch.load("/media/data/model_zoo/coco/pytorch_yolov3.pth") net.load_state_dict(state_dict) n_pretrained = 3 if pretrained else 0 return [net.model0, net.model1, net.model2], True, n_pretrained class MockModule(nn.Module): def __init__(self, layers): super().__init__() self.backbone = nn.ModuleList(layers) def load_pretrained_weights(layers, name, dataset_name): state_dict = model_zoo.load_url(MODEL_URLS[name][dataset_name]) mock_module = MockModule(layers) mock_module.load_state_dict(state_dict, strict=False)
MIT License
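As a quick illustration of the out-channel bookkeeping that basenet.py relies on throughout the record above, here is a hedged, standalone sketch of the get_out_channels() recursion. It assumes PyTorch is installed; the layer stack is invented purely for demonstration and is not taken from the repository.

import torch.nn as nn

# Walk a module (or nested nn.Sequential) from the end until a layer that
# exposes `out_channels` is found, mirroring the helper in basenet.py.
def get_out_channels(layers):
    if hasattr(layers, 'out_channels'):
        return layers.out_channels
    if isinstance(layers, int):
        return layers
    for layer in reversed(list(layers)):
        if hasattr(layer, 'out_channels'):
            return layer.out_channels
        if isinstance(layer, nn.Sequential):
            return get_out_channels(layer)
    raise RuntimeError("cannot determine out_channels")

block = nn.Sequential(
    nn.Conv2d(3, 16, 3),
    nn.ReLU(),
    nn.Sequential(nn.Conv2d(16, 32, 3), nn.ReLU()),
)
print(get_out_channels(block))  # 32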
briarfox/shellistaext
ShellistaExt/plugins/core/untar_plugin.py
main
python
def main(self, line): args = bash(line) if args is None: return elif not (1 <= len(args) <= 2): print "untar: Usage: untar file [destination]" else: filename = os.path.abspath(args[0]) if not os.path.isfile(filename): print "untar: %s: No such file" % args[0] else: f = open(filename) try: f.seek(257) ustar_check = f.read(5) except Exception: ustar_check = '' finally: f.close() if ustar_check != 'ustar': print "untar: %s: does not appear to be a tar file" % args[0] else: if (os.path.basename(filename).lower().endswith('.tar')): altpath = os.path.splitext(os.path.basename(filename))[0] else: altpath = os.path.basename(filename) + '_untarred' altpath = os.path.join(os.path.dirname(filename), altpath) location = (args[1:2] or [altpath])[0] if (os.path.exists(location)) and not (os.path.isdir(location)): print "untar: %s: destination is not a directory" % location return elif not os.path.exists(location): os.makedirs(location) try: tar = tarfile.open(filename, 'r') dirnames = [os.path.join(os.path.dirname(x.name), '') for x in tar.getmembers() if x.name != 'pax_global_header'] common_dir = os.path.commonprefix(dirnames or ['/']) if not common_dir.endswith('/'): common_dir = os.path.join(os.path.dirname(common_dir), '') for member in tar.getmembers(): fn = member.name if fn == 'pax_global_header': continue if common_dir: if fn.startswith(common_dir): fn = fn.split(common_dir, 1)[-1] elif fn.startswith('/' + common_dir): fn = fn.split('/' + common_dir, 1)[-1] fn = fn.lstrip('/') fn = os.path.join(location, fn) dirf = os.path.dirname(fn) if member.isdir(): if not os.path.exists(fn): os.makedirs(fn) elif member.issym(): continue else: try: fp = tar.extractfile(member) except (KeyError, AttributeError): continue if not os.path.exists(dirf): os.makedirs(dirf) with open(fn, 'wb') as destfp: shutil.copyfileobj(fp, destfp) fp.close() except Exception, e: print e tar.close() print "untar: %s: tar file is corrupt" % args[0] return finally: tar.close()
untar a tar archive
https://github.com/briarfox/shellistaext/blob/ec97fbf23a36479e067a19ce54ccd1affc945fdb/ShellistaExt/plugins/core/untar_plugin.py#L9-L88
from .. tools.toolbox import bash import os,tarfile,shutil
MIT License
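The untar plugin above strips any directory prefix shared by all archive members before extracting. Below is a hedged, standalone sketch of that idea using only the standard library; the function name, archive path and destination are invented, and the plugin's error handling and symlink checks are omitted.

import os
import tarfile

def strip_common_dir(archive, destination):
    with tarfile.open(archive, 'r') as tar:
        members = [m for m in tar.getmembers() if m.name != 'pax_global_header']
        dirnames = [os.path.join(os.path.dirname(m.name), '') for m in members]
        common = os.path.commonprefix(dirnames or ['/'])
        if not common.endswith('/'):
            # commonprefix is character-based, so trim back to a whole directory
            common = os.path.join(os.path.dirname(common), '')
        for member in members:
            name = member.name
            if common and name.startswith(common):
                name = name[len(common):]
            member.name = name.lstrip('/') or '.'
            tar.extract(member, destination)

# strip_common_dir('project-1.0.tar', 'project')   # hypothetical call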
docusign/docusign-python-client
docusign_esign/models/jurisdiction.py
Jurisdiction.notary_public_in_seal
python
def notary_public_in_seal(self): return self._notary_public_in_seal
Gets the notary_public_in_seal of this Jurisdiction. :return: The notary_public_in_seal of this Jurisdiction. :rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/jurisdiction.py#L275-L283
import pprint import re import six from docusign_esign.client.configuration import Configuration class Jurisdiction(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'allow_system_created_seal': 'str', 'allow_user_uploaded_seal': 'str', 'commission_id_in_seal': 'str', 'county': 'str', 'county_in_seal': 'str', 'enabled': 'str', 'jurisdiction_id': 'str', 'name': 'str', 'notary_public_in_seal': 'str', 'state_name_in_seal': 'str' } attribute_map = { 'allow_system_created_seal': 'allowSystemCreatedSeal', 'allow_user_uploaded_seal': 'allowUserUploadedSeal', 'commission_id_in_seal': 'commissionIdInSeal', 'county': 'county', 'county_in_seal': 'countyInSeal', 'enabled': 'enabled', 'jurisdiction_id': 'jurisdictionId', 'name': 'name', 'notary_public_in_seal': 'notaryPublicInSeal', 'state_name_in_seal': 'stateNameInSeal' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._allow_system_created_seal = None self._allow_user_uploaded_seal = None self._commission_id_in_seal = None self._county = None self._county_in_seal = None self._enabled = None self._jurisdiction_id = None self._name = None self._notary_public_in_seal = None self._state_name_in_seal = None self.discriminator = None setattr(self, "_{}".format('allow_system_created_seal'), kwargs.get('allow_system_created_seal', None)) setattr(self, "_{}".format('allow_user_uploaded_seal'), kwargs.get('allow_user_uploaded_seal', None)) setattr(self, "_{}".format('commission_id_in_seal'), kwargs.get('commission_id_in_seal', None)) setattr(self, "_{}".format('county'), kwargs.get('county', None)) setattr(self, "_{}".format('county_in_seal'), kwargs.get('county_in_seal', None)) setattr(self, "_{}".format('enabled'), kwargs.get('enabled', None)) setattr(self, "_{}".format('jurisdiction_id'), kwargs.get('jurisdiction_id', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('notary_public_in_seal'), kwargs.get('notary_public_in_seal', None)) setattr(self, "_{}".format('state_name_in_seal'), kwargs.get('state_name_in_seal', None)) @property def allow_system_created_seal(self): return self._allow_system_created_seal @allow_system_created_seal.setter def allow_system_created_seal(self, allow_system_created_seal): self._allow_system_created_seal = allow_system_created_seal @property def allow_user_uploaded_seal(self): return self._allow_user_uploaded_seal @allow_user_uploaded_seal.setter def allow_user_uploaded_seal(self, allow_user_uploaded_seal): self._allow_user_uploaded_seal = allow_user_uploaded_seal @property def commission_id_in_seal(self): return self._commission_id_in_seal @commission_id_in_seal.setter def commission_id_in_seal(self, commission_id_in_seal): self._commission_id_in_seal = commission_id_in_seal @property def county(self): return self._county @county.setter def county(self, county): self._county = county @property def county_in_seal(self): return self._county_in_seal @county_in_seal.setter def county_in_seal(self, county_in_seal): self._county_in_seal = county_in_seal @property def enabled(self): return self._enabled @enabled.setter def enabled(self, enabled): self._enabled = enabled @property def jurisdiction_id(self): return self._jurisdiction_id @jurisdiction_id.setter def jurisdiction_id(self, jurisdiction_id): 
self._jurisdiction_id = jurisdiction_id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property
MIT License
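A hedged usage sketch for the swagger-generated model in the record above: the keyword arguments accepted by Jurisdiction.__init__ and the property shown are taken from the context, the values are placeholders, and the import assumes the docusign_esign package is installed.

from docusign_esign.models.jurisdiction import Jurisdiction

# Keyword arguments are stored on private attributes and read back through
# generated properties such as `notary_public_in_seal`.
j = Jurisdiction(name="California", notary_public_in_seal="true")
print(j.name)                    # California
print(j.notary_public_in_seal)   # true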
gregmuellegger/django-publicmanager
django_publicmanager/queryset.py
PublicQuerySet.public
python
def public(self): clone = self._clone() if self.is_public_attr: clone = clone.filter(**{self.is_public_attr: True}) if self.pub_date_attr: query = models.Q(**{self.pub_date_attr + '__lte': datetime.now()}) | models.Q(**{self.pub_date_attr: None}) clone = clone.filter(query) if self.status_attr and self.status_values: clone = clone.filter(**{self.status_attr + '__in': self.status_values}) return clone
The following conditions must be true: * is_public must be ``True`` * pub_date must be ``None`` or less than or equal to datetime.now() * status must be in ``self.status_values``
https://github.com/gregmuellegger/django-publicmanager/blob/ab3043189a3060cc3b01dafd5ce371d9a3724126/django_publicmanager/queryset.py#L40-L56
from datetime import datetime from django.db import models from django.db.models.fields import FieldDoesNotExist from django.db.models.query import QuerySet class PublicQuerySet(QuerySet): is_public_attr = None pub_date_attr = None status_attr = None status_values = () def __init__(self, model=None, query=None, is_public_attr=None, pub_date_attr=None, status_attr=None, status_values=(), *args, **kwargs): super(PublicQuerySet, self).__init__(model, query, *args, **kwargs) if is_public_attr: try: model._meta.get_field_by_name(is_public_attr) self.is_public_attr = is_public_attr except FieldDoesNotExist: pass if pub_date_attr: try: model._meta.get_field_by_name(pub_date_attr) self.pub_date_attr = pub_date_attr except FieldDoesNotExist: pass if status_attr: try: model._meta.get_field_by_name(status_attr) self.status_attr = status_attr self.status_values = status_values except FieldDoesNotExist: pass
BSD 3-Clause New or Revised License
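To make the pub_date rule in the record above concrete, here is a plain-Python sketch of the same condition outside the ORM; the function name and sample dates are invented.

from datetime import datetime, timedelta

# An object is publicly visible when pub_date is unset or not in the future,
# matching the Q(pub_date__lte=now) | Q(pub_date=None) filter used by public().
def is_published(pub_date):
    return pub_date is None or pub_date <= datetime.now()

print(is_published(None))                                # True
print(is_published(datetime.now() - timedelta(days=1)))  # True
print(is_published(datetime.now() + timedelta(days=1)))  # False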
cleverhans-lab/cleverhans
cleverhans/tf2/attacks/carlini_wagner_l2.py
carlini_wagner_l2
python
def carlini_wagner_l2(model_fn, x, **kwargs): return CarliniWagnerL2(model_fn, **kwargs).attack(x)
This is the function interface for the Carlini-Wagner-L2 attack. For more details on the attack and the parameters see the corresponding class.
https://github.com/cleverhans-lab/cleverhans/blob/4aed4be702be5ce13d5017b8a3c6a2cdc4fc0009/cleverhans/tf2/attacks/carlini_wagner_l2.py#L8-L13
import numpy as np import tensorflow as tf from cleverhans.tf2.utils import get_or_guess_labels, set_with_mask
MIT License
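A hedged usage sketch for the functional interface in the record above, assuming TensorFlow 2.x and cleverhans are installed. The tiny untrained Keras model and the random batch are placeholders, and with default settings the attack runs many optimization steps, so this is illustrative rather than fast.

import tensorflow as tf
from cleverhans.tf2.attacks.carlini_wagner_l2 import carlini_wagner_l2

# A stand-in model_fn: any callable mapping an input batch to logits will do.
model_fn = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
    tf.keras.layers.Dense(10),  # logits, no softmax
])

x = tf.random.uniform((4, 28, 28, 1))    # placeholder batch in [0, 1)
x_adv = carlini_wagner_l2(model_fn, x)   # all attack settings left at defaults
print(x_adv.shape)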
neurodiffgym/neurodiffeq
neurodiffeq/solvers.py
BaseSolver._generate_train_batch
python
def _generate_train_batch(self): return self._generate_batch('train')
r"""Generate the next training batch, register in ``self._batch`` and return.
https://github.com/neurodiffgym/neurodiffeq/blob/ab670a1af2e58766849f3bc683f7e6b0a6444124/neurodiffeq/solvers.py#L295-L297
import sys import warnings import inspect from inspect import signature from abc import ABC, abstractmethod from itertools import chain from copy import deepcopy import torch import torch.nn as nn from torch.optim import Adam from tqdm import tqdm from .solvers_utils import PretrainedSolver from .networks import FCNN from ._version_utils import deprecated_alias from .generators import GeneratorSpherical from .generators import SamplerGenerator from .generators import Generator1D from .generators import Generator2D from .generators import GeneratorND from .function_basis import RealSphericalHarmonics from .conditions import BaseCondition from .neurodiffeq import safe_diff as diff from .losses import _losses def _requires_closure(optimizer): return inspect.signature(optimizer.step).parameters.get('closure').default == inspect._empty class BaseSolver(ABC, PretrainedSolver): def __init__(self, diff_eqs, conditions, nets=None, train_generator=None, valid_generator=None, analytic_solutions=None, optimizer=None, criterion=None, n_batches_train=1, n_batches_valid=4, metrics=None, n_input_units=None, n_output_units=None, shuffle=None, batch_size=None): if shuffle: warnings.warn( "param `shuffle` is deprecated and ignored; shuffling should be performed by generators", FutureWarning, ) if batch_size is not None: warnings.warn( "param `batch_size` is deprecated and ignored; specify n_batches_train and n_batches_valid instead", FutureWarning, ) self.diff_eqs = diff_eqs self.conditions = conditions self.n_funcs = len(conditions) if nets is None: self.nets = [ FCNN(n_input_units=n_input_units, n_output_units=n_output_units, hidden_units=(32, 32), actv=nn.Tanh) for _ in range(self.n_funcs) ] else: self.nets = nets if train_generator is None: raise ValueError("train_generator must be specified") if valid_generator is None: raise ValueError("valid_generator must be specified") self.metrics_fn = metrics if metrics else {} if analytic_solutions: warnings.warn( 'The `analytic_solutions` argument is deprecated and could lead to unstable behavior. 
' 'Pass a `metrics` dict instead.', FutureWarning, ) def analytic_mse(*args): x = args[-n_input_units:] u_hat = analytic_solutions(*x) u = args[:-n_input_units] u, u_hat = torch.stack(u), torch.stack(u_hat) return ((u - u_hat) ** 2).mean() if 'analytic_mse' in self.metrics_fn: warnings.warn( "Ignoring `analytic_solutions` in presence of key 'analytic_mse' in `metrics`", FutureWarning, ) else: self.metrics_fn['analytic_mse'] = analytic_mse self.metrics_history = {} self.metrics_history.update({'train_loss': [], 'valid_loss': []}) self.metrics_history.update({'train__' + name: [] for name in self.metrics_fn}) self.metrics_history.update({'valid__' + name: [] for name in self.metrics_fn}) self.optimizer = optimizer if optimizer else Adam(chain.from_iterable(n.parameters() for n in self.nets)) self._set_criterion(criterion) def make_pair_dict(train=None, valid=None): return {'train': train, 'valid': valid} self.generator = make_pair_dict( train=SamplerGenerator(train_generator), valid=SamplerGenerator(valid_generator), ) self.n_batches = make_pair_dict(train=n_batches_train, valid=n_batches_valid) self._batch = make_pair_dict() self.best_nets = None self.lowest_loss = None self.local_epoch = 0 self._max_local_epoch = 0 self._stop_training = False self._phase = None def _set_criterion(self, criterion): if criterion is None: self.criterion = lambda r, f, x: (r ** 2).mean() elif isinstance(criterion, nn.modules.loss._Loss): self.criterion = lambda r, f, x: criterion(r, torch.zeros_like(r)) elif isinstance(criterion, str): self.criterion = _losses[criterion.lower()] elif callable(criterion): self.criterion = criterion else: raise TypeError(f"Unknown type of criterion {type(criterion)}") @property def global_epoch(self): return len(self.metrics_history['train_loss']) @property def batch(self): return self._batch @property def _batch_examples(self): warnings.warn( '`._batch_examples` has been deprecated in favor of `._batch` and will be removed in a future version', FutureWarning, ) return self._batch def compute_func_val(self, net, cond, *coordinates): return cond.enforce(net, *coordinates) def _update_history(self, value, metric_type, key): self._phase = key if metric_type == 'loss': self.metrics_history[f'{key}_{metric_type}'].append(value) elif metric_type in self.metrics_fn: self.metrics_history[f'{key}__{metric_type}'].append(value) else: raise KeyError(f"metric '{metric_type}' not specified") def _update_train_history(self, value, metric_type): self._update_history(value, metric_type, key='train') def _update_valid_history(self, value, metric_type): self._update_history(value, metric_type, key='valid') def _generate_batch(self, key): self._phase = key self._batch[key] = [v.reshape(-1, 1) for v in self.generator[key].get_examples()] return self._batch[key]
MIT License
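A standalone sketch of the reshape step performed by _generate_batch() in the context above: every tensor returned by the sampler generator is flattened into a column vector before being registered as the batch. PyTorch is assumed; the sample tensors are invented.

import torch

samples = [torch.rand(16), torch.rand(16)]      # stand-in for generator output
batch = [v.reshape(-1, 1) for v in samples]     # column vectors, as in _generate_batch
print([tuple(t.shape) for t in batch])          # [(16, 1), (16, 1)]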
zettaio/restic-compose-backup
src/restic_compose_backup/containers.py
Container.filter_mounts
python
def filter_mounts(self): filtered = [] exclude_bind_mounts = utils.is_true(config.exclude_bind_mounts) mounts = list(filter(lambda m: not exclude_bind_mounts or m.type == "volume", self._mounts)) if not self.volume_backup_enabled: return filtered if self._include: for mount in mounts: for pattern in self._include: if pattern in mount.source: break else: continue filtered.append(mount) elif self._exclude: for mount in mounts: for pattern in self._exclude: if pattern in mount.source: break else: filtered.append(mount) else: return mounts return filtered
Get all mounts for this container matching include/exclude filters
https://github.com/zettaio/restic-compose-backup/blob/b52655a23bc385dbf78f7ef07fb26c86d76e07fa/src/restic_compose_backup/containers.py#L193-L224
import os import logging from pathlib import Path from typing import List from restic_compose_backup import enums, utils from restic_compose_backup.config import config logger = logging.getLogger(__name__) VOLUME_TYPE_BIND = "bind" VOLUME_TYPE_VOLUME = "volume" class Container: container_type = None def __init__(self, data: dict): self._data = data self._state = data.get('State') self._config = data.get('Config') self._mounts = [Mount(mnt, container=self) for mnt in data.get('Mounts')] if not self._state: raise ValueError('Container meta missing State') if self._config is None: raise ValueError('Container meta missing Config') self._labels = self._config.get('Labels') if self._labels is None: raise ValueError('Container meta missing Config->Labels') self._include = self._parse_pattern(self.get_label(enums.LABEL_VOLUMES_INCLUDE)) self._exclude = self._parse_pattern(self.get_label(enums.LABEL_VOLUMES_EXCLUDE)) @property def instance(self) -> 'Container': if self.database_backup_enabled: from restic_compose_backup import containers_db if self.mariadb_backup_enabled: return containers_db.MariadbContainer(self._data) if self.mysql_backup_enabled: return containers_db.MysqlContainer(self._data) if self.postgresql_backup_enabled: return containers_db.PostgresContainer(self._data) else: return self @property def id(self) -> str: return self._data.get('Id') @property def hostname(self) -> str: return self.id[:12] @property def image(self) -> str: return self.get_config('Image') @property def name(self) -> str: return self._data['Name'].replace('/', '') @property def service_name(self) -> str: return self.get_label('com.docker.compose.service', default='') or self.get_label('com.docker.swarm.service.name', default='') @property def backup_process_label(self) -> str: return f"{enums.LABEL_BACKUP_PROCESS}-{self.project_name}" @property def project_name(self) -> str: return self.get_label('com.docker.compose.project', default='') @property def stack_name(self) -> str: return self.get_label("com.docker.stack.namespace") @property def is_oneoff(self) -> bool: return self.get_label('com.docker.compose.oneoff', default='False') == 'True' @property def environment(self) -> list: return self.get_config('Env') def remove(self): self._data.remove() def get_config_env(self, name) -> str: data = {i[0:i.find('=')]: i[i.find('=') + 1:] for i in self.environment} return data.get(name) def set_config_env(self, name, value): env = self.environment new_value = f'{name}={value}' for i, entry in enumerate(env): if f'{name}=' in entry: env[i] = new_value break else: env.append(new_value) @property def volumes(self) -> dict: volumes = {} for mount in self._mounts: volumes[mount.source] = { 'bind': mount.destination, 'mode': 'rw', } return volumes @property def backup_enabled(self) -> bool: return any([ self.volume_backup_enabled, self.database_backup_enabled, ]) @property def volume_backup_enabled(self) -> bool: return utils.is_true(self.get_label(enums.LABEL_VOLUMES_ENABLED)) @property def database_backup_enabled(self) -> bool: return any([ self.mysql_backup_enabled, self.mariadb_backup_enabled, self.postgresql_backup_enabled, ]) @property def mysql_backup_enabled(self) -> bool: return utils.is_true(self.get_label(enums.LABEL_MYSQL_ENABLED)) @property def mariadb_backup_enabled(self) -> bool: return utils.is_true(self.get_label(enums.LABEL_MARIADB_ENABLED)) @property def postgresql_backup_enabled(self) -> bool: return utils.is_true(self.get_label(enums.LABEL_POSTGRES_ENABLED)) @property def 
is_backup_process_container(self) -> bool: return self.get_label(self.backup_process_label) == 'True' @property def is_running(self) -> bool: return self._state.get('Running', False) def get_config(self, name, default=None): return self._config.get(name, default) def get_label(self, name, default=None): return self._labels.get(name, None)
MIT License
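The include/exclude matching in filter_mounts() leans on Python's for/else construct, which is easy to misread. Below is a runnable, standalone sketch of the include branch; the mount paths and patterns are invented.

mounts = ["/srv/app/data", "/srv/app/cache", "/var/lib/mysql"]
include = ["data", "mysql"]

filtered = []
for mount in mounts:
    for pattern in include:
        if pattern in mount:
            break          # at least one include pattern matched
    else:
        continue           # no pattern matched: skip this mount
    filtered.append(mount)

print(filtered)            # ['/srv/app/data', '/var/lib/mysql']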
sophilabs/django-qurl-templatetag
qurl_templatetag/templatetags/qurl.py
qurl
python
def qurl(parser, token): bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError( '"{0}" takes at least one argument (url)'.format(bits[0])) url = parser.compile_filter(bits[1]) asvar = None bits = bits[2:] if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] qs = [] if len(bits): kwarg_re = re.compile(r'(\w+)(\-=|\+=|=|\+\+|\-\-)(.*)') for bit in bits: match = kwarg_re.match(bit) if not match: raise TemplateSyntaxError('Malformed arguments to url tag') name, op, value = match.groups() qs.append((name, op, parser.compile_filter(value),)) return QURLNode(url, qs, asvar)
Append, remove or replace query string parameters (preserve order) {% qurl url [param]* [as <var_name>] %} param: name=value: replace all values of name by one value name=None: remove all values of name name+=value: append a new value for name name-=value: remove the value of name with the value name++: increment value by one name--: decrement value by one Example:: {% qurl '/search?page=1&color=blue&color=green' order='name' page=None color+='red' color-='green' %} Output: /search?color=blue&order=name&color=red {% qurl request.get_full_path order='name' %}
https://github.com/sophilabs/django-qurl-templatetag/blob/000bff1e5f5a9a835016ed9cd565aa3c97104c30/qurl_templatetag/templatetags/qurl.py#L17-L62
import re from django.utils.encoding import smart_str from django.template import Library, Node, TemplateSyntaxError from .. import Qurl register = Library() @register.tag
MIT License
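A small, runnable sketch of the argument regex used by the qurl tag above, showing how each parameter bit is split into name, operator and value; the sample bits are illustrative only.

import re

kwarg_re = re.compile(r'(\w+)(\-=|\+=|=|\+\+|\-\-)(.*)')
for bit in ["page=None", "color+='red'", "page++"]:
    name, op, value = kwarg_re.match(bit).groups()
    print(name, op, value)
# page = None
# color += 'red'
# page ++ (empty value)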
nipy/nipy
nipy/core/reference/coordinate_map.py
CoordinateMap.renamed_range
python
def renamed_range(self, newnames, name=''): return renamed_range(self, newnames)
New CoordinateMap with function_range renamed Parameters ---------- newnames : dict A dictionary whose keys are integers or are in mapping.function_range.coord_names and whose values are the new names. Returns ------- newmapping : CoordinateMap A new CoordinateMap with renamed function_range. Examples -------- >>> domain = CoordinateSystem('ijk') >>> range = CoordinateSystem('xyz') >>> cm = CoordinateMap(domain, range, lambda x:x+1) >>> new_cm = cm.renamed_range({'x':'u'}) >>> new_cm.function_range CoordinateSystem(coord_names=('u', 'y', 'z'), name='', coord_dtype=float64) >>> new_cm = cm.renamed_range({'w':'u'}) Traceback (most recent call last): ... ValueError: no range coordinate named w
https://github.com/nipy/nipy/blob/d16d268938dcd5c15748ca051532c21f57cf8a22/nipy/core/reference/coordinate_map.py#L324-L354
from __future__ import absolute_import import warnings import numpy as np import numpy.linalg as npl from nibabel.affines import to_matvec, from_matvec from ...fixes.nibabel import io_orientation from .coordinate_system import(CoordinateSystem, safe_dtype, is_coordsys, product as coordsys_product ) from nipy.testing import legacy_printing as setup_module CS = CoordinateSystem TINY = 1e-5 class CoordinateMap(object): _doc = {} function = np.exp _doc['function'] = 'The function from function_domain to function_range.' function_domain = CoordinateSystem('x') _doc['function_domain'] = 'The domain of the function, a CoordinateSystem.' function_range = CoordinateSystem('y') _doc['function_range'] = 'The range of the function, a CoordinateSystem.' inverse_function = np.log _doc['inverse_function'] = 'The inverse function from function_range' + 'to function_domain, if supplied.' ndims = (1,1) _doc['ndims'] = 'Number of dimensions of domain and range, respectively.' def __init__(self, function_domain, function_range, function, inverse_function=None): if not is_coordsys(function_domain): function_domain = CoordinateSystem(function_domain) self.function_domain = function_domain if not is_coordsys(function_range): function_range = CoordinateSystem(function_range) self.function_range = function_range self.function = function self.inverse_function = inverse_function self.ndims = (function_domain.ndim, function_range.ndim) if not callable(function): raise ValueError('The function must be callable.') if inverse_function is not None: if not callable(inverse_function): raise ValueError('The inverse_function must be callable.') self._checkfunction() def __setattr__(self, key, value): if key in self.__dict__: raise AttributeError('the value of %s has already been ' 'set and all attributes are ' 'read-only' % key) object.__setattr__(self, key, value) def reordered_domain(self, order=None): return reordered_domain(self, order) def reordered_range(self, order=None): return reordered_range(self, order) def renamed_domain(self, newnames, name=''): return renamed_domain(self, newnames)
BSD 3-Clause New or Revised License
owtf/http-request-translator
hrt/base.py
AbstractScript.generate_script
python
def generate_script(self, headers=None, details=None, search=None): self.headers = headers or self.headers self.details = details or self.details self.search = search or self.search if not self.headers: raise ValueError("'headers' cannot be equal to '%s'" % self.headers) elif not self.details: raise ValueError("'details' cannot be equal to '%s'" % self.details) if not self.url and self.details: self.url = self.encode_url(self.create_url()) if self.code_begin: self._script += self._generate_begin() if self.code_proxy: self._script += self._generate_proxy() method = self.details.get('method', '').strip().lower() if method == 'get': pass elif method == 'post': if self.code_post: self._script += self._generate_post() else: raise ValueError("'%s' is not supported! Only GET and POST are supported for now." % self.details['method']) if self.code_https: self._script += self._generate_https() self._script += self._generate_request() return self._script
Generate script code. :param list headers: Headers list containing fields like 'Host', 'User-Agent', etc. :param dict details: Request specific details dictionary like body and method of the request. :param str search: String to search for in the response to the request. :raises ValueError: when unsupported HTTP method, invalid `headers` or `details` values. :return: Generated script code. :rtype: str
https://github.com/owtf/http-request-translator/blob/71fac59ac0673a6695a93da211ea2af4a31862b7/hrt/base.py#L48-L84
try: from urllib import quote except ImportError: from urllib.parse import quote from importlib import import_module from .url import get_url, check_valid_url class AbstractScript(object): __language__ = '' code_begin = '' code_header = '' code_proxy = '' code_post = '' code_https = '' code_search = '' code_nosearch = '' def __init__(self, headers=None, details=None, search=None): self.load_attributes(self.__class__) self._script = '' self.headers = headers self.details = details self.search = search self.url = '' if self.details: self.url = self.encode_url(self.create_url())
BSD 3-Clause New or Revised License
facebookincubator/reindeer
build/fbcode_builder/getdeps/fetcher.py
Fetcher.clean
python
def clean(self): pass
Reverts any changes that might have been made to the src dir
https://github.com/facebookincubator/reindeer/blob/d3a70b069cd6774f2be374fa19bea68a3cb6142c/build/fbcode_builder/getdeps/fetcher.py#L121-L124
from __future__ import absolute_import, division, print_function, unicode_literals import errno import hashlib import os import re import shutil import stat import subprocess import sys import tarfile import time import zipfile from datetime import datetime from typing import Dict, NamedTuple from .copytree import prefetch_dir_if_eden from .envfuncs import Env from .errors import TransientFailure from .platform import is_windows from .runcmd import run_cmd try: from urllib import urlretrieve from urlparse import urlparse except ImportError: from urllib.parse import urlparse from urllib.request import urlretrieve def file_name_is_cmake_file(file_name): file_name = file_name.lower() base = os.path.basename(file_name) return ( base.endswith(".cmake") or base.endswith(".cmake.in") or base == "cmakelists.txt" ) class ChangeStatus(object): def __init__(self, all_changed=False): if all_changed: self.source_files = 1 self.make_files = 1 else: self.source_files = 0 self.make_files = 0 def record_change(self, file_name): file_name = file_name.lower() if file_name_is_cmake_file(file_name): self.make_files += 1 elif "/fbcode_builder/cmake" in file_name: self.source_files += 1 elif "/fbcode_builder/" not in file_name: self.source_files += 1 def sources_changed(self): return self.source_files > 0 def build_changed(self): return self.make_files > 0 class Fetcher(object): def update(self): return ChangeStatus()
MIT License
vfilimonov/pyppt
pyppt/core.py
_delete_empty_placeholders
python
def _delete_empty_placeholders(Slide): for p in _empty_placeholders(Slide)[::-1]: if p.PlaceholderFormat.type not in pp_titles: p.delete()
Delete all empty placeholders except Title, Subtitle and Body
https://github.com/vfilimonov/pyppt/blob/a81374388009b18d45660deacc36c03881741f85/pyppt/core.py#L297-L302
import numpy as np import warnings import tempfile import os import sys try: from win32com import client as win32client except ImportError: win32client = None try: import matplotlib.pyplot as plt except RuntimeError: plt = None try: basestring = basestring except NameError: unicode = str basestring = (str, bytes) try: long = long except NameError: long = int from pyppt._ver_ import __version__, __author__, __email__, __url__ _LOCALHOST = '127.0.0.1' _DEFAULT_PORT = '8877' msoShapeType = {'msoShapeTypeMixed': -2, 'msoAutoShape': 1, 'msoCallout': 2, 'msoChart': 3, 'msoComment': 4, 'msoFreeform': 5, 'msoGroup': 6, 'msoEmbeddedOLEObject': 7, 'msoFormControl': 8, 'msoLine': 9, 'msoLinkedOLEObject': 10, 'msoLinkedPicture': 11, 'msoOLEControlObject': 12, 'msoPicture': 13, 'msoPlaceholder': 14, 'msoTextEffect': 15, 'msoMedia': 16, 'msoTextBox': 17, 'msoScriptAnchor': 18, 'msoTable': 19, 'msoCanvas': 20, 'msoDiagram': 21, 'msoInk': 22, 'msoInkComment': 23, 'msoIgxGraphic': 24} ppPlaceholderType = {'ppPlaceholderMixed': -2, 'ppPlaceholderTitle': 1, 'ppPlaceholderBody': 2, 'ppPlaceholderCenterTitle': 3, 'ppPlaceholderSubtitle': 4, 'ppPlaceholderVerticalTitle': 5, 'ppPlaceholderVerticalBody': 6, 'ppPlaceholderObject': 7, 'ppPlaceholderChart': 8, 'ppPlaceholderBitmap': 9, 'ppPlaceholderMediaClip': 10, 'ppPlaceholderOrgChart': 11, 'ppPlaceholderTable': 12, 'ppPlaceholderSlideNumber': 13, 'ppPlaceholderHeader': 14, 'ppPlaceholderFooter': 15, 'ppPlaceholderDate': 16, 'ppPlaceholderVerticalObject': 17, 'ppPlaceholderPicture': 18} pp_titles = [ppPlaceholderType['ppPlaceholder' + _] for _ in ('Title', 'Subtitle', 'Body')] pp_pictures = [ppPlaceholderType['ppPlaceholder' + _] for _ in ('Object', 'Bitmap', 'Picture')] msoZOrderCmd = {'msoBringToFront': 0, 'msoSendToBack': 1, 'msoBringForward': 2, 'msoSendBackward': 3, 'msoBringInFrontOfText': 4, 'msoSendBehindText': 5} msoShapeTypeInt = {v: k for k, v in msoShapeType.items()} ppPlaceholderTypeInt = {v: k for k, v in ppPlaceholderType.items()} msoZOrderCmdInt = {v: k for k, v in msoZOrderCmd.items()} _TEMPTEXT = '--TO-BE-REMOVED--' presets = {'center': [0.0415, 0.227, 0.917, 0.716], 'full': [0, 0, 1., 1.]} preset_sizes = {'': [0.0415, 0.227, 0.917, 0.716], 'L': [0.0415, 0.153, 0.917, 0.790], 'XL': [0.0415, 0.049, 0.917, 0.888], 'XXL': [0, 0, 1., 1.]} preset_modifiers = {'center': [0, 0, 1, 1], 'left': [0, 0, 0.5, 1], 'right': [0.5, 0, 0.5, 1], 'topleft': [0, 0, 0.5, 0.5], 'topright': [0.5, 0, 0.5, 0.5], 'bottomleft': [0, 0.5, 0.5, 0.5], 'bottomright': [0.5, 0.5, 0.5, 0.5], '221': [0, 0, 0.5, 0.5], '222': [0.5, 0, 0.5, 0.5], '223': [0, 0.5, 0.5, 0.5], '224': [0.5, 0.5, 0.5, 0.5], '231': [0, 0, 1./3., 0.5], '232': [1./3., 0, 1./3., 0.5], '233': [2./3., 0, 1./3., 0.5], '234': [0, 0.5, 1./3., 0.5], '235': [1./3., 0.5, 1./3., 0.5], '236': [2./3., 0.5, 1./3., 0.5]} def _temp_fname(): f = tempfile.NamedTemporaryFile(delete=False) f.close() name = f.name os.remove(name) return name + '.png' def _check_win32com(): if win32client is None: raise Exception('win32com module is not found (current platform: %s). ' 'Most likely the code is running on the remote server, ' 'and thus the core functionality will not work. ' 'Check the documentation for the possible solution: %s.' 
% (sys.platform, __url__)) try: import pythoncom pythoncom.CoInitialize() except: pass def _get_application(): _check_win32com() Application = win32client.Dispatch('PowerPoint.Application') Application.Visible = True return Application def _get_active_presentation(): return _get_application().ActivePresentation def _get_slide(slide_no=None): if slide_no is None: return _get_application().ActiveWindow.View.Slide else: Presentation = _get_active_presentation() return Presentation.Slides[slide_no - 1] def _shapes(Slide, types=None): if Slide is None or isinstance(Slide, (int, long)): Slide = _get_slide(Slide) shapes = [Slide.Shapes.Item(1+ii) for ii in range(Slide.Shapes.Count)] if types is not None: types = [msoShapeType[_] for _ in types] shapes = [s for s in shapes if s.Type in types] return shapes def _placeholders(Slide): return _shapes(Slide, ['msoPlaceholder']) def _placeholders_pictures(Slide, empty=False): pics = [p for p in _placeholders(Slide) if p.PlaceholderFormat.type in pp_pictures] if empty: pics = [p for p in pics if _is_placeholder_empty(p)] return pics def _pictures(Slide): pics = [] for s in _shapes(Slide): if s.Type == msoShapeType['msoPicture']: pics.append(s) elif s.Type == msoShapeType['msoPlaceholder']: if s.PlaceholderFormat.type in pp_pictures: if not _is_placeholder_empty(s): pics.append(s) return pics def _has_textframe(obj): return hasattr(obj, 'TextFrame') and hasattr(obj.TextFrame, 'TextRange') def _is_placeholder_empty(obj): if obj.PlaceholderFormat.ContainedType == msoShapeType['msoAutoShape']: if _has_textframe(obj): if obj.TextFrame.TextRange.Length == 0: return True else: return True return False def _empty_placeholders(Slide): return [s for s in _placeholders(Slide) if _is_placeholder_empty(s)] def _fill_empty_placeholders(Slide): filled = [] for p in _empty_placeholders(Slide): if p.PlaceholderFormat.type not in pp_titles: if _has_textframe(p): p.TextFrame.TextRange.Text = _TEMPTEXT filled.append(p) return filled def _revert_filled_placeholders(items): for item in items: item.TextFrame.TextRange.Text = ''
MIT License
diofant/diofant
diofant/concrete/expr_with_intlimits.py
ExprWithIntLimits.reorder_limit
python
def reorder_limit(self, x, y): var = {limit[0] for limit in self.limits} limit_x = self.limits[x] limit_y = self.limits[y] if (len(set(limit_x[1].free_symbols).intersection(var)) == 0 and len(set(limit_x[2].free_symbols).intersection(var)) == 0 and len(set(limit_y[1].free_symbols).intersection(var)) == 0 and len(set(limit_y[2].free_symbols).intersection(var)) == 0): limits = [] for i, limit in enumerate(self.limits): if i == x: limits.append(limit_y) elif i == y: limits.append(limit_x) else: limits.append(limit) return type(self)(self.function, *limits) else: raise ReorderError(self, 'could not interchange the two limits specified')
Interchange two limit tuples of a Sum or Product expression. Parameters ========== x, y: int are integers corresponding to the index variables of the two limits which are to be interchanged. Examples ======== >>> from diofant.abc import e, f >>> Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2) Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b)) >>> Sum(x**2, (x, a, b), (x, c, d)).reorder_limit(1, 0) Sum(x**2, (x, c, d), (x, a, b)) >>> Product(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2) Product(x*y*z, (z, e, f), (y, c, d), (x, a, b)) See Also ======== diofant.concrete.expr_with_intlimits.ExprWithIntLimits.index diofant.concrete.expr_with_intlimits.ExprWithIntLimits.reorder diofant.concrete.summations.Sum.reverse_order diofant.concrete.products.Product.reverse_order
https://github.com/diofant/diofant/blob/05c50552b0e0533f1dbf2ec05e65b6c45b7e2c11/diofant/concrete/expr_with_intlimits.py#L248-L301
from ..core import oo from .expr_with_limits import ExprWithLimits class ReorderError(NotImplementedError): def __init__(self, expr, msg): super().__init__(f'{expr} could not be reordered: {msg}.') class ExprWithIntLimits(ExprWithLimits): def __init__(self, function, *symbols, **assumptions): if not all(all(abs(_) == oo or (_.is_integer is not False) for _ in l[1:]) for l in self.limits): raise ValueError('Limits must be integers or ±oo.') def change_index(self, var, trafo, newvar=None): if newvar is None: newvar = var limits = [] for limit in self.limits: if limit[0] == var: p = trafo.as_poly(var) if p.degree() != 1: raise ValueError('Index transformation is not linear') alpha = p.coeff_monomial(var) beta = p.coeff_monomial(1) if alpha.is_number: if alpha == 1: limits.append((newvar, alpha*limit[1] + beta, alpha*limit[2] + beta)) elif alpha == -1: limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta)) else: raise ValueError('Linear transformation results in non-linear summation stepsize') else: limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta)) else: limits.append(limit) function = self.function.subs({var: (var - beta)/alpha}) function = function.subs({var: newvar}) return self.func(function, *limits) def index(self, x): variables = [limit[0] for limit in self.limits] if variables.count(x) != 1: raise ValueError(self, 'Number of instances of variable not equal to one') else: return variables.index(x) def reorder(self, *arg): new_expr = self for r in arg: if len(r) != 2: raise ValueError(r, 'Invalid number of arguments') index1 = r[0] index2 = r[1] if not isinstance(r[0], int): index1 = self.index(r[0]) if not isinstance(r[1], int): index2 = self.index(r[1]) new_expr = new_expr.reorder_limit(index1, index2) return new_expr
BSD 3-Clause New or Revised License
gmr/infoblox
infoblox/mapping.py
Mapping.__ne__
python
def __ne__(self, other): return not self.__eq__(other)
Test two mappings for inequality. :param mapping other: The mapping to test against this one :rtype: bool
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/mapping.py#L91-L98
import collections import inspect import json class Mapping(collections.Mapping): _dirty = False def __init__(self, **kwargs): self.from_dict(kwargs) def __contains__(self, item): return item in self.keys() def __eq__(self, other): if not isinstance(other, self.__class__): return False return all([getattr(self, k) == getattr(other, k) for k in self.keys()]) def __delitem__(self, key): if key not in self.keys(): raise KeyError(key) delattr(self, key) def __getitem__(self, item): if item not in self.keys(): raise KeyError(item) return getattr(self, item) def __hash__(self): return hash(self.items()) def __iter__(self): return self.iterkeys() def __len__(self): return len(self.keys())
BSD 3-Clause New or Revised License
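A minimal, standalone sketch of the __ne__-delegates-to-__eq__ pattern used by Mapping above. On Python 2, which this library also targets, __ne__ is not derived automatically from __eq__, so spelling it out keeps the two comparisons consistent; the Point class here is invented.

class Point(object):
    def __init__(self, x):
        self.x = x

    def __eq__(self, other):
        return isinstance(other, Point) and self.x == other.x

    def __ne__(self, other):
        # Mirror Mapping.__ne__: inequality is just the negation of __eq__.
        return not self.__eq__(other)

print(Point(1) != Point(1))  # False
print(Point(1) != Point(2))  # True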
homebysix/recipe-robot
scripts/recipe_robot_lib/tools.py
congratulate
python
def congratulate(prefs, first_timer): congrats_msg = ( "Amazing.", "Easy peasy.", "Fantastic.", "Good on ya!", "Imagine all the typing you saved.", "Isn't meta-automation great?", "(Yep, it's pretty fun for me too.)", "Pretty cool, right?", "Round of applause for you!", "Terrific job!", "Thanks!", "That's awesome!", "Want to do another?", "Well done!", "You rock star, you.", ) if prefs["RecipeCreateCount"] > 0: if first_timer: if prefs["RecipeCreateCount"] == 1: recipe_count = "your first recipe" else: recipe_count = "your first {} recipes".format( prefs["RecipeCreateCount"] ) congrats = "Congratulations!" else: if prefs["RecipeCreateCount"] == 1: recipe_count = "1 recipe" else: recipe_count = "{} recipes".format(prefs["RecipeCreateCount"]) congrats = random_choice(congrats_msg) robo_print( "\nYou've created {} with Recipe Robot. {}\n".format(recipe_count, congrats) )
Display a friendly congratulatory message upon creating recipes. Args: prefs (dict): The dictionary containing a key/value pair for Recipe Robot preferences. first_timer (bool): True if this is the first time the user has run Recipe Robot.
https://github.com/homebysix/recipe-robot/blob/fc51b3134b6db7cd86641785d75a0b994ae88154/scripts/recipe_robot_lib/tools.py#L584-L627
from __future__ import absolute_import, print_function import os import plistlib import re import shlex import subprocess import sys import timeit from datetime import datetime from functools import wraps from random import choice as random_choice from urllib.parse import quote_plus from Foundation import ( CFPreferencesAppSynchronize, CFPreferencesCopyAppValue, CFPreferencesCopyKeyList, CFPreferencesSetAppValue, kCFPreferencesAnyHost, kCFPreferencesCurrentUser, ) from .exceptions import RoboError __version__ = "2.2.0" ENDC = "\033[0m" BUNDLE_ID = "com.elliotjordan.recipe-robot" PREFS_FILE = os.path.expanduser("~/Library/Preferences/%s.plist" % BUNDLE_ID) SUPPORTED_IMAGE_FORMATS = ("dmg", "iso") SUPPORTED_ARCHIVE_FORMATS = ("zip", "tar.gz", "gzip", "tar.bz2", "tbz", "tgz") SUPPORTED_INSTALL_FORMATS = ("pkg",) ALL_SUPPORTED_FORMATS = ( SUPPORTED_IMAGE_FORMATS + SUPPORTED_ARCHIVE_FORMATS + SUPPORTED_INSTALL_FORMATS ) SUPPORTED_BUNDLE_TYPES = { "app": "/Applications", "plugin": "/Library/Internet Plug-Ins", "prefpane": "/Library/PreferencePanes", "qlgenerator": "/Library/QuickLook", "saver": "/Library/Screen Savers", } PREFERENCE_KEYS = ( "DSPackagesPath", "FollowOfficialJSSRecipesFormat", "IgnoreExisting", "Initialized", "LastRecipeRobotVersion", "RecipeCreateCount", "RecipeCreateLocation", "RecipeIdentifierPrefix", "RecipeTypes", "StripDeveloperSuffixes", "SUEnableAutomaticChecks", "SUHasLaunchedBefore", "SULastCheckTime", "SUSendProfileInfo", ) CACHE_DIR = os.path.join( os.path.expanduser("~/Library/Caches/Recipe Robot"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S_%f"), ) color_setting = False GITHUB_DOMAINS = ("github.com", "githubusercontent.com", "github.io") KNOWN_403_ON_HEAD = ( "bitbucket.org", "github.com", "hockeyapp.net", "updates.devmate.com", ) class LogLevel(object): DEBUG = ("\033[95m", "DEBUG") ERROR = ("\033[1;38;5;196m", "ERROR") LOG = ("", "") REMINDER = ("\033[1;38;5;33m", "REMINDER") VERBOSE = ("\033[0m", "") WARNING = ("\033[1;38;5;208m", "WARNING") class OutputMode(object): verbose_mode = False debug_mode = False @classmethod def set_verbose_mode(cls, value): if isinstance(value, bool): cls.verbose_mode = value else: raise ValueError @classmethod def set_debug_mode(cls, value): if isinstance(value, bool): cls.debug_mode = value else: raise ValueError def timed(func): @wraps(func) def run_func(*args, **kwargs): start = timeit.default_timer() result = func(*args, **kwargs) end = timeit.default_timer() return (end - start, result) return run_func def robo_print(message, log_level=LogLevel.LOG, indent=0): color = log_level[0] if color_setting else "" indents = indent * " " if log_level[1]: prefix = "[%s] " % log_level[1] else: prefix = "" suffix = ENDC if color_setting else "" line = color + indents + prefix + message + suffix if ( log_level in (LogLevel.ERROR, LogLevel.REMINDER, LogLevel.WARNING, LogLevel.LOG) or (log_level is LogLevel.DEBUG and OutputMode.debug_mode) or ( log_level is LogLevel.VERBOSE and (OutputMode.verbose_mode or OutputMode.debug_mode) ) ): if os.environ.get("NSUnbufferedIO") == "YES": subprocess.run(["echo", line], check=False) elif log_level in (LogLevel.ERROR, LogLevel.WARNING): print(line, file=sys.stderr) else: print(line) def get_github_token(): github_token_file = os.path.expanduser("~/.autopkg_gh_token") if os.path.isfile(github_token_file): try: with open(github_token_file, "r") as tokenfile: return tokenfile.read().strip() except IOError: facts["warnings"].append( "Couldn't read GitHub token file at {}.".format(github_token_file) ) return 
None def strip_dev_suffix(dev): corp_suffixes = ( "incorporated", "corporation", "limited", "oy/ltd", "pty ltd", "pty. ltd", "pvt ltd", "pvt. ltd", "s.a r.l", "sa rl", "sarl", "srl", "corp", "gmbh", "l.l.c", "inc", "llc", "ltd", "pvt", "oy", "sa", "ab", ) if dev not in (None, ""): for suffix in corp_suffixes: if dev.lower().rstrip(" .").endswith(suffix): dev = dev.rstrip(" .")[: len(dev) - len(suffix) - 1].rstrip(",. ") break return dev def get_bundle_name_info(facts): if "app" in facts["inspections"]: bundle_type = "app" bundle_name_key = "app_name" else: bundle_types = [x for x in SUPPORTED_BUNDLE_TYPES if x in facts["inspections"]] bundle_type = bundle_types[0] if bundle_types else None bundle_name_key = bundle_type + "_name" if bundle_types else None return bundle_type, bundle_name_key def recipe_dirpath(app_name, dev, prefs): char_replacements = (("/", "-"), ("\\", "-"), (":", "-"), ("*", "-"), ("?", "")) for char in char_replacements: app_name = app_name.replace(char[0], char[1]) path_components = [prefs["RecipeCreateLocation"]] if dev is not None and prefs.get("FollowOfficialJSSRecipesFormat", False) is False: if prefs.get("StripDeveloperSuffixes", False) is True: dev = strip_dev_suffix(dev) for char in char_replacements: dev = dev.replace(char[0], char[1]) path_components.append(dev) else: path_components.append(app_name) return robo_join(*path_components) def create_dest_dirs(path): dest_dir = os.path.expanduser(path) if not os.path.exists(dest_dir): try: os.makedirs(dest_dir) except OSError as error: raise RoboError("Unable to create directory at %s." % dest_dir, error) def extract_app_icon(facts, png_path): icon_path = facts["icon_path"] png_path_absolute = os.path.expanduser(png_path) create_dest_dirs(os.path.dirname(png_path_absolute)) if not icon_path.endswith(".icns"): icon_path = icon_path + ".icns" if not os.path.exists(png_path_absolute): cmd = ( '/usr/bin/sips -s format png "%s" --out "%s" ' "--resampleHeightWidthMax 300" % (icon_path, png_path_absolute) ) exitcode, _, err = get_exitcode_stdout_stderr(cmd) if exitcode == 0: robo_print("%s" % png_path, LogLevel.VERBOSE, 4) facts["icons"].append(png_path) else: facts["warnings"].append( "An error occurred during icon extraction: %s" % err ) def get_exitcode_stdout_stderr(cmd, stdin="", text=True): robo_print("Shell command: %s" % cmd, LogLevel.DEBUG, 4) try: proc = subprocess.Popen( shlex.split(cmd), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=text, ) out, err = proc.communicate(stdin) except UnicodeDecodeError: proc = subprocess.Popen( shlex.split(cmd), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=False, ) out, err = proc.communicate(stdin) exitcode = proc.returncode return exitcode, out, err def print_welcome_text(): welcome_text = ( """ ----------------------------------- | Welcome to Recipe Robot v%s. 
| ----------------------------------- \ _[]_ \ [oo] d-||-b || _/ \_ """ % __version__ ) robo_print(welcome_text) def print_death_text(): death_text = """ _[]_ [xx] q-||-p || _/ \_ """ robo_print(death_text) def reset_term_colors(): sys.stdout.write(ENDC) def get_user_defaults(): prefs_dict = { key: CFPreferencesCopyAppValue(key, BUNDLE_ID) for key in PREFERENCE_KEYS } return prefs_dict def save_user_defaults(prefs): cfprefs_keylist = CFPreferencesCopyKeyList( BUNDLE_ID, kCFPreferencesCurrentUser, kCFPreferencesAnyHost ) if cfprefs_keylist: external_keys = [x for x in cfprefs_keylist if x not in PREFERENCE_KEYS] for ext_key in external_keys: CFPreferencesSetAppValue(ext_key, None, BUNDLE_ID) for key in PREFERENCE_KEYS: if prefs.get(key): CFPreferencesSetAppValue(key, prefs[key], BUNDLE_ID) CFPreferencesAppSynchronize(BUNDLE_ID) def any_item_in_string(items, test_string): return any([True for item in items if item in test_string]) def create_existing_recipe_list(facts): app_name = facts["app_name"] recipes = facts["recipes"] recipe_searches = [quote_plus(app_name)] app_name_no_space = quote_plus("".join(app_name.split())) if app_name_no_space not in recipe_searches: recipe_searches.append(app_name_no_space) app_name_no_symbol = quote_plus(re.sub(r"[^\w]", "", app_name)) if app_name_no_symbol not in recipe_searches: recipe_searches.append(app_name_no_symbol) for this_search in recipe_searches: robo_print( "Searching for existing AutoPkg recipes for %s..." % this_search, LogLevel.VERBOSE, ) if os.path.isfile(os.path.expanduser("~/.autopkg_gh_token")): robo_print("Using GitHub token file", LogLevel.VERBOSE, 4) cmd = ( "/usr/local/bin/autopkg search --path-only --use-token " "%s" % this_search ) else: cmd = "/usr/local/bin/autopkg search --path-only %s" % this_search exitcode, out, err = get_exitcode_stdout_stderr(cmd) out = out.split("\n") is_existing = False for recipe in recipes: recipe_name = "%s.%s.recipe" % (this_search, recipe["type"]) for line in out: if line.lower().startswith(recipe_name.lower()): if is_existing is False: robo_print("Found existing recipe(s):", LogLevel.LOG, 4) is_existing = True recipe["existing"] = True robo_print(recipe_name, LogLevel.LOG, 8) break if is_existing is True: raise RoboError( "Sorry, AutoPkg recipes already exist for this app, and " "I can't blend new recipes with existing recipes.\n\nHere " "are my suggestions:\n\t- See if one of the above recipes " "meets your needs, either as-is or using an override." "\n\t- Write your own recipe using one of the above as " "the ParentRecipe.\n\t- Use Recipe Robot to assist in " "the creation of a new child recipe, as seen here:\n\t " "https://youtu.be/5VKDzY8bBxI?t=2829" ) else: robo_print("No results", LogLevel.VERBOSE, 4)
Apache License 2.0
adafruit/adafruit_python_bluefruitle
Adafruit_BluefruitLE/corebluetooth/metadata.py
CoreBluetoothMetadata.get_all
python
def get_all(self, cbobjects): try: with self._lock: return [self._metadata[x] for x in cbobjects] except KeyError: raise RuntimeError('Failed to find expected metadata for CoreBluetooth object!')
Retrieve a list of metadata objects associated with the specified list of CoreBluetooth objects. If an object cannot be found then an exception is thrown.
https://github.com/adafruit/adafruit_python_bluefruitle/blob/a01dec2c88fa38143afb855e1df4f9ac774156b7/Adafruit_BluefruitLE/corebluetooth/metadata.py#L56-L69
import threading class CoreBluetoothMetadata(object): def __init__(self): self._metadata = {} self._lock = threading.Lock() def list(self): with self._lock: return self._metadata.values() def get(self, cbobject): with self._lock: return self._metadata.get(cbobject, None)
MIT License
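A standalone sketch of the lock-protected bulk lookup used by get_all() in the record above; the metadata dict, keys and error message are invented stand-ins for the CoreBluetooth objects.

import threading

_metadata = {"peripheral-1": {"name": "UART"}, "peripheral-2": {"name": "HRM"}}
_lock = threading.Lock()

def get_all(keys):
    try:
        with _lock:
            # Look up every key while holding the lock, as the library does.
            return [_metadata[k] for k in keys]
    except KeyError:
        raise RuntimeError("Failed to find expected metadata!")

print(get_all(["peripheral-1", "peripheral-2"]))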
m0nhawk/grafana_api
grafana_api/api/admin.py
Admin.create_user
python
def create_user(self, user): create_user_path = "/admin/users" r = self.api.POST(create_user_path, json=user) return r
Create a new global user via the admin API. :param user: dict describing the user to create (e.g. name, email, login, password) :return: the API response
https://github.com/m0nhawk/grafana_api/blob/d2e152022923a02b7d5918fa6c8e447d832867c9/grafana_api/api/admin.py#L27-L35
from .base import Base class Admin(Base): def __init__(self, api): super(Admin, self).__init__(api) self.api = api def settings(self): path = "/admin/settings" r = self.api.GET(path) return r def stats(self): path = "/admin/stats" r = self.api.GET(path) return r
MIT License
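A hedged usage sketch for Admin.create_user above, assuming the grafana_api package is installed and a Grafana instance with admin credentials is reachable at the given host; all values are placeholders.

from grafana_api.grafana_face import GrafanaFace

grafana = GrafanaFace(auth=("admin", "admin"), host="localhost", port=3000)
new_user = {
    "name": "Jane Doe",
    "email": "jane@example.com",
    "login": "jane",
    "password": "s3cret",
}
print(grafana.admin.create_user(new_user))  # e.g. {'id': 5, 'message': 'User created'}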
arthurmeyer/convolutional_deep_belief_network
crbm_backup.py
CRBM.draw_samples
python
def draw_samples(self, mean_activation, method ='forward'): if self.gaussian_unit and method == 'backward': mu = tf.reshape(mean_activation, [-1]) dist = tf.contrib.distributions.MultivariateNormalDiag(mu, self.sigma) samples = dist.sample() return tf.reshape(samples,[self.batch_size,self.visible_height,self.visible_width,self.visible_channels]) elif method == 'forward': height = self.hidden_height width = self.hidden_width channels = self.filter_number elif method == 'backward': height = self.visible_height width = self.visible_width channels = self.visible_channels return tf.select(tf.random_uniform([self.batch_size,height,width,channels]) - mean_activation < 0, tf.ones([self.batch_size,height,width,channels]), tf.zeros([self.batch_size,height,width,channels]))
INTENT : Draw samples from the distribution of the specified parameter ------------------------------------------------------------------------------------------------------------------------------------------ PARAMETERS : mean_activation : parameter of the distribution to draw samples from method : which direction to draw samples in, i.e. forward or backward ------------------------------------------------------------------------------------------------------------------------------------------ REMARK : If FORWARD then samples are drawn for the HIDDEN layer (BERNOULLI) If BACKWARD then samples are drawn for the VISIBLE layer (BERNOULLI OR GAUSSIAN if self.gaussian_unit = True)
https://github.com/arthurmeyer/convolutional_deep_belief_network/blob/9da4ea1f15e9a38f005dafd85cdd8616083c7d3d/crbm_backup.py#L219-L243
from __future__ import division import tensorflow as tf import numpy as np class CRBM(object): def __init__(self, name, fully_connected = True, v_height = 1, v_width = 1, v_channels = 784, f_height = 1, f_width = 1, f_number = 400, init_biases_H = -3, init_biases_V = 0.01, init_weight_stddev = 0.01, gaussian_unit = True, gaussian_variance = 0.2, prob_maxpooling = False, padding = False, batch_size = 20, learning_rate = 0.0001, learning_rate_decay = 0.5, momentum = 0.9, decay_step = 50000, weight_decay = 0.1, sparsity_target = 0.1, sparsity_coef = 0.1): try: if fully_connected and not (v_height == 1 and v_width == 1 and f_height == 1 and f_width == 1): raise ValueError('Trying to initialize CRBM ' + name + ' which is fully connected but height and width of visible and filters are not set to 1') if fully_connected and prob_maxpooling: raise ValueError('Trying to initialize CRBM ' + name + ' which is fully connected but with max pooling enabled (should set prob_maxpooling to False)') if fully_connected and padding: raise ValueError('Trying to initialize CRBM ' + name + ' which is fully connected but with padding enabled (should set padding to False)') if padding and ((f_height % 2 == 0) or (f_width % 2 == 0)): raise ValueError('Trying to initialize CRBM ' + name + ' which has padded enable but filter dimension are not odd (padding feature only support odd size for filter dimension)') self.name = name self.fully_connected = fully_connected self.visible_height = v_height self.visible_width = v_width self.visible_channels = v_channels self.filter_height = f_height self.filter_width = f_width self.filter_number = f_number self.gaussian_unit = gaussian_unit if gaussian_unit: self.gaussian_variance = gaussian_variance self.prob_maxpooling = prob_maxpooling self.padding = padding if padding: self.hidden_height = v_height self.hidden_width = v_width else: self.hidden_height = v_height - f_height + 1 self.hidden_width = v_width - f_width + 1 self.batch_size = batch_size self.learning_rate = learning_rate self.learning_rate_decay = learning_rate_decay self.momentum = momentum self.decay_step = decay_step self.weight_decay = weight_decay self.sparsity_target = sparsity_target self.sparsity_coef = sparsity_coef with tf.variable_scope(name): with tf.device('/cpu:0'): self.kernels = tf.get_variable('weights', (f_height, f_width,v_channels,f_number), initializer=tf.truncated_normal_initializer(stddev=init_weight_stddev, dtype=tf.float32), dtype=tf.float32) self.biases_V = tf.get_variable('biases_V', (v_channels), initializer=tf.constant_initializer(init_biases_V), dtype=tf.float32) self.biases_H = tf.get_variable('biases_H', (f_number), initializer=tf.constant_initializer(init_biases_H), dtype=tf.float32) self.vitesse_kernels = tf.get_variable('vitesse_weights', (f_height, f_width,v_channels,f_number), initializer=tf.constant_initializer(0), dtype=tf.float32) self.vitesse_biases_V = tf.get_variable('vitesse_biases_V', (v_channels), initializer=tf.constant_initializer(0), dtype=tf.float32) self.vitesse_biases_H = tf.get_variable('vitesse_biases_H', (f_number), initializer=tf.constant_initializer(0), dtype=tf.float32) if gaussian_unit: self.sigma = tf.get_variable('sigma', (v_width*v_height*batch_size*v_channels), initializer=tf.constant_initializer(self.gaussian_variance * self.gaussian_variance), dtype = tf.float32) except ValueError as error: print('--------------------------') print(error.args) print('--------------------------') def compute_energy(self, visible, hidden, method = 'forward'): if method == 
'forward': if self.padding: conv = tf.nn.conv2d(visible, self.kernels, [1, 1, 1, 1], padding='SAME') else: conv = tf.nn.conv2d(visible, self.kernels, [1, 1, 1, 1], padding='VALID') operand = hidden elif method == 'backward': if self.padding: conv = tf.nn.conv2d(hidden, self._get_flipped_kernel(), [1, 1, 1, 1], padding='SAME') else: conv = tf.nn.conv2d(self._get_padded_hidden(hidden), self._get_flipped_kernel(), [1, 1, 1, 1], padding='VALID') operand = visible weight = tf.reduce_sum(tf.mul(operand,conv)) bias_H = tf.reduce_sum(tf.mul(self.biases_H,tf.reduce_sum(hidden,[0,1,2]))) if self.gaussian_unit: weight = tf.div(weight,self.gaussian_variance) bias_V = tf.reduce_sum(tf.square(tf.sub(visible,tf.reshape(self.biases_V,[1,1,1,self.visible_channels])))) bias_V = tf.div(bias_V,2*self.gaussian_variance*self.gaussian_variance) output = tf.sub(bias_V,tf.add(bias_H,weight)) else: bias_V = tf.reduce_sum(tf.mul(self.biases_V,tf.reduce_sum(visible,[0,1,2]))) output = tf.mul(-1,tf.add(weight,tf.add(bias_H,bias_V))) return tf.div(output,self.batch_size) def infer_probability(self, operand, method, result = 'hidden'): 'Computing HIDDEN layer with VISIBLE layer given' if method == 'forward': if self.padding: conv = tf.nn.conv2d(operand, self.kernels, [1, 1, 1, 1], padding='SAME') else: conv = tf.nn.conv2d(operand, self.kernels, [1, 1, 1, 1], padding='VALID') if self.gaussian_unit: conv = tf.div(conv,self.gaussian_variance) bias = tf.nn.bias_add(conv, self.biases_H) if self.prob_maxpooling: exp = tf.exp(bias) custom_kernel = tf.constant(1.0, shape=[2,2,self.filter_number,1]) sum = tf.nn.depthwise_conv2d(exp, custom_kernel, [1, 2, 2, 1], padding='VALID') sum = tf.add(1.0,sum) ret_kernel = np.zeros((2,2,self.filter_number,self.filter_number)) for i in range(2): for j in range(2): for k in range(self.filter_number): ret_kernel[i,j,k,k] = 1 custom_kernel_bis = tf.constant(ret_kernel,dtype = tf.float32) sum_bis = tf.nn.conv2d_transpose(sum, custom_kernel_bis, (self.batch_size,self.hidden_height,self.hidden_width,self.filter_number), strides= [1, 2, 2, 1], padding='VALID', name=None) if result == 'hidden': return tf.div(exp,sum_bis) elif result == 'pooling': return tf.sub(1.0,tf.div(1.0,sum)) return tf.sigmoid(bias) 'Computing VISIBLE layer with HIDDEN layer given' if method == 'backward': if self.padding: conv = tf.nn.conv2d(operand, self._get_flipped_kernel(), [1, 1, 1, 1], padding='SAME') else: conv = tf.nn.conv2d(self._get_padded_hidden(operand), self._get_flipped_kernel(), [1, 1, 1, 1], padding='VALID') if self.gaussian_unit: conv = tf.mul(conv,self.gaussian_variance) bias = tf.nn.bias_add(conv, self.biases_V) if self.gaussian_unit: return bias return tf.sigmoid(bias)
MIT License
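The 'forward' branch of draw_samples above is a per-unit Bernoulli draw: a hidden unit fires when a uniform random number falls below its mean activation. A small NumPy sketch of that rule (an illustration only, not the TensorFlow graph in the record):

import numpy as np

rng = np.random.default_rng(0)
mean_activation = np.array([0.1, 0.5, 0.9])   # hypothetical sigmoid outputs
# Equivalent of tf.select(uniform - mean_activation < 0, ones, zeros)
samples = (rng.uniform(size=mean_activation.shape) < mean_activation).astype(np.float32)
print(samples)   # 1.0 wherever the uniform draw fell below the activation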
azuki-miho/rfcr
KPConv_deform_S3DIS/utils/visualizer.py
ModelVisualizer.top_relu_activations
python
def top_relu_activations(self, model, dataset, relu_idx=0, top_num=5): all_ops = [op for op in tf.get_default_graph().get_operations() if op.name.startswith('KernelPointNetwork') and op.name.endswith('LeakyRelu')] print('\nPossible Relu indices:') for i, t in enumerate(all_ops): print(i, ': ', t.name) if relu_idx is not None: features_tensor = all_ops[relu_idx].outputs[0] else: relu_idx = int(input('Choose a Relu index: ')) features_tensor = all_ops[relu_idx].outputs[0] layer_idx = int(features_tensor.name.split('/')[1][6:]) if 'strided' in all_ops[relu_idx].name and not ('strided' in all_ops[relu_idx+1].name): layer_idx += 1 features_dim = int(features_tensor.shape[1]) radius = model.config.first_subsampling_dl * model.config.density_parameter * (2 ** layer_idx) print('You chose to compute the output of operation named:\n' + all_ops[relu_idx].name) print('\nIt contains {:d} features.'.format(int(features_tensor.shape[1]))) print('\n****************************************************************************') self.top_features = -np.ones((top_num, features_dim)) self.top_classes = -np.ones((top_num, features_dim), dtype=np.int32) self.saving = model.config.saving num_votes = 3 self.visu_path = None self.fmt_str = None if model.config.saving: self.visu_path = join('visu', 'visu_' + model.saving_path.split('/')[-1], 'top_activations', 'Relu{:02d}'.format(relu_idx)) self.fmt_str = 'f{:04d}_top{:02d}.ply' if not exists(self.visu_path): makedirs(self.visu_path) mean_dt = np.zeros(2) last_display = time.time() for v in range(num_votes): if model.config.dataset.startswith('S3DIS'): self.sess.run(dataset.val_init_op) else: self.sess.run(dataset.test_init_op) count = 0 while True: try: if model.config.dataset.startswith('ShapeNetPart'): if model.config.dataset.split('_')[1] == 'multi': label_op = model.inputs['super_labels'] else: label_op = model.inputs['point_labels'] elif model.config.dataset.startswith('S3DIS'): label_op = model.inputs['point_labels'] elif model.config.dataset.startswith('Scannet'): label_op = model.inputs['point_labels'] elif model.config.dataset.startswith('ModelNet40'): label_op = model.inputs['labels'] else: raise ValueError('Unsupported dataset') t = [time.time()] ops = (all_ops[-1].outputs[0], features_tensor, label_op, model.inputs['points'], model.inputs['pools'], model.inputs['in_batches']) _, stacked_features, labels, all_points, all_pools, in_batches = self.sess.run(ops, {model.dropout_prob: 1.0}) t += [time.time()] count += in_batches.shape[0] max_ind = np.max(in_batches) stacked_batches = [] for b_i, b in enumerate(in_batches): stacked_batches += [b[b < max_ind - 0.5]*0+b_i] stacked_batches = np.hstack(stacked_batches) for l in range(model.config.num_layers - 1): if l >= layer_idx: break stacked_batches = stacked_batches[all_pools[l][:, 0]] for b_i, b in enumerate(in_batches): b = b[b < max_ind - 0.5] in_points = all_points[0][b] features = stacked_features[stacked_batches == b_i] points = all_points[layer_idx][stacked_batches == b_i] if model.config.dataset in ['ShapeNetPart_multi', 'ModelNet40_classif']: l = labels[b_i] else: l = np.argmax(np.bincount(labels[b])) self.update_top_activations(features, labels[b_i], points, in_points, radius) t += [time.time()] mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1])) if (t[-1] - last_display) > 1.0: last_display = t[-1] if model.config.dataset.startswith('S3DIS'): completed = count / (model.config.validation_size * model.config.batch_num) else: completed = count / dataset.num_test message = 'Vote {:d} 
: {:.1f}% (timings : {:4.2f} {:4.2f})' print(message.format(v, 100 * completed, 1000 * (mean_dt[0]), 1000 * (mean_dt[1]))) except tf.errors.OutOfRangeError: break return relu_idx
Test the model on test dataset to see which points activate the most each neurons in a relu layer :param model: model used at training :param dataset: dataset used at training :param relu_idx: which features are to be visualized :param top_num: how many top candidates are kept per features
https://github.com/azuki-miho/rfcr/blob/039733f25818d0d3db6af8c9e00e7ad989b69ee1/KPConv_deform_S3DIS/utils/visualizer.py#L69-L229
import tensorflow as tf import numpy as np from sklearn.neighbors import KDTree from os import makedirs, remove, rename, listdir from os.path import exists, join import time from mayavi import mlab import sys import tensorflow.contrib.graph_editor as ge from utils.ply import write_ply, read_ply from utils.config import Config class ModelVisualizer: def __init__(self, model, restore_snap=None): model.prob_logits = tf.nn.softmax(model.logits) my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='KernelPointNetwork') self.saver = tf.train.Saver(my_vars, max_to_keep=100) on_CPU = False if on_CPU: cProto = tf.ConfigProto(device_count={'GPU': 0}) else: cProto = tf.ConfigProto() cProto.gpu_options.allow_growth = True self.sess = tf.Session(config=cProto) self.sess.run(tf.global_variables_initializer()) if (restore_snap is not None): self.saver.restore(self.sess, restore_snap) print("Model restored.")
MIT License
pimoroni/piano-hat
library/pianohat.py
on_octave_up
python
def on_octave_up(handler): global _on_octave_up setup() _on_octave_up = handler
Register handler for press/release of octave_up key :param handler: handler function to register See on_note for details.
https://github.com/pimoroni/piano-hat/blob/ec5db83f3fb5a48702e6c155f5161380fef52ded/library/pianohat.py#L148-L159
import signal from sys import exit try: import cap1xxx except ImportError: raise ImportError("This library requires the cap1xxx module\nInstall with: sudo pip install cap1xxx") __version__ = '0.1.0' _on_note = None _on_octave_up = None _on_octave_down = None _on_instrument = None _is_setup = False _pressed = [False for x in range(16)] PRESSED = True RELEASED = False C = 0 CSHARP = 1 D = 2 DSHARP = 3 E = 4 F = 5 FSHARP = 6 G = 7 GSHARP = 8 A = 9 ASHARP = 10 B = 11 C2 = 12 OCTAVE_DOWN = 13 OCTAVE_UP = 14 INSTRUMENT = 15 def _setup_cap(cap): for x in range(8): cap._write_byte(0x30 + x, 0b00000110) cap._write_byte(cap1xxx.R_LED_BEHAVIOUR_1, 0b00000000) cap._write_byte(cap1xxx.R_LED_BEHAVIOUR_2, 0b00000000) cap._write_byte(cap1xxx.R_LED_LINKING, 0b11111111) cap._write_byte(cap1xxx.R_SAMPLING_CONFIG, 0b00000000) cap._write_byte(cap1xxx.R_SENSITIVITY, 0b01100000) cap._write_byte(cap1xxx.R_GENERAL_CONFIG, 0b00111000) cap._write_byte(cap1xxx.R_CONFIGURATION2, 0b01100000) cap.set_touch_delta(10) cap.set_led_direct_ramp_rate(0,0) def _handle_event(cap_index, event, state): global _pressed offset = 0 if cap_index == 1: offset = 8 channel = offset + event.channel _pressed[channel] = (state == PRESSED) if (channel) == OCTAVE_DOWN and callable(_on_octave_down): _on_octave_down(channel, state) if (channel) == OCTAVE_UP and callable(_on_octave_up): _on_octave_up(channel, state) if (channel) == INSTRUMENT and callable(_on_instrument): _on_instrument(channel, state) if (channel) <= 12 and callable(_on_note): _on_note(channel, state) def auto_leds(enable = True): setup() _piano_ctog._write_byte(cap1xxx.R_LED_LINKING, 0b11111111 * enable) _piano_atoc._write_byte(cap1xxx.R_LED_LINKING, 0b11111111 * enable) def set_led(index, state): setup() if index >= 8: _piano_atoc.set_led_state(index-8,state) else: _piano_ctog.set_led_state(index,state) def set_led_ramp_rate(rise,fall): setup() _piano_ctog.set_led_direct_ramp_rate(rise, fall) _piano_atoc.set_led_direct_ramp_rate(rise, fall) def get_state(index=-1): setup() if index > 0 and index < 16: return _pressed[index] else: return _pressed def on_note(handler): global _on_note setup() _on_note = handler
MIT License
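A minimal sketch of registering an octave_up handler on real Piano HAT hardware; the handler signature (channel, state) matches what _handle_event in the record's context passes, and the rest of the script is an assumption about typical usage.

import signal
import pianohat

def handle_octave_up(channel, state):
    # state is True on press, False on release
    if state:
        print('octave up pressed (channel {})'.format(channel))

pianohat.on_octave_up(handle_octave_up)
signal.pause()   # keep the script alive so touch events keep arriving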
facebookresearch/reagent
reagent/models/convolutional_network.py
ConvolutionalNetwork.forward
python
def forward(self, input) -> torch.FloatTensor: x = self.conv_forward(input) x = x.view(-1, self.fc_input_dim) return self.feed_forward.forward(x)
Forward pass for generic convnet DNNs. Assumes activation names are valid PyTorch activation names. :param input: image tensor
https://github.com/facebookresearch/reagent/blob/57b58a8b3a6b74bb87a197b73a6cd108ddad895e/reagent/models/convolutional_network.py#L89-L97
import collections import logging import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init from reagent.models.fully_connected_network import FullyConnectedNetwork logger = logging.getLogger(__name__) CnnParameters = collections.namedtuple( "CnnParameters", [ "conv_dims", "conv_height_kernels", "conv_width_kernels", "pool_types", "pool_kernels_strides", "num_input_channels", "input_height", "input_width", ], ) class ConvolutionalNetwork(nn.Module): def __init__(self, cnn_parameters, layers, activations, use_layer_norm) -> None: super().__init__() self.conv_dims = cnn_parameters.conv_dims self.conv_height_kernels = cnn_parameters.conv_height_kernels self.conv_width_kernels = cnn_parameters.conv_width_kernels self.use_layer_norm = use_layer_norm self.conv_layers: nn.ModuleList = nn.ModuleList() self.pool_layers: nn.ModuleList = nn.ModuleList() self.layer_norm_layers: nn.ModuleList = nn.ModuleList() for i, _ in enumerate(self.conv_dims[1:]): self.conv_layers.append( nn.Conv2d( self.conv_dims[i], self.conv_dims[i + 1], kernel_size=( self.conv_height_kernels[i], self.conv_width_kernels[i], ), ) ) nn.init.kaiming_normal_(self.conv_layers[i].weight) if cnn_parameters.pool_types[i] == "max": self.pool_layers.append( nn.MaxPool2d(kernel_size=cnn_parameters.pool_kernels_strides[i]) ) else: assert False, "Unknown pooling type".format(layers) if self.use_layer_norm: self.layer_norm_layers.append(nn.GroupNorm(1, self.conv_dims[i + 1])) input_size = ( cnn_parameters.num_input_channels, cnn_parameters.input_height, cnn_parameters.input_width, ) conv_out = self.conv_forward(torch.ones(1, *input_size)) self.fc_input_dim = int(np.prod(conv_out.size()[1:])) layers[0] = self.fc_input_dim self.feed_forward = FullyConnectedNetwork( layers, activations, use_layer_norm=use_layer_norm ) def conv_forward(self, input): x = input for i, _ in enumerate(self.conv_layers): x = self.conv_layers[i](x) if self.use_layer_norm: x = self.layer_norm_layers[i](x) x = F.relu(x) x = self.pool_layers[i](x) return x
BSD 3-Clause New or Revised License
cloverhealth/pytest-pgsql
pytest_pgsql/ext.py
create_engine_fixture
python
def create_engine_fixture(name, scope='session', **engine_params): @pytest.fixture(name=name, scope=scope) def _engine_fixture(database_uri, request): engine = sqla.create_engine(database_uri, **engine_params) quote_id = engine.dialect.preparer(engine.dialect).quote_identifier opt_string = request.config.getoption('--pg-extensions') to_install = (s.strip() for s in opt_string.split(',')) query_string = ';'.join( 'CREATE EXTENSION IF NOT EXISTS %s' % quote_id(ext) for ext in to_install if ext) if query_string: engine.execute('BEGIN TRANSACTION; ' + query_string + '; COMMIT;') yield engine engine.dispose() return _engine_fixture
A factory function that creates a fixture with a customized SQLAlchemy :class:`~sqlalchemy.engine.Engine`. Because setup and teardown will require additional time and resources if you're using both a custom *and* the default engine, if you need this engine in more than one module you might want to consider using this scoped at the session level, i.e. initialized and torn down once for the entire test run. The tradeoff is that if you use multiple engines, each custom one will use additional resources such as connection pools and memory for the entirety of the session. If you only need this custom engine in a few places, it may be more resource-efficient to scope this to an individual test, class, or module. Any extensions declared using the ``--pg-extensions`` command-line option will be installed as part of this engine's setup process. .. warning:: Because an engine performs no cleanup itself, any changes made with an engine fixture directly are *not* rolled back and can result in the failure of other tests (usually with a :class:`~pytest_pgsql.errors.DatabaseIsDirtyError` at teardown). You should only use this in conjunction with :meth:`~pytest_pgsql.database.PostgreSQLTestDBBase.create_fixture` to create a *database* fixture that you'll use. Engine fixtures shouldn't be used directly. Arguments: name (str): The name of the fixture. It must be unique, so ``pg_engine`` is not allowed. scope (str): The scope that this customized engine should have. Valid values are: * ``class``: The engine is initialized and torn down for each test class that uses it. * ``function``: The engine is initialized and torn down for each test that uses it. * ``module``: The engine is initialized and torn down once per module that uses it. * ``session``: The engine is initialized and torn down once per pytest run. Default: ``session``. **engine_params: Keyword arguments to pass to :func:`sqlalchemy.create_engine`. (You cannot change the connection URL with this.) Usage: .. code-block:: python # conftest.py import simplejson as json # Create an engine fixture named `jengine` jengine = pytest_pgsql.create_engine_fixture( 'jengine', json_serializer=json.dumps, json_deserializer=json.loads) # Create a new database fixture that uses our `jengine`. jdb = pytest_pgsql.PostgreSQLTestDB.create_fixture('jdb', 'jengine') # ---------------- # test_json.py import datetime import sqlalchemy as sqla import sqlalchemy.dialects.postgresql as sqla_pg def test_blah(jdb): meta = sqla.MetaData(bind=jdb.connection) table = sqla.Table('test', meta, sqla.Column('col', sqla_pg.JSON)) meta.create_all() jdb.connection.execute(table.insert(), [ {'col': datetime.datetime.now()} ])
https://github.com/cloverhealth/pytest-pgsql/blob/75c7be051545ea20420557d90886cbd32eb46151/pytest_pgsql/ext.py#L7-L104
import pytest import sqlalchemy as sqla
BSD 3-Clause New or Revised License
henniggroup/gasp-python
gasp/general.py
Organism.__init__
python
def __init__(self, cell, id_generator, maker, composition_space): self.cell = cell self.composition = self.cell.composition self.composition_vector = self.compute_composition_vector( composition_space) self.total_energy = None self.value = None self.epa = None self.fitness = None self.relative_fitness = None self.selection_prob = None self.selection_loc = None self.relative_selection_prob = None self.relative_selection_loc = None self.is_active = False self._id = id_generator.make_id() self.made_by = maker
Makes an Organism. Args: cell: the cell of this organism, as a Cell object id_generator: the IDGenerator used to assign id numbers to all organisms maker: the name of algorithm that made the organism, as a string. Either a creator or a variation composition_space: the CompositionSpace of the search
https://github.com/henniggroup/gasp-python/blob/0c8d993c82e0e1c69a05b3c34bbb2fcbbdbb7f07/gasp/general.py#L74-L116
from __future__ import division, unicode_literals, print_function from pymatgen.core.structure import Structure, Molecule from pymatgen.core.lattice import Lattice from pymatgen.core.composition import Composition from pymatgen.core.periodic_table import Element, DummySpecie from pymatgen.analysis.phase_diagram import CompoundPhaseDiagram from pymatgen.analysis.phase_diagram import PDEntry from pymatgen.transformations.standard_transformations import RotationTransformation import numpy as np class IDGenerator(object): def __init__(self): self.id = 0 def make_id(self): self.id += 1 return self.id class Organism(object):
MIT License
hobson/aima
aima/agents.py
Thing.show_state
python
def show_state(self): print "I don't know how to show_state."
Display the agent's internal state. Subclasses should override.
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/agents.py#L55-L57
from aima.utils import * import random, copy class Thing(object): def __repr__(self): return '<%s>' % getattr(self, '__name__', self.__class__.__name__) def is_alive(self): return hasattr(self, 'alive') and self.alive
MIT License
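Thing.show_state is meant to be overridden, so a toy subclass is the natural usage sketch (Python 2, matching the library); the Lamp class here is invented purely for illustration.

# Python 2, like the library itself
from aima.agents import Thing

class Lamp(Thing):
    """Toy Thing that overrides show_state, as the docstring suggests."""
    def __init__(self):
        self.on = False

    def show_state(self):
        print "Lamp is %s" % ("on" if self.on else "off")

Lamp().show_state()   # prints: Lamp is off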
vyxal/vyxal
vyxal/LazyList.py
vyxalify
python
def vyxalify(value: Any) -> Any: if ( isinstance(value, int) or isinstance(value, Rational) or isinstance(value, str) or isinstance(value, list) or isinstance(value, LazyList) ): return value else: return LazyList(map(vyxalify, value))
Takes a value and returns it as one of the four types we use here.
https://github.com/vyxal/vyxal/blob/327d3b0a0c58a0fcd4d459bcb8bb063d16b0367e/vyxal/LazyList.py#L15-L27
import copy import types from typing import Any, Union from sympy import Rational
MIT License
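A short sketch of vyxalify's two behaviours, assuming both names are importable from vyxal.LazyList as the record's path suggests: scalars, strings and lists pass through unchanged, while other iterables get wrapped in a LazyList.

from vyxal.LazyList import LazyList, vyxalify

print(vyxalify(5))                         # 5 -- ints pass through untouched
lazy = vyxalify(x * 2 for x in range(4))   # generators get wrapped
print(isinstance(lazy, LazyList))          # True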
matbarofex/pyrofex
src/pyRofex/service.py
_set_environment_parameter
python
def _set_environment_parameter(parameter, value, environment): environment = _validate_environment(environment) _validate_parameter(parameter, environment) globals.environment_config[environment][parameter] = value
Set environment parameter. Set 'value' into the specified 'parameter' for the environment 'environment'. :param parameter: parameter of the environment to be set. :type parameter: string :param value: new value for the parameter. :type value: string :param environment: the environment to set the parameter. :type environment: Environment
https://github.com/matbarofex/pyrofex/blob/536dd896d7f45dd066fe8ca31986ed1da011a942/src/pyRofex/service.py#L62-L76
from inspect import getargspec from .clients.rest_rfx import RestClient from .clients.websocket_rfx import WebSocketClient from .components import globals from .components.exceptions import ApiException from .components.enums import Environment from .components.enums import MarketDataEntry from .components.enums import TimeInForce from .components.enums import Market def initialize(user, password, account, environment, proxies=None): _validate_environment(environment) _set_environment_parameters(user, password, account, environment, proxies) globals.environment_config[environment]["rest_client"] = RestClient(environment) globals.environment_config[environment]["ws_client"] = WebSocketClient(environment) set_default_environment(environment) def set_default_environment(environment): _validate_environment(environment) globals.default_environment = environment
MIT License
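_set_environment_parameter is internal; it is reached through the public initialize() shown in the context, which stores user, password, account and proxies for the chosen environment. A sketch with hypothetical credentials, assuming the package re-exports initialize and Environment at the top level:

import pyRofex

# Hypothetical demo credentials for the reMarkets test environment.
pyRofex.initialize(user='demo_user',
                   password='demo_pass',
                   account='REM1234',
                   environment=pyRofex.Environment.REMARKET)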
juju/charm-helpers
charmhelpers/contrib/storage/linux/lvm.py
extend_logical_volume_by_device
python
def extend_logical_volume_by_device(lv_name, block_device): cmd = ['lvextend', lv_name, block_device] check_call(cmd)
Extends the size of logical volume lv_name by the amount of free space on physical volume block_device. :param lv_name: str: name of logical volume to be extended (vg/lv format) :param block_device: str: name of block_device to be allocated to lv_name
https://github.com/juju/charm-helpers/blob/25b740578385d15b38f11bed8e4b6e732bdfb7c6/charmhelpers/contrib/storage/linux/lvm.py#L144-L153
import functools from subprocess import ( CalledProcessError, check_call, check_output, Popen, PIPE, ) def deactivate_lvm_volume_group(block_device): vg = list_lvm_volume_group(block_device) if vg: cmd = ['vgchange', '-an', vg] check_call(cmd) def is_lvm_physical_volume(block_device): try: check_output(['pvdisplay', block_device]) return True except CalledProcessError: return False def remove_lvm_physical_volume(block_device): p = Popen(['pvremove', '-ff', block_device], stdin=PIPE) p.communicate(input='y\n') def list_lvm_volume_group(block_device): vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() for lvm in pvd: lvm = lvm.decode('UTF-8') if lvm.strip().startswith('VG Name'): vg = ' '.join(lvm.strip().split()[2:]) return vg def create_lvm_physical_volume(block_device): check_call(['pvcreate', block_device]) def create_lvm_volume_group(volume_group, block_device): check_call(['vgcreate', volume_group, block_device]) def list_logical_volumes(select_criteria=None, path_mode=False): lv_diplay_attr = 'lv_name' if path_mode: lv_diplay_attr = 'vg_name,' + lv_diplay_attr cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] if select_criteria: cmd.extend(['--select', select_criteria]) lvs = [] for lv in check_output(cmd).decode('UTF-8').splitlines(): if not lv: continue if path_mode: lvs.append('/'.join(lv.strip().split())) else: lvs.append(lv.strip()) return lvs list_thin_logical_volume_pools = functools.partial( list_logical_volumes, select_criteria='lv_attr =~ ^t') list_thin_logical_volumes = functools.partial( list_logical_volumes, select_criteria='lv_attr =~ ^V')
Apache License 2.0
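A usage sketch for extend_logical_volume_by_device with hypothetical device and volume names; both helpers shell out to the LVM tools, so this needs root, a real /dev/sdb1, and the physical volume already added to the volume group.

from charmhelpers.contrib.storage.linux.lvm import (
    create_lvm_physical_volume,
    extend_logical_volume_by_device,
)

create_lvm_physical_volume('/dev/sdb1')                        # pvcreate /dev/sdb1
# assumes /dev/sdb1 has already been added to vg0 with vgextend
extend_logical_volume_by_device('vg0/lv_data', '/dev/sdb1')    # lvextend vg0/lv_data /dev/sdb1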
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/sensor/torque.py
convert_pid
python
def convert_pid(value): return int(value, 16)
Convert pid from hex string to integer.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/sensor/torque.py#L44-L46
import logging import re import voluptuous as vol from homeassistant.core import callback from homeassistant.components.http import HomeAssistantView from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import (CONF_EMAIL, CONF_NAME) from homeassistant.helpers.entity import Entity import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) API_PATH = '/api/torque' DEFAULT_NAME = 'vehicle' DEPENDENCIES = ['http'] DOMAIN = 'torque' ENTITY_NAME_FORMAT = '{0} {1}' SENSOR_EMAIL_FIELD = 'eml' SENSOR_NAME_KEY = r'userFullName(\w+)' SENSOR_UNIT_KEY = r'userUnit(\w+)' SENSOR_VALUE_KEY = r'k(\w+)' NAME_KEY = re.compile(SENSOR_NAME_KEY) UNIT_KEY = re.compile(SENSOR_UNIT_KEY) VALUE_KEY = re.compile(SENSOR_VALUE_KEY) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_EMAIL): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, })
MIT License
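convert_pid is a one-liner around int(value, 16); a self-contained sketch (the function is restated here so the example runs without a Home Assistant checkout):

def convert_pid(value):
    """Convert pid from hex string to integer, as in the record above."""
    return int(value, 16)

print(convert_pid('ff1005'))  # 16715781
print(convert_pid('0d'))      # 13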
openstack/openstacksdk
openstack/proxy.py
Proxy._update
python
def _update(self, resource_type, value, base_path=None, **attrs): res = self._get_resource(resource_type, value, **attrs) return res.commit(self, base_path=base_path)
Update a resource :param resource_type: The type of resource to update. :type resource_type: :class:`~openstack.resource.Resource` :param value: The resource to update. This must either be a :class:`~openstack.resource.Resource` or an id that corresponds to a resource. :param str base_path: Base part of the URI for updating resources, if different from :data:`~openstack.resource.Resource.base_path`. :param dict attrs: Attributes to be passed onto the :meth:`~openstack.resource.Resource.update` method to be updated. These should correspond to either :class:`~openstack.resource.Body` or :class:`~openstack.resource.Header` values on this resource. :returns: The result of the ``update`` :rtype: :class:`~openstack.resource.Resource`
https://github.com/openstack/openstacksdk/blob/b38f16e0e8f47f5bdbfd57506869bb6ee2533005/openstack/proxy.py#L417-L439
import urllib from urllib.parse import urlparse try: import simplejson JSONDecodeError = simplejson.scanner.JSONDecodeError except ImportError: JSONDecodeError = ValueError import iso8601 from keystoneauth1 import adapter from openstack import _log from openstack import exceptions from openstack import resource def _check_resource(strict=False): def wrap(method): def check(self, expected, actual=None, *args, **kwargs): if (strict and actual is not None and not isinstance(actual, resource.Resource)): raise ValueError("A %s must be passed" % expected.__name__) elif (isinstance(actual, resource.Resource) and not isinstance(actual, expected)): raise ValueError("Expected %s but received %s" % ( expected.__name__, actual.__class__.__name__)) return method(self, expected, actual, *args, **kwargs) return check return wrap class Proxy(adapter.Adapter): retriable_status_codes = None def __init__( self, session, statsd_client=None, statsd_prefix=None, prometheus_counter=None, prometheus_histogram=None, influxdb_config=None, influxdb_client=None, *args, **kwargs): kwargs.setdefault('retriable_status_codes', self.retriable_status_codes) super(Proxy, self).__init__(session=session, *args, **kwargs) self._statsd_client = statsd_client self._statsd_prefix = statsd_prefix self._prometheus_counter = prometheus_counter self._prometheus_histogram = prometheus_histogram self._influxdb_client = influxdb_client self._influxdb_config = influxdb_config if self.service_type: log_name = 'openstack.{0}'.format(self.service_type) else: log_name = 'openstack' self.log = _log.setup_logging(log_name) def request( self, url, method, error_message=None, raise_exc=False, connect_retries=1, global_request_id=None, *args, **kwargs): if not global_request_id: conn = self._get_connection() if conn: global_request_id = conn._global_request_id try: response = super(Proxy, self).request( url, method, connect_retries=connect_retries, raise_exc=raise_exc, global_request_id=global_request_id, **kwargs) for h in response.history: self._report_stats(h) self._report_stats(response) return response except Exception as e: self._report_stats(None, url, method, e) raise def _extract_name(self, url, service_type=None, project_id=None): url_path = urllib.parse.urlparse(url).path.strip() if url_path.startswith('/'): url_path = url_path[1:] if url_path.endswith('.json'): url_path = url_path[:-len('.json')] url_parts = [ x for x in url_path.split('/') if ( x != project_id and ( not project_id or (project_id and x != 'AUTH_' + project_id) )) ] if url_parts[-1] == 'detail': name_parts = url_parts[-2:] else: if (url_parts[0] and url_parts[0][0] == 'v' and url_parts[0][1] and url_parts[0][1].isdigit()): url_parts = url_parts[1:] name_parts = self._extract_name_consume_url_parts(url_parts) if url_path.endswith('tokens'): name_parts = ['tokens'] if not name_parts: name_parts = ['discovery'] return [part for part in name_parts if part] def _extract_name_consume_url_parts(self, url_parts): name_parts = [] for idx in range(0, len(url_parts)): if not idx % 2 and url_parts[idx]: if (len(url_parts) > idx + 1 and url_parts[idx][-1] == 's' and url_parts[idx][-2:] != 'is'): name_parts.append(url_parts[idx][:-1]) else: name_parts.append(url_parts[idx]) return name_parts def _report_stats(self, response, url=None, method=None, exc=None): if self._statsd_client: self._report_stats_statsd(response, url, method, exc) if self._prometheus_counter and self._prometheus_histogram: self._report_stats_prometheus(response, url, method, exc) if self._influxdb_client: 
self._report_stats_influxdb(response, url, method, exc) def _report_stats_statsd(self, response, url=None, method=None, exc=None): if response is not None and not url: url = response.request.url if response is not None and not method: method = response.request.method name_parts = self._extract_name(url, self.service_type, self.session.get_project_id()) key = '.'.join( [self._statsd_prefix, self.service_type, method] + name_parts) with self._statsd_client.pipeline() as pipe: if response is not None: pipe.timing(key, response.elapsed) pipe.incr(key) elif exc is not None: pipe.incr('%s.failed' % key) def _report_stats_prometheus(self, response, url=None, method=None, exc=None): if response is not None and not url: url = response.request.url if response is not None and not method: method = response.request.method parsed_url = urlparse(url) endpoint = "{}://{}{}".format( parsed_url.scheme, parsed_url.netloc, parsed_url.path) if response is not None: labels = dict( method=method, endpoint=endpoint, service_type=self.service_type, status_code=response.status_code, ) self._prometheus_counter.labels(**labels).inc() self._prometheus_histogram.labels(**labels).observe( response.elapsed.total_seconds() * 1000) def _report_stats_influxdb(self, response, url=None, method=None, exc=None): if response is not None and not url: url = response.request.url if response is not None and not method: method = response.request.method tags = dict( method=method, name='_'.join(self._extract_name( url, self.service_type, self.session.get_project_id())) ) fields = dict( attempted=1 ) if response is not None: fields['duration'] = int(response.elapsed.total_seconds() * 1000) tags['status_code'] = str(response.status_code) fields[str(response.status_code)] = 1 fields['%s.%s' % (method, response.status_code)] = 1 fields['status_code_val'] = response.status_code elif exc: fields['failed'] = 1 if 'additional_metric_tags' in self._influxdb_config: tags.update(self._influxdb_config['additional_metric_tags']) measurement = self._influxdb_config.get( 'measurement', 'openstack_api') if self._influxdb_config else 'openstack_api' measurement = '%s.%s' % (measurement, self.service_type) data = [dict( measurement=measurement, tags=tags, fields=fields )] try: self._influxdb_client.write_points(data) except Exception: self.log.exception('Error writing statistics to InfluxDB') def _version_matches(self, version): api_version = self.get_api_major_version() if api_version: return api_version[0] == version return False def _get_connection(self): return getattr( self, '_connection', getattr( self.session, '_sdk_connection', None)) def _get_resource(self, resource_type, value, **attrs): conn = self._get_connection() if value is None: res = resource_type.new(connection=conn, **attrs) elif (isinstance(value, dict) and not isinstance(value, resource.Resource)): res = resource_type._from_munch( value, connection=conn) res._update(**attrs) elif not isinstance(value, resource_type): res = resource_type.new( id=value, connection=conn, **attrs) else: res = value res._update(**attrs) return res def _get_uri_attribute(self, child, parent, name): if parent is None: value = getattr(child, name) else: value = resource.Resource._get_id(parent) return value def _find(self, resource_type, name_or_id, ignore_missing=True, **attrs): return resource_type.find(self, name_or_id, ignore_missing=ignore_missing, **attrs) @_check_resource(strict=False) def _delete(self, resource_type, value, ignore_missing=True, **attrs): res = self._get_resource(resource_type, 
value, **attrs) try: rv = res.delete(self) except exceptions.ResourceNotFound: if ignore_missing: return None raise return rv @_check_resource(strict=False)
Apache License 2.0
devopshq/teamcity
dohq_teamcity/models/agent_pool.py
AgentPool.href
python
def href(self, href): self._href = href
Sets the href of this AgentPool. :param href: The href of this AgentPool. # noqa: E501 :type: str
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/models/agent_pool.py#L124-L132
from dohq_teamcity.custom.base_model import TeamCityObject class AgentPool(TeamCityObject): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'int', 'name': 'str', 'href': 'str', 'max_agents': 'int', 'projects': 'Projects', 'agents': 'Agents', 'locator': 'str' } attribute_map = { 'id': 'id', 'name': 'name', 'href': 'href', 'max_agents': 'maxAgents', 'projects': 'projects', 'agents': 'agents', 'locator': 'locator' } def __init__(self, id=None, name=None, href=None, max_agents=None, projects=None, agents=None, locator=None, teamcity=None): self._id = None self._name = None self._href = None self._max_agents = None self._projects = None self._agents = None self._locator = None self.discriminator = None if id is not None: self.id = id if name is not None: self.name = name if href is not None: self.href = href if max_agents is not None: self.max_agents = max_agents if projects is not None: self.projects = projects if agents is not None: self.agents = agents if locator is not None: self.locator = locator super(AgentPool, self).__init__(teamcity=teamcity) @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def href(self): return self._href @href.setter
MIT License
mara/mara-pipelines
mara_pipelines/incremental_processing/processed_files.py
track_processed_file
python
def track_processed_file(node_path: str, file_name: str, last_modified_timestamp: datetime): with mara_db.postgresql.postgres_cursor_context('mara') as cursor: cursor.execute(f''' INSERT INTO data_integration_processed_file (node_path, file_name, last_modified_timestamp) VALUES ({'%s,%s,%s'}) ON CONFLICT (node_path, file_name) DO UPDATE SET last_modified_timestamp = EXCLUDED.last_modified_timestamp ''', (node_path, file_name, last_modified_timestamp)) return True
Records that a file has been 'processed' by a node Args: node_path: The path of the node that processed the file file_name: The name of the file that has been processed last_modified_timestamp: The time when the file was modified last Returns: True
https://github.com/mara/mara-pipelines/blob/b2bd30ffe35dd02483f681d6a3856bbdb7eee682/mara_pipelines/incremental_processing/processed_files.py#L24-L42
import datetime import sqlalchemy from sqlalchemy.ext.declarative import declarative_base import mara_db.config import mara_db.dbs import mara_db.postgresql Base = declarative_base() class ProcessedFile(Base): __tablename__ = 'data_integration_processed_file' node_path = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.Text), primary_key=True) file_name = sqlalchemy.Column(sqlalchemy.Text, primary_key=True) last_modified_timestamp = sqlalchemy.Column(sqlalchemy.TIMESTAMP(timezone=True))
MIT License
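A hedged sketch of calling track_processed_file. The node path and file name are hypothetical, and the 'mara' PostgreSQL alias plus the data_integration_processed_file table must already exist. Note that although the docstring annotates node_path as str, the backing column is a Postgres text[] array, so a list of path segments is passed here.

import datetime
from mara_pipelines.incremental_processing.processed_files import track_processed_file

track_processed_file(['demo_pipeline', 'load_customers'],      # node path segments
                     'customers_2024-01-01.csv',
                     datetime.datetime(2024, 1, 1, 6, 30))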
lunixbochs/actualvim
lib/msgpack/umsgpack.py
Ext.__eq__
python
def __eq__(self, other): return (isinstance(other, self.__class__) and self.code == other.code and self.data == other.data)
Compare this Ext object with another for equality.
https://github.com/lunixbochs/actualvim/blob/1f555ce719e49d6584f0e35e9f0db2f216b98fa5/lib/msgpack/umsgpack.py#L102-L108
__version__ = "2.3.0" version = (2,3,0) import collections import gc import io import struct import sys class Ext: def __init__(self, code, data): if not isinstance(code, int) or not (code >= 0 and code <= 127): raise TypeError("ext code out of range") elif sys.version_info[0] == 3 and not isinstance(data, bytes): raise TypeError("ext data is not code \'bytes\'") elif sys.version_info[0] == 2 and not isinstance(data, str): raise TypeError("ext data is not code \'str\'") self.code = code self.data = data
MIT License
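Ext.__eq__ compares both the type code and the payload bytes. A small sketch, assuming the standalone u-msgpack-python package (the record's copy is the same module vendored into ActualVim):

from umsgpack import Ext

a = Ext(0x05, b'\x01\x02')
b = Ext(0x05, b'\x01\x02')
print(a == b)                         # True: same code, same data
print(a == Ext(0x06, b'\x01\x02'))    # False: different type code
print(a == b'\x01\x02')               # False: not an Ext at all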
lithium876/controll_remote_access_trojan
pyinstaller/PyInstaller/lib/__subprocess.py
Popen._get_handles
python
def _get_handles(self, stdin, stdout, stderr): p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: pass elif stdin == PIPE: p2cread, p2cwrite = os.pipe() elif isinstance(stdin, int): p2cread = stdin else: p2cread = stdin.fileno() if stdout is None: pass elif stdout == PIPE: c2pread, c2pwrite = os.pipe() elif isinstance(stdout, int): c2pwrite = stdout else: c2pwrite = stdout.fileno() if stderr is None: pass elif stderr == PIPE: errread, errwrite = os.pipe() elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = stderr else: errwrite = stderr.fileno() return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
Construct and return a tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
https://github.com/lithium876/controll_remote_access_trojan/blob/7ba48b51d98723e0dd0bca7d0e2586d422f78419/pyinstaller/PyInstaller/lib/__subprocess.py#L967-L1009
import sys mswindows = (sys.platform == "win32") import os import types import traceback import gc import signal class CalledProcessError(Exception): def __init__(self, returncode, cmd): self.returncode = returncode self.cmd = cmd def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) if mswindows: import threading import msvcrt if 1: import pywintypes from win32api import GetStdHandle, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE from win32api import GetCurrentProcess, DuplicateHandle, GetModuleFileName, GetVersion from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE from win32pipe import CreatePipe from win32process import CreateProcess, STARTUPINFO, GetExitCodeProcess, STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE from win32process import TerminateProcess from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0 else: from _subprocess import * class STARTUPINFO: dwFlags = 0 hStdInput = None hStdOutput = None hStdError = None wShowWindow = 0 class pywintypes: error = IOError else: import select import errno import fcntl import pickle __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"] try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 _active = [] def _cleanup(): for inst in _active[:]: if inst._internal_poll(_deadstate=sys.maxint) >= 0: try: _active.remove(inst) except ValueError: pass PIPE = -1 STDOUT = -2 def _eintr_retry_call(func, *args): while True: try: return func(*args) except OSError, e: if e.errno == errno.EINTR: continue raise def call(*popenargs, **kwargs): return Popen(*popenargs, **kwargs).wait() def check_call(*popenargs, **kwargs): retcode = call(*popenargs, **kwargs) cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] if retcode: raise CalledProcessError(retcode, cmd) return retcode def list2cmdline(seq): result = [] needquote = False for arg in seq: bs_buf = [] if result: result.append(' ') needquote = (" " in arg) or ("\t" in arg) or ("|" in arg) or not arg if needquote: result.append('"') for c in arg: if c == '\\': bs_buf.append(c) elif c == '"': result.append('\\' * len(bs_buf)*2) bs_buf = [] result.append('\\"') else: if bs_buf: result.extend(bs_buf) bs_buf = [] result.append(c) if bs_buf: result.extend(bs_buf) if needquote: result.extend(bs_buf) result.append('"') return ''.join(result) class Popen(object): def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0): _cleanup() self._child_created = False if not isinstance(bufsize, (int, long)): raise TypeError("bufsize must be an integer") if mswindows: if preexec_fn is not None: raise ValueError("preexec_fn is not supported on Windows " "platforms") if close_fds and (stdin is not None or stdout is not None or stderr is not None): raise ValueError("close_fds is not supported on Windows " "platforms if you redirect stdin/stdout/stderr") else: if startupinfo is not None: raise ValueError("startupinfo is only supported on Windows " "platforms") if creationflags != 0: raise ValueError("creationflags is only supported on Windows " "platforms") self.stdin = None self.stdout = None self.stderr = None self.pid = None self.returncode = None self.universal_newlines = universal_newlines (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) self._execute_child(args, executable, preexec_fn, 
close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) if mswindows: if p2cwrite is not None: p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) if c2pread is not None: c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) if errread is not None: errread = msvcrt.open_osfhandle(errread.Detach(), 0) if p2cwrite is not None: self.stdin = os.fdopen(p2cwrite, 'wb', bufsize) if c2pread is not None: if universal_newlines: self.stdout = os.fdopen(c2pread, 'rU', bufsize) else: self.stdout = os.fdopen(c2pread, 'rb', bufsize) if errread is not None: if universal_newlines: self.stderr = os.fdopen(errread, 'rU', bufsize) else: self.stderr = os.fdopen(errread, 'rb', bufsize) def _translate_newlines(self, data): data = data.replace("\r\n", "\n") data = data.replace("\r", "\n") return data def __del__(self, sys=sys): if not self._child_created: return self._internal_poll(_deadstate=sys.maxint) if self.returncode is None and _active is not None: _active.append(self) def communicate(self, input=None): if [self.stdin, self.stdout, self.stderr].count(None) >= 2: stdout = None stderr = None if self.stdin: if input: self.stdin.write(input) self.stdin.close() elif self.stdout: stdout = self.stdout.read() self.stdout.close() elif self.stderr: stderr = self.stderr.read() self.stderr.close() self.wait() return (stdout, stderr) return self._communicate(input) def poll(self): return self._internal_poll() if mswindows: def _get_handles(self, stdin, stdout, stderr): if stdin is None and stdout is None and stderr is None: return (None, None, None, None, None, None) p2cread, p2cwrite = None, None c2pread, c2pwrite = None, None errread, errwrite = None, None if stdin is None: p2cread = GetStdHandle(STD_INPUT_HANDLE) if p2cread is None: p2cread, _ = CreatePipe(None, 0) elif stdin == PIPE: p2cread, p2cwrite = CreatePipe(None, 0) elif isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) else: p2cread = msvcrt.get_osfhandle(stdin.fileno()) p2cread = self._make_inheritable(p2cread) if stdout is None: c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE) if c2pwrite is None: _, c2pwrite = CreatePipe(None, 0) elif stdout == PIPE: c2pread, c2pwrite = CreatePipe(None, 0) elif isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) else: c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) if stderr is None: errwrite = GetStdHandle(STD_ERROR_HANDLE) if errwrite is None: _, errwrite = CreatePipe(None, 0) elif stderr == PIPE: errread, errwrite = CreatePipe(None, 0) elif stderr == STDOUT: errwrite = c2pwrite elif isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) else: errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _make_inheritable(self, handle): return DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(), 0, 1, DUPLICATE_SAME_ACCESS) def _find_w9xpopen(self): w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)), "w9xpopen.exe") if not os.path.exists(w9xpopen): w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix), "w9xpopen.exe") if not os.path.exists(w9xpopen): raise RuntimeError("Cannot locate w9xpopen.exe, which is " "needed for Popen to work with your " "shell or platform.") return w9xpopen def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, 
c2pwrite, errread, errwrite): if not isinstance(args, types.StringTypes): args = list2cmdline(args) if startupinfo is None: startupinfo = STARTUPINFO() if None not in (p2cread, c2pwrite, errwrite): startupinfo.dwFlags |= STARTF_USESTDHANDLES startupinfo.hStdInput = p2cread startupinfo.hStdOutput = c2pwrite startupinfo.hStdError = errwrite if shell: startupinfo.dwFlags |= STARTF_USESHOWWINDOW startupinfo.wShowWindow = SW_HIDE comspec = os.environ.get("COMSPEC", "cmd.exe") args = comspec + " /c " + args if (GetVersion() >= 0x80000000L or os.path.basename(comspec).lower() == "command.com"): w9xpopen = self._find_w9xpopen() args = '"%s" %s' % (w9xpopen, args) creationflags |= CREATE_NEW_CONSOLE try: hp, ht, pid, tid = CreateProcess(executable, args, None, None, int(not close_fds), creationflags, env, cwd, startupinfo) except pywintypes.error, e: raise WindowsError(*e.args) self._child_created = True self._handle = hp self.pid = pid ht.Close() if p2cread is not None: p2cread.Close() if c2pwrite is not None: c2pwrite.Close() if errwrite is not None: errwrite.Close() def _internal_poll(self, _deadstate=None): if self.returncode is None: if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0: self.returncode = GetExitCodeProcess(self._handle) return self.returncode def wait(self): if self.returncode is None: obj = WaitForSingleObject(self._handle, INFINITE) self.returncode = GetExitCodeProcess(self._handle) return self.returncode def _readerthread(self, fh, buffer): buffer.append(fh.read()) def _communicate(self, input): stdout = None stderr = None if self.stdout: stdout = [] stdout_thread = threading.Thread(target=self._readerthread, args=(self.stdout, stdout)) stdout_thread.setDaemon(True) stdout_thread.start() if self.stderr: stderr = [] stderr_thread = threading.Thread(target=self._readerthread, args=(self.stderr, stderr)) stderr_thread.setDaemon(True) stderr_thread.start() if self.stdin: if input is not None: self.stdin.write(input) self.stdin.close() if self.stdout: stdout_thread.join() if self.stderr: stderr_thread.join() if stdout is not None: stdout = stdout[0] if stderr is not None: stderr = stderr[0] if self.universal_newlines and hasattr(file, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, stderr) def send_signal(self, sig): if sig == signal.SIGTERM: self.terminate() else: raise ValueError("Only SIGTERM is supported on Windows") def terminate(self): TerminateProcess(self._handle, 1) kill = terminate else:
Apache License 2.0
seagate/cortx-hare
hax/hax/motr/delivery.py
DeliveryHerald.wait_for_all
python
def wait_for_all(self, promise: HaLinkMessagePromise, timeout_sec: float = 30.0): condition = Condition() skip_await = False with self.lock: self.groom_unsorted(promise) self.waiting_clients[promise] = condition skip_await = promise in self.recently_delivered while not promise.is_empty(): if skip_await: LOG.log( TRACE, 'Promise %s has been confirmed before, ' 'no need to block', promise) skip_await = False else: with condition: LOG.log(TRACE, 'Blocking until %s is confirmed', promise) condition.wait(timeout=timeout_sec) with self.lock: self._verify_delivered(promise, timeout_sec) if not promise.is_empty(): self.waiting_clients[promise] = condition
Blocks the current thread until all of the messages in promise._ids are reported by Motr as delivered. Raises a NotDelivered exception when timeout_sec is exceeded.
https://github.com/seagate/cortx-hare/blob/8b2592500f770d665b5b7d0497679c80ce3be574/hax/hax/motr/delivery.py#L114-L145
import logging import time from threading import Condition, Lock from typing import Dict, List from hax.exception import NotDelivered from hax.log import TRACE from hax.types import HaLinkMessagePromise, MessageId LOG = logging.getLogger('hax') MAX_UNSORTED_TTL = 10000 class DeliveryHerald: def __init__(self, unsorted_ttl_msec: int = MAX_UNSORTED_TTL): self.recently_delivered: Dict[HaLinkMessagePromise, List[MessageId]] = {} self.waiting_clients: Dict[HaLinkMessagePromise, Condition] = {} self.unsorted_deliveries: Dict[MessageId, int] = {} self.unsorted_ttl = unsorted_ttl_msec self.lock = Lock() def _verify_delivered(self, promise: HaLinkMessagePromise, timeout_sec: float): del self.waiting_clients[promise] if promise not in self.recently_delivered: raise NotDelivered('None of message tags =' + str(promise) + ' were delivered to Motr within ' + str(timeout_sec) + ' seconds timeout') confirmed_msgs = self.recently_delivered.pop(promise) LOG.log(TRACE, 'Thread unblocked - %s just received', confirmed_msgs) promise.exclude_ids(confirmed_msgs) def get_now_ts(self) -> int: return round(time.time() * 1000) def wait_for_any(self, promise: HaLinkMessagePromise, timeout_sec: float = 30.0): condition = Condition() skip_await = False with self.lock: self.groom_unsorted(promise) self.waiting_clients[promise] = condition skip_await = promise in self.recently_delivered if skip_await: LOG.log(TRACE, 'Promise %s has been confirmed before, no need to block', promise) else: with condition: LOG.log(TRACE, 'Blocking until %s is confirmed', promise) condition.wait(timeout=timeout_sec) with self.lock: self._verify_delivered(promise, timeout_sec)
Apache License 2.0
mikeiacovacci/axiom-framework
lib/classes.py
AxiomInteractiveTask.detect_prompt_change
python
def detect_prompt_change(self): if self.starting_prompt == self.ending_prompt: return False else: return True
SUMMARY: compares two prompt type names, called by AxiomInteractiveTask init method INPUT: self, two string values that represent prompt type names OUTPUT: True or False based on string comparison
https://github.com/mikeiacovacci/axiom-framework/blob/2edc8bb1a123eb3c67897b0742050ee6956058bf/lib/classes.py#L786-L794
import lib.config as config from lib.config import print_error from os import devnull, path from pexpect import exceptions, pty_spawn from prompt_toolkit import prompt, PromptSession from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.history import FileHistory from queue import Queue from re import search from shlex import split from subprocess import call, PIPE, Popen, STDOUT from threading import Event from time import sleep class AxiomAction: def __init__(self, name, prompt_type, execution_type, text, output_list, note): self.execution_type = execution_type self.name = name self.note = note self.output_list = output_list self.prompt_type = prompt_type self.text = text def cli_print(self): print() if isinstance(self.text, str): print(self.text) elif isinstance(self.text, list): line = 0 while line < self.text.__len__(): print(self.text[line]) line += 1 print() def confirm_and_execute(self, tool): self.show() response = input("\n[AXIOM] Execute? [Y/n] ") if response not in ["Y", "y", "Yes", "yes"]: return False else: self.run(tool) return True def existing_subprocess(self): i = 0 while i < dispatch.subprocesses.__len__(): if self.prompt_type == dispatch.subprocesses[i].current_prompt: return True i += 1 return False def extract_ending_prompt(self): ending_prompt = str() if self.execution_type != "interactive": return False for x in self.output_list: if isinstance(x, tuple): if x[0] == "PROMPT": ending_prompt = x[1] break return ending_prompt def print_text(self): if isinstance(self.text, str): print("\n TEXT: " + self.text) elif isinstance(self.text, list): print("\n TEXT: ", end="") print(self.text[0]) line = 1 while line < self.text.__len__(): print(" " + self.text[line]) line += 1 def run(self, tool): if self.prompt_type == "bash" and not self.existing_subprocess(): if not tool.platform_matches(): print_error(str("\nERROR: Cannot execute " + tool.name + " (" + tool.platform + ") on " + config.axiom.platform)) dispatch.continue_trigger.set() return if tool.is_installed(): pass else: if tool.install(): self.show() print() else: if tool.proceed_despite_uninstalled(): pass else: dispatch.continue_trigger.set() return elif self.prompt_type != "other" and not self.existing_subprocess(): print_error("\nERROR: Prompt type incompatible with current runtime") dispatch.continue_trigger.set() return multiple_lines = False if isinstance(self, AxiomCommand): if isinstance(self.text[0], list): multiple_lines = True elif isinstance(self, AxiomAction): if isinstance(self.text, list): multiple_lines = True if self.execution_type == "standalone": if multiple_lines: self.run_multiline_standalone() else: self.run_standalone() elif self.execution_type == "autonomous": if multiple_lines: print_error("ERROR: Autonomous multi-line commands are unsupported") else: self.run_autonomous() elif self.execution_type == "interactive": self.run_interactive() elif self.execution_type == "NX": if multiple_lines: self.run_multiline_nx() else: self.run_nx() def run_autonomous(self): if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(self.text, shell=True) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_interactive(self): ending_prompt = self.extract_ending_prompt() if ending_prompt is not False: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, 
ending_prompt)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_multiline_nx(self): print() line = 0 while line < self.text.__len__(): print(self.text[line]) line += 1 dispatch.continue_trigger.set() def run_multiline_standalone(self): if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() proc = Popen(["bash", "-i"], shell=True, stdin=PIPE, stdout=PIPE) i = 0 while proc.returncode is None: if i < self.text.__len__(): proc.stdin.write(self.text[i].encode()) proc.stdin.write("\n".encode()) proc.stdin.flush() i += 1 else: proc.stdin.close() proc.poll() except OSError: print_error("ERROR: Failed to execute via Popen()") else: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_nx(self): print() print(self.text) print() dispatch.continue_trigger.set() def run_standalone(self): if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(split(self.text)) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def show(self): print("\n NAME: " + self.name + "\n TYPE: " + self.execution_type + " action (" + self.prompt_type + ")" "\n NOTE: " + self.note) self.print_text() class AxiomCommand(AxiomAction): def __init__(self, name, prompt_type, execution_type, text, output_list, note, input_list): super().__init__(name, prompt_type, execution_type, text, output_list, note) self.input_list = input_list def build(self): input_count = 0 if isinstance(self.text[0], str): token_count = 0 built_text = str() while token_count < self.text.__len__() or input_count < self.input_list.__len__(): if token_count < self.text.__len__(): built_text += self.text[token_count] token_count += 1 if input_count < self.input_list.__len__(): built_text += self.input_build_prompt(input_count) input_count += 1 else: built_text = [] current_line = 0 while current_line < self.text.__len__(): line_tokens = self.text[current_line].__len__() current_token = 0 line_inputs = line_tokens - 1 current_input = 0 built_line = str() while current_token < line_tokens or current_input < line_inputs: if current_token < line_tokens: built_line += self.text[current_line][current_token] current_token += 1 if current_input < line_inputs: built_line += self.input_build_prompt(input_count) current_input += 1 input_count += 1 built_text.append(built_line) current_line += 1 return built_text def build_with_placeholders(self): input_count = 0 if isinstance(self.text[0], str): token_count = 0 built_text = str() while token_count < self.text.__len__() or input_count < self.input_list.__len__(): if token_count < self.text.__len__(): built_text += self.text[token_count] token_count += 1 if input_count < self.input_list.__len__(): built_text += str("{" + self.input_list[input_count][1] + "}") input_count += 1 else: built_text = [] current_line = 0 while current_line < self.text.__len__(): line_tokens = self.text[current_line].__len__() current_token = 0 line_inputs = line_tokens - 1 current_input = 0 built_line = str() while current_token < line_tokens or current_input < line_inputs: if current_token < line_tokens: built_line += self.text[current_line][current_token] current_token += 1 if current_input < line_inputs: built_line += str("{" + self.input_list[input_count][1] + "}") current_input += 1 input_count += 1 
built_text.append(built_line) current_line += 1 return built_text def cli_print(self): text = self.build() print() if isinstance(text, str): print(text) elif isinstance(text, list): line = 0 while line < text.__len__(): print(text[line]) line += 1 print() def input_build_prompt(self, input_count): input_type = self.input_list[input_count][1] prompt_text = str("[AXIOM] Enter " + self.input_list[input_count][0] + ": ") if input_type in ["STRMENU", "INTMENU"]: option_name = self.input_list[input_count][0] option_list = self.input_list[input_count][2] response = self.option_prompt(option_name, option_list) return response elif input_type in ["STR", "INT", "IPV4", "IPV6", "IPV4RNGE", "IPV6RNGE", "IPV4CIDR", "IPV6CIDR", "MAC", "FILE", "RLATVPTH", "FULLPATH", "DOMAIN", "HTTPURL", "HTTPSURL", "WEBURL"]: if input_type == "HTTPSURL": history_file = str(config.axiom.history_folder + "/WEBURL" + ".axiom") else: history_file = str(config.axiom.history_folder + "/" + input_type + ".axiom") session = PromptSession(history=FileHistory(history_file)) response = session.prompt(prompt_text, auto_suggest=AutoSuggestFromHistory()) return response else: response = prompt(prompt_text) return response @staticmethod def option_prompt(option_name, option_list): while True: print("\n" + option_name + "\n") count = 0 while count < option_list.__len__(): print(" " + str(count + 1) + "\t" + str(option_list[count])) count += 1 number = prompt("\n[AXIOM] Select an option: ") try: number = int(number) number -= 1 except (ValueError, TypeError): number = -1 if 0 <= number < option_list.__len__(): return option_list[number] def print_text(self): text_with_placeholders = self.build_with_placeholders() if isinstance(text_with_placeholders, str): print("\n TEXT: " + text_with_placeholders) elif isinstance(text_with_placeholders, list): print("\n TEXT: ", end="") print(text_with_placeholders[0]) line = 1 while line < text_with_placeholders.__len__(): print(" " + text_with_placeholders[line]) line += 1 def run_autonomous(self): text = self.build() if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(text, shell=True) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_interactive(self): text = self.build() ending_prompt = self.extract_ending_prompt() if ending_prompt is not False: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, ending_prompt)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_multiline_nx(self): text = self.build() print() line = 0 while line < self.text.__len__(): print(text[line]) line += 1 dispatch.continue_trigger.set() def run_multiline_standalone(self): text = self.build() if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() proc = Popen(["bash", "-i"], shell=True, stdin=PIPE, stdout=PIPE) i = 0 while proc.returncode is None: if i < text.__len__(): proc.stdin.write(text[i].encode()) proc.stdin.write("\n".encode()) proc.stdin.flush() i += 1 else: proc.stdin.close() proc.poll() except OSError: print_error("ERROR: Failed to execute via Popen()") else: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_nx(self): text = self.build() print() print(text) print() dispatch.continue_trigger.set() def run_standalone(self): text = self.build() if 
self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(split(text)) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def show(self): print("\n NAME: " + self.name + "\n TYPE: " + self.execution_type + " command (" + self.prompt_type + ")" "\n NOTE: " + self.note) self.print_text() class AxiomDispatcher: def __init__(self): self.continue_trigger = Event() self.subprocesses = [] self.tasking = Queue(maxsize=0) self.trigger = Event() def check_for_ambiguous_target(self, current_task): prompt_type = current_task.ending_prompt for x in self.subprocesses: if x.current_prompt == prompt_type: return True return False @staticmethod def get_subprocess_output_detect_prompt(proc, pattern): timeout = 0 safety_timer = 0 while True: try: print(proc.readline().decode(), end='') except exceptions.TIMEOUT: if search(pattern, proc.before.decode()): if timeout >= config.axiom.pattern_timeout: print(proc.before.decode()) break else: timeout += 1 sleep(1) continue else: safety_timer += 1 sleep(1) if safety_timer >= config.axiom.safety_timeout: proc.sendline() continue else: timeout = 0 safety_timer = 0 def handle_new_tasks(self): if not self.tasking.empty(): current_task = self.tasking.get() if self.matching_subprocess(current_task) >= 0: target = self.matching_subprocess(current_task) if current_task.prompt_change: if self.check_for_ambiguous_target(current_task): print_error("\nERROR: Cannot create subprocess with same prompt type as existing subprocess") self.tasking.task_done() return self.read_and_transmit(target, current_task) self.tasking.task_done() return elif current_task.starting_prompt == "bash": if self.check_for_ambiguous_target(current_task): print_error("\nERROR: Cannot create subprocess with same prompt type as existing subprocess") self.tasking.task_done() return self.spawn_and_transmit(current_task) self.tasking.task_done() return else: print_error("\nERROR: Prompt type incompatible with current runtime") self.tasking.task_done() return def matching_subprocess(self, current_task): i = 0 while i < self.subprocesses.__len__(): if current_task.starting_prompt == self.subprocesses[i].current_prompt: return i else: i += 1 return -1 def monitor_task_queue(self): self.handle_new_tasks() def read_and_transmit(self, target, current_task): proc = self.subprocesses[target].process while True: try: print(proc.readline().decode(), end='') except exceptions.TIMEOUT: break self.transmit_text(current_task, proc) self.subprocesses[target].current_prompt = current_task.ending_prompt self.subprocesses[target].prompt_pattern = current_task.ending_prompt_pattern dispatch.continue_trigger.set() def spawn_and_transmit(self, current_task): try: self.subprocesses.append(AxiomExecutingSubprocess(current_task.starting_prompt, pty_spawn.spawn("/bin/bash -i", timeout=config.axiom.pty_timeout))) except OSError: print_error("ERROR: Failed to spawn /bin/bash subprocess") exit(1) else: target = self.matching_subprocess(current_task) proc = self.subprocesses[target].process self.transmit_text(current_task, proc) self.subprocesses[target].current_prompt = current_task.ending_prompt self.subprocesses[target].prompt_pattern = current_task.ending_prompt_pattern dispatch.continue_trigger.set() def transmit_text(self, current_task, proc): pattern = str(current_task.ending_prompt_pattern + "$") try: if isinstance(current_task.text, str): 
proc.sendline(current_task.text) elif isinstance(current_task.text, list): i = 0 while i < current_task.text.__len__(): proc.sendline(current_task.text[i]) i += 1 except OSError: print_error("ERROR: Failed to transmit command") exit(1) else: self.get_subprocess_output_detect_prompt(proc, pattern) class AxiomExecutingSubprocess: def __init__(self, current_prompt, process): self.current_prompt = current_prompt self.process = process self.prompt_pattern = None class AxiomInteractiveTask: def __init__(self, text, starting_prompt, ending_prompt): self.ending_prompt = ending_prompt self.starting_prompt = starting_prompt self.text = text self.prompt_change = self.detect_prompt_change() self.ending_prompt_pattern = self.resolve_ending_prompt_pattern()
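A minimal, hypothetical instantiation of the AxiomAction class defined in the context above; the module path, argument values, and the output_list format are assumptions for illustration, and show() only prints the action without executing it.

from lib.classes import AxiomAction  # module path assumed from the repository URL above

action = AxiomAction(
    name="list home directory",            # illustrative values throughout
    prompt_type="bash",
    execution_type="standalone",
    text="ls -la ~",
    output_list=[],                         # assumed empty for a non-interactive action
    note="Enumerate files in the current user's home directory")

action.show()                               # prints NAME / TYPE / NOTE / TEXT only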
Apache License 2.0
hirofumi0810/neural_sp
neural_sp/trainers/optimizer.py
set_optimizer
python
def set_optimizer(model, optimizer, lr, weight_decay=0.):
    parameters = [p for p in model.parameters() if p.requires_grad]
    logger.info("===== Freezed parameters =====")
    for n in [n for n, p in model.named_parameters() if not p.requires_grad]:
        logger.info("%s" % n)
    if optimizer == 'sgd':
        opt = torch.optim.SGD(parameters, lr=lr, weight_decay=weight_decay, nesterov=False)
    elif optimizer == 'momentum':
        opt = torch.optim.SGD(parameters, lr=lr, momentum=0.9, weight_decay=weight_decay, nesterov=False)
    elif optimizer == 'nesterov':
        opt = torch.optim.SGD(parameters, lr=lr, momentum=0.99, weight_decay=weight_decay, nesterov=True)
    elif optimizer == 'adadelta':
        opt = torch.optim.Adadelta(parameters, rho=0.9, eps=lr, weight_decay=weight_decay)
    elif optimizer == 'adam':
        opt = torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
    elif optimizer == 'noam':
        opt = torch.optim.Adam(parameters, lr=0, betas=(0.9, 0.98), eps=1e-09, weight_decay=weight_decay)
    elif optimizer == 'adagrad':
        opt = torch.optim.Adagrad(parameters, lr=lr, weight_decay=weight_decay)
    elif optimizer == 'rmsprop':
        opt = torch.optim.RMSprop(parameters, lr=lr, weight_decay=weight_decay)
    else:
        raise NotImplementedError(optimizer)
    return opt
Set optimizer.

Args:
    model (torch.nn.Module): model class
    optimizer (str): name of optimizer
    lr (float): learning rate
    weight_decay (float): L2 penalty for weight decay

Returns:
    opt (torch.optim.Optimizer): optimizer
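A minimal usage sketch with a toy torch model; the import path follows the function_path above, and the model and hyper-parameters are illustrative.

import torch
from neural_sp.trainers.optimizer import set_optimizer

model = torch.nn.Linear(10, 2)                              # any nn.Module works
opt = set_optimizer(model, optimizer='adam', lr=1e-3, weight_decay=1e-6)

x, y = torch.randn(4, 10), torch.randn(4, 2)
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()
opt.step()                                                  # standard torch.optim update
opt.zero_grad()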
https://github.com/hirofumi0810/neural_sp/blob/b91877c6d2a11f06026480ab422176274d88cbf2/neural_sp/trainers/optimizer.py#L12-L82
import logging import torch logger = logging.getLogger(__name__)
Apache License 2.0
awslabs/athena-glue-service-logs
athena_glue_service_logs/catalog_manager.py
BaseCatalogManager.get_and_create_partitions
python
def get_and_create_partitions(self):
    partition_list = self.partitioner.build_partitions_from_s3()
    self.create_partitions(partition_list)
Create partitions in this database table based off of the files that exist in S3. The subclass is responsible for implementing build_partitions_from_s3 that defines how partitions are structured.
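A hedged usage sketch: ExampleCatalogManager stands in for a concrete subclass of BaseCatalogManager (the name itself is hypothetical), and the AWS region, database, table, and S3 location are placeholders.

manager = ExampleCatalogManager(             # hypothetical concrete subclass of BaseCatalogManager
    region="us-east-1",
    database_name="service_logs_db",
    table_name="alb_logs",
    s3_location="s3://example-bucket/alb-logs/")

manager.initialize_table_from_s3()           # creates database/table, then calls get_and_create_partitions()
manager.get_and_create_partitions()          # refresh partitions later as new log files land in S3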
https://github.com/awslabs/athena-glue-service-logs/blob/383e8f0a4315f99eaf4500e16eacf1d6b6bcc5b4/athena_glue_service_logs/catalog_manager.py#L82-L89
import abc import time import logging import boto3 logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) class BaseCatalogManager(metaclass=abc.ABCMeta): MAX_PARTITION_INPUT_PER_CALL = 100 FILE_GROUPING_PARAMS = {'groupFiles': 'inPartition', 'groupSize': '134217728'} def __init__(self, region, database_name, table_name, s3_location): self.database_name = database_name self.table_name = table_name self.s3_location = s3_location self.glue_client = boto3.client('glue', region_name=region) self._partitioner = None def initialize_with_partitions(self, partition_values): if not self.does_database_exist(): self.create_database() self.create_table() self.create_partitions(partition_list=partition_values) def initialize_table_from_s3(self): if not self.does_database_exist(): self.create_database() self.create_table() self.get_and_create_partitions() def create_table(self): LOGGER.info("Creating database table %s", self.table_name) self.glue_client.create_table( DatabaseName=self.database_name, TableInput=self._build_table_input() ) def create_database(self): LOGGER.info("Creating database %s", self.database_name) self.glue_client.create_database( DatabaseInput={ 'Name': self.database_name } )
Apache License 2.0
monarch-initiative/dipper
dipper/models/Genotype.py
Genotype.addConstruct
python
def addConstruct(
        self, construct_id, construct_label, construct_type=None,
        construct_description=None, construct_category=None,
        construct_type_category=None):
    self.model.addIndividualToGraph(construct_id,
                                    construct_label,
                                    construct_type,
                                    construct_description,
                                    ind_category=construct_category,
                                    ind_type_category=construct_type_category)
:param construct_id:
:param construct_label:
:param construct_type:
:param construct_description:
:param construct_category: a biolink category CURIE for construct_id
:param construct_type_category: a biolink category CURIE for construct_type
:return:
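A hedged sketch of calling addConstruct; it assumes dipper's RDFGraph backend can be constructed with defaults, and the construct CURIE, label, and description are invented placeholders.

from dipper.graph.RDFGraph import RDFGraph    # assumed default-constructible graph backend
from dipper.models.Genotype import Genotype

graph = RDFGraph()
geno = Genotype(graph)
geno.addConstruct(
    construct_id="MGI:3831891",                # placeholder construct CURIE
    construct_label="Tg(example-reporter)",
    construct_description="illustrative transgenic construct")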
https://github.com/monarch-initiative/dipper/blob/2a5fad1223b5dfc75311e1927fd56e2943253bc7/dipper/models/Genotype.py#L90-L110
import logging import re from dipper.models.Model import Model from dipper.models.Family import Family from dipper.graph.Graph import Graph from dipper.utils.GraphUtils import GraphUtils from dipper.models.GenomicFeature import makeChromID, makeChromLabel from dipper.models.BiolinkVocabulary import BioLinkVocabulary as blv __author__ = 'nlw' LOG = logging.getLogger(__name__) class Genotype(): def __init__(self, graph): if isinstance(graph, Graph): self.graph = graph else: raise ValueError("{} is not a graph".format(graph)) self.model = Model(self.graph) self.globaltt = self.graph.globaltt self.globaltcid = self.graph.globaltcid self.curie_map = self.graph.curie_map self.gut = GraphUtils(self.curie_map) def addGenotype( self, genotype_id, genotype_label, genotype_type=None, genotype_description=None ): if genotype_type is None: genotype_type = self.globaltt['intrinsic genotype'] self.model.addIndividualToGraph( genotype_id, genotype_label, genotype_type, genotype_description ) def addAllele( self, allele_id, allele_label, allele_type=None, allele_description=None): if allele_type is None: allele_type = self.globaltt['allele'] self.model.addIndividualToGraph( allele_id, allele_label, allele_type, allele_description ) def addGene( self, gene_id, gene_label=None, gene_type=None, gene_description=None ): if gene_type is None: gene_type = self.globaltt['gene'] self.model.addClassToGraph( gene_id, gene_label, gene_type, gene_description )
BSD 3-Clause New or Revised License
rlabbe/filterpy
filterpy/hinfinity/hinfinity_filter.py
HInfinityFilter.update
python
def update(self, z):
    if z is None:
        return

    I = self._I
    gamma = self.gamma
    Q = self.Q
    H = self.H
    P = self.P
    x = self.x
    V_inv = self._V_inv
    F = self.F
    W = self.W

    HTVI = dot(H.T, V_inv)
    L = linalg.inv(I - gamma * dot(Q, P) + dot(HTVI, H).dot(P))
    PL = dot(P, L)
    K = dot(F, PL).dot(HTVI)

    self.y = z - dot(H, x)
    self.x = self.x + dot(K, self.y)
    self.P = dot(F, PL).dot(F.T) + W
    self.P = (self.P + self.P.T) / 2

    try:
        self.z = np.copy(z)
    except:
        self.z = copy.deepcopy(z)
Add a new measurement `z` to the H-Infinity filter. If `z` is None, nothing is changed.

Parameters
----------
z : ndarray
    measurement for this update.
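A hedged sketch of feeding measurements through update(); the transition and measurement matrices are illustrative, setting V assumes the filter exposes a measurement-noise property, and the class itself is flagged upstream as possibly incorrect.

import numpy as np
from filterpy.hinfinity.hinfinity_filter import HInfinityFilter

hinf = HInfinityFilter(dim_x=2, dim_z=1, dim_u=0, gamma=0.01)
hinf.F = np.array([[1., 1.],
                   [0., 1.]])                  # constant-velocity transition (illustrative)
hinf.H = np.array([[1., 0.]])                  # only position is measured
hinf.V = np.array([[0.01]])                    # assumed measurement-noise setter

for z in [1.0, 2.1, 2.9, 4.2]:                 # toy position measurements
    hinf.update(np.array([[z]]))
    print(hinf.x.T)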
https://github.com/rlabbe/filterpy/blob/a437893597957764fb6b415bfb5640bb117f5b99/filterpy/hinfinity/hinfinity_filter.py#L93-L142
from __future__ import absolute_import, division import copy import warnings import numpy as np from numpy import dot, zeros, eye import scipy.linalg as linalg from filterpy.common import pretty_str class HInfinityFilter(object): def __init__(self, dim_x, dim_z, dim_u, gamma): warnings.warn("This code is likely incorrect. DO NOT USE.", DeprecationWarning) self.dim_x = dim_x self.dim_z = dim_z self.dim_u = dim_u self.gamma = gamma self.x = zeros((dim_x, 1)) self.B = 0 self.F = eye(dim_x) self.H = zeros((dim_z, dim_x)) self.P = eye(dim_x) self.Q = eye(dim_x) self._V_inv = zeros((dim_z, dim_z)) self._V = zeros((dim_z, dim_z)) self.W = zeros((dim_x, dim_x)) self.K = 0 self.y = zeros((dim_z, 1)) self.z = zeros((dim_z, 1)) self._I = np.eye(dim_x)
MIT License
ww-tech/primrose
primrose/configuration/configuration.py
Configuration._get_configuration_hash
python
def _get_configuration_hash(self):
    configuration_string = json.dumps(self.complete_config, sort_keys=True)
    configuration_file_hashname = hashlib.sha256(configuration_string.encode("utf-8")).hexdigest()
    return configuration_string, configuration_file_hashname
Get configuration file string and hash.

Returns:
    (tuple): tuple containing:

        configuration_string (str): the full configuration serialized as a JSON string with sorted keys

        configuration_file_hashname (str): SHA-256 hex digest of the configuration string
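A stand-alone illustration of the same hashing scheme on a toy configuration dict (not a real primrose config): serialize with sorted keys, then SHA-256 the UTF-8 bytes.

import hashlib
import json

complete_config = {"metadata": {}, "implementation_config": {"reader_config": {}}}   # toy config
configuration_string = json.dumps(complete_config, sort_keys=True)
configuration_hash = hashlib.sha256(configuration_string.encode("utf-8")).hexdigest()
print(configuration_hash[:12])    # stable across runs because keys are sorted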
https://github.com/ww-tech/primrose/blob/ab3733dea316e3bea3659493587f97955cf6d983/primrose/configuration/configuration.py#L183-L196
import re import datetime import jstyleson import yaml import json from jinja2 import Environment, FileSystemLoader from jinja2.exceptions import TemplateNotFound import hashlib import os import logging import importlib import glob from primrose.node_factory import NodeFactory from primrose.configuration.util import ( OperationType, ConfigurationError, ConfigurationSectionType, ) from primrose.configuration.configuration_dag import ConfigurationDag from primrose.dag.traverser_factory import TraverserFactory SUPPORTED_EXTS = frozenset([".json", ".yaml", ".yml"]) CLASS_ENV_PACKAGE_KEY = "PRIMROSE_EXT_NODE_PACKAGE" class Configuration: def __init__(self, config_location, is_dict_config=False, dict_config=None): if is_dict_config: ext = None if dict_config is None: raise Exception("expected dict_config was None") if not isinstance(dict_config, dict): raise Exception("did not receive expected dict_config") dict_str = jstyleson.dumps(dict_config) config_str = Configuration.perform_any_config_fragment_substitution(dict_str) else: logging.info("Loading config file at {}".format(config_location)) self.config_location = config_location if os.path.exists(config_location): ext = os.path.splitext(config_location)[1].lower() if ext not in SUPPORTED_EXTS: raise ValueError( "config file at: {} has improper extension type - please use a .json or .yml file".format( config_location ) ) with open(config_location, "r") as f: config_str = f.read() config_str = Configuration.perform_any_config_fragment_substitution(config_str) else: raise Exception("config file at: {} not found".format(config_location)) if ext is None or ext == ".json": self.config = jstyleson.loads(config_str, object_pairs_hook=self.dict_raise_on_duplicates) elif ext in [".yaml", ".yml"]: self.config = yaml.load(config_str, Loader=yaml.FullLoader) assert isinstance(self.config, dict) for k in self.config: if k not in ConfigurationSectionType.values(): msg = "Unsupported top-level key: %s. " % k msg += "Supported keys are %s" % str(ConfigurationSectionType.values()) raise ConfigurationError(msg) self.config_metadata = None if ConfigurationSectionType.METADATA.value in self.config: self.config_metadata = self.config[ConfigurationSectionType.METADATA.value] if not ConfigurationSectionType.IMPLEMENTATION_CONFIG.value in self.config: raise ConfigurationError( "Did not find required top-level key %s" % ConfigurationSectionType.IMPLEMENTATION_CONFIG.value ) self.complete_config = self.config.copy() self.config = self.config[ConfigurationSectionType.IMPLEMENTATION_CONFIG.value] self.dag = ConfigurationDag(self.config) self.config_string, self.config_hash = self._get_configuration_hash() self.config_time = datetime.datetime.now().strftime("%Y%m%d_%H%M") self._parse_config() self.check_config() @staticmethod def perform_any_config_fragment_substitution(config_str): def env_override(value, key): return os.getenv(key, value) jinja_env = Environment(loader=FileSystemLoader([".", "/"])) jinja_env.filters["env_override"] = env_override try: config_str_template = jinja_env.from_string(config_str) config_str = config_str_template.render() except (TemplateNotFound) as error: filenames = str(error) raise ConfigurationError(f"Substitution files do not exist: {filenames}") return config_str def dict_raise_on_duplicates(self, ordered_pairs): d = {} for k, v in ordered_pairs: if k in d: raise ConfigurationError("duplicate key: %r" % (k,)) else: d[k] = v return d
Apache License 2.0
cmdmnt/commandment
commandment/dep/dep.py
DEP.remove_profile
python
def remove_profile(self, *serial_numbers: List[str]) -> DEPProfileRemovals:
    req = requests.Request("DELETE", self._url + "/profile/devices", json={'devices': serial_numbers})
    res = self.send(req)
    return res.json()
Unassign all profiles from device(s).

Args:
    serial_numbers (List[str]): A list of device serial numbers to unassign profiles from.

Returns:
    dict: Assignment information
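A hedged sketch of unassigning profiles from two devices; the OAuth credentials and device serial numbers are placeholders.

from commandment.dep.dep import DEP

dep = DEP(
    consumer_key="CK_PLACEHOLDER",
    consumer_secret="CS_PLACEHOLDER",
    access_token="AT_PLACEHOLDER",
    access_secret="AS_PLACEHOLDER")

result = dep.remove_profile("C02AAAAAAAA", "C02BBBBBBBB")   # illustrative serials
print(result)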
https://github.com/cmdmnt/commandment/blob/17c1dbe3f5301eab0f950f82608c231c15a3ff43/commandment/dep/dep.py#L274-L286
from collections.abc import Iterator from typing import Union, List, Optional import requests from requests.auth import AuthBase from requests_oauthlib import OAuth1 import re from datetime import timedelta, datetime from dateutil import parser as dateparser from locale import atof import json import logging from flask import g, current_app from commandment.dep import DEPProfileRemovals from .errors import DEPServiceError, DEPClientError from email.utils import parsedate logger = logging.getLogger(__name__) class DEPAuth(AuthBase): def __init__(self, token: str) -> None: self.token = token def __call__(self, r): r.headers['X-ADM-Auth-Session'] = self.token return r class DEP: UserAgent = 'commandment' def __init__(self, consumer_key: str = None, consumer_secret: str = None, access_token: str = None, access_secret: str = None, access_token_expiry: Optional[str] = None, url: str = "https://mdmenrollment.apple.com") -> None: self._session_token: Optional[str] = None self._oauth = OAuth1( consumer_key, client_secret=consumer_secret, resource_owner_key=access_token, resource_owner_secret=access_secret, ) if access_token_expiry is not None: access_token_expiry_date = dateparser.parse(access_token_expiry) self._access_token_expiry = access_token_expiry_date else: self._access_token_expiry = None self._url = url self._session = requests.session() self._session.headers.update({ "X-Server-Protocol-Version": "3", "Content-Type": "application/json;charset=UTF8", "User-Agent": DEP.UserAgent, }) self._retry_after: Optional[datetime] = None @property def session_token(self) -> Optional[str]: return self._session_token @classmethod def from_token(cls, token: str): stoken = json.loads(token) return cls(**stoken) def _response_hook(self, r: requests.Response, *args, **kwargs): if r.status_code == 401: pass if 'X-ADM-Auth-Session' in r.headers: self._session_token = r.headers['X-ADM-Auth-Session'] if 'Retry-After' in r.headers: after = r.headers['Retry-After'] if re.compile(r"/[0-9]+/").match(after): d = timedelta(seconds=atof(after)) self._retry_after = datetime.utcnow() + d else: self._retry_after = datetime(*parsedate(after)[:6]) def send(self, req: requests.Request, **kwargs) -> Optional[requests.Response]: if self._access_token_expiry is not None and datetime.now() > self._access_token_expiry: raise DEPClientError("DEP Service Token has expired, please generate a new one.") if self._retry_after is not None: return None if self.session_token is None: self.fetch_token() req.hooks = dict(response=self._response_hook) req.auth = DEPAuth(self._session_token) prepared = self._session.prepare_request(req) res = self._session.send(prepared, **kwargs) try: res.raise_for_status() except requests.HTTPError as e: raise DEPServiceError(response=res, request=res.request) from e return res def fetch_token(self) -> Union[str, None]: res = self._session.get(self._url + "/session", auth=self._oauth) try: res.raise_for_status() except requests.HTTPError as e: raise DEPServiceError(response=res, request=res.request) from e self._session_token = res.json().get("auth_session_token", None) return self._session_token def account(self) -> Union[None, dict]: logger.debug("Fetching DEP account information") res = self.send(requests.Request("GET", self._url + "/account")) return res.json() def fetch_devices(self, cursor: Union[str, None] = None, limit: int = 100) -> dict: req = requests.Request("POST", self._url + "/server/devices", json={'limit': limit, 'cursor': cursor}) res = self.send(req) return res.json() def 
sync_devices(self, cursor: str, limit: int = 100) -> dict: req = requests.Request("POST", self._url + "/devices/sync", json={'limit': limit, 'cursor': cursor}) res = self.send(req) return res.json() def devices(self, cursor: Union[str, None] = None) -> Iterator: if cursor is not None: return DEPSyncCursor(self, cursor=cursor) else: return DEPFetchCursor(self) def device_detail(self, *serial_numbers: Union[str, List[str]]): req = requests.Request("POST", self._url + "/devices", json={'devices': serial_numbers}) res = self.send(req) return res.json() def define_profile(self, profile: dict): req = requests.Request("POST", self._url + "/profile", json=profile) res = self.send(req) return res.json() def assign_profile(self, profile_uuid: str, *serial_numbers: List[str]) -> dict: req = requests.Request("POST", self._url + "/profile/devices", json={'profile_uuid': profile_uuid, 'devices': serial_numbers}) res = self.send(req) return res.json()
MIT License
facelessuser/rummage
rummage/lib/gui/settings/__init__.py
Settings._update_search_object_to_unique
python
def _update_search_object_to_unique(cls, searches):
    import re
    not_word = re.compile(r'[^\w -]', re.UNICODE)

    new_search = {}
    for entry in searches:
        name = entry[0].strip()
        key_name = not_word.sub('', name).replace(' ', '-')

        entry = list(entry)
        if len(entry) == 3:
            entry.insert(2, '')
        if len(entry) == 4:
            entry.insert(3, '')
        if len(entry) == 5:
            entry.append(False)

        unique_id = 1
        unique_name = key_name
        while unique_name in new_search:
            unique_id += 1
            unique_name = "%s (%d)" % (key_name, unique_id)

        new_search[key_name] = tuple(entry)

    cls.settings["saved_searches"] = new_search
    cls.save_settings()
    return new_search
Convert the legacy saved-search list into the keyed saved-search dictionary, slugifying each search name into a key and padding old-format entries with default fields.
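A stand-alone re-creation of the key-normalisation scheme this migration relies on (the helper below is illustrative, not part of the Settings class): strip disallowed characters, slugify spaces, and de-duplicate with a " (N)" suffix.

import re

not_word = re.compile(r'[^\w -]', re.UNICODE)

def unique_key(name, existing):
    key = not_word.sub('', name.strip()).replace(' ', '-')
    candidate, n = key, 1
    while candidate in existing:
        n += 1
        candidate = "%s (%d)" % (key, n)
    return candidate

existing = {}
for name in ["Find TODOs", "Find TODOs", "e-mail addresses!"]:
    existing[unique_key(name, existing)] = name
print(list(existing))   # ['Find-TODOs', 'Find-TODOs (2)', 'e-mail-addresses']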
https://github.com/facelessuser/rummage/blob/17970fc3e4dfae23842782fedfae9d5c8ee3266e/rummage/lib/gui/settings/__init__.py#L810-L842
import codecs import json import os import copy from filelock import FileLock from ..app import custom_app from ..app.custom_app import debug, debug_struct, error from ..dialogs.generic_dialogs import errormsg from .. import localization from ..localization import _ from .. import data from .. import notify from ..notify.util import which from ... import rumcore from ...rumcore import text_decode from .. import util DEV_MODE = False SETTINGS_FILE = "rummage_dev.settings" if DEV_MODE else "rummage.settings" CACHE_FILE = "rummage_dev.cache" if DEV_MODE else "rummage.cache" LOG_FILE = "rummage.log" FIFO = "rummage.fifo" SETTINGS_FMT = '2.6.1' CACHE_FMT = '2.0.0' NOTIFY_STYLES = { "macos": ["default"], "windows": ["default"], "linux": ["default"] } NOTIFY_PLAYERS = { "macos": ["afplay"], "windows": ["windows"], "linux": ["paplay", "aplay", "play"] } NOTIFY_EXT = { "afplay": ['.wav', '.mp3', '.aiff'], "windows": ['.wav'], "paplay": ['.wav', '.mp3', '.ogg'], "aplay": ['.wav', '.mp3'], "play": ['.wav', '.mp3'], } BACKUP_FILE = 0 BACKUP_FOLDER = 1 DEFAULT_SETTINGS = { "__format__": SETTINGS_FMT, "alert_enabled": True, "alt_list_color": True, "backup_ext": "rum-bak", "backup_folder": ".rum-bak", "backup_type": BACKUP_FILE, "brace_expansion": False, "chains": {}, "check_prerelease": False, "check_updates": False, "current_version": (0, 0, 0, 'final', 0, 0), "editor": "", "encoding_options": copy.deepcopy(text_decode.DEFAULT_ENCODING_OPTIONS), "extmatch": False, "file_case_sensitive": False, "full_exclude_path": False, "full_file_path": False, "globstar": False, "hide_cols_content": [], "hide_cols_file": [], "international_time": False, "locale": "en_US", "matchbase": False, "minusnegate": True, "notify_enabled": True, "notify_method": "default", "notify_player": NOTIFY_PLAYERS[util.platform()][0], "notify_sound": "", "pattern_limit": 1000, "pos_cols_content": [], "pos_cols_file": [], "regex_mode": rumcore.RE_MODE, "regex_version": 0, "saved_searches": {}, "single_instance": False, "term_notifier": "" } class Settings: filename = None allow_save = True debug = False @classmethod def localize(cls): cls.ERR_LOAD_SETTINGS_FAILED = _("Failed to load settings file!") cls.ERR_LOAD_CACHE_FAILED = _("Failed to load cache file!") cls.ERR_SAVE_SETTINGS_FAILED = _("Failed to save settings file!") cls.ERR_SAVE_CACHE_FAILED = _("Failed to save cache file!") @classmethod def load_settings(cls): cls.localize() cls.setup_setting_files() cls.settings = {"__format__": SETTINGS_FMT} cls.cache = {"__format__": CACHE_FMT} cls.settings_time = None cls.cache_time = None cls.get_times() if cls.settings_file is not None: cls.open_settings() cls.open_cache() localization.setup('rummage', cls.get_language()) cls.localize() debug_struct(cls.settings) debug_struct(cls.cache) cls.init_notify(True) @classmethod def get_available_players(cls): return NOTIFY_PLAYERS[util.platform()][:] @classmethod def get_settings(cls): cls.reload_settings() return copy.deepcopy(cls.settings) @classmethod def get_log_file(cls): return cls.log @classmethod def set_current_version(cls, value): cls.reload_settings() cls._set_current_version(value) cls.save_settings() @classmethod def _set_current_version(cls, value): cls.settings["current_version"] = value @classmethod def get_current_version(cls): cls.reload_settings() value = cls.settings.get('current_version', DEFAULT_SETTINGS['current_version']) return value @classmethod def is_regex_available(cls): return rumcore.REGEX_SUPPORT @classmethod def is_cchardet_available(cls): return 
text_decode.CCDetect is not None @classmethod def set_chardet_mode(cls, value): cls.reload_settings() cls._set_chardet_mode(value) cls.save_settings() @classmethod def _set_chardet_mode(cls, value): if not cls.is_cchardet_available() or value > text_decode.CHARDET_CLIB: value = text_decode.CHARDET_DEFAULT if 'encoding_options' not in cls.settings: cls.settings['encoding_options'] = copy.deepcopy(text_decode.DEFAULT_ENCODING_OPTIONS) cls.settings["encoding_options"]['chardet_mode'] = value @classmethod def get_chardet_mode(cls): cls.reload_settings() value = cls.settings.get('encoding_options', {}).get('chardet_mode', text_decode.CHARDET_DEFAULT) if text_decode.CCDetect is None or value > text_decode.CHARDET_CLIB: value = text_decode.CHARDET_DEFAULT return value @classmethod def set_alt_list_color(cls, value): cls.reload_settings() cls._set_alt_list_color(value) cls.save_settings() @classmethod def _set_alt_list_color(cls, value): cls.settings["alt_list_color"] = value @classmethod def get_alt_list_color(cls): cls.reload_settings() return cls.settings['alt_list_color'] @classmethod def _set_encoding_ext(cls, values): if 'encoding_options' not in cls.settings: cls.settings['encoding_options'] = copy.deepcopy(text_decode.DEFAULT_ENCODING_OPTIONS) for k, v in values.items(): if k != 'chardet_mode' and k in text_decode.DEFAULT_ENCODING_OPTIONS: cls.settings['encoding_options'][k] = v[:] @classmethod def set_encoding_ext(cls, values): cls.reload_settings() cls._set_encoding_ext(values) cls.save_settings() @classmethod def get_encoding_ext(cls): cls.reload_settings() value = cls.settings.get('encoding_options', {}) options = {} for k, v in value.items(): if k != "chardet_mode": options[k] = v[:] return options @classmethod def get_encoding_options(cls): cls.reload_settings() options = cls.get_encoding_ext() options['chardet_mode'] = cls.get_chardet_mode() return options @classmethod def set_regex_mode(cls, value): cls.reload_settings() cls._set_regex_mode(value) cls.save_settings() @classmethod def _set_regex_mode(cls, value): if value in rumcore.REGEX_MODES and not rumcore.REGEX_SUPPORT: value = rumcore.REGEX_MODE cls.settings["regex_mode"] = value @classmethod def get_regex_mode(cls): cls.reload_settings() value = cls.settings.get('regex_mode', rumcore.RE_MODE) if value in rumcore.REGEX_MODES and not rumcore.REGEX_SUPPORT: value = rumcore.RE_MODE return value @classmethod def set_regex_version(cls, value): cls.reload_settings() cls._set_regex_version(value) cls.save_settings() @classmethod def _set_regex_version(cls, value): if 0 <= value <= 1: cls.settings["regex_version"] = value @classmethod def get_regex_version(cls): cls.reload_settings() return cls.settings.get('regex_version', 0) @classmethod def get_hide_cols_file(cls): cls.reload_settings() return cls.settings.get("hide_cols_file", []) @classmethod def set_hide_cols_file(cls, hide): cls.reload_settings() cls._set_hide_cols_file(sorted(hide)) cls.save_settings() @classmethod def _set_hide_cols_file(cls, hide): cls.settings["hide_cols_file"] = hide @classmethod def get_hide_cols_content(cls): cls.reload_settings() return cls.settings.get("hide_cols_content", []) @classmethod def set_hide_cols_content(cls, hide): cls.reload_settings() cls._set_hide_cols_content(sorted(hide)) cls.save_settings() @classmethod def _set_hide_cols_content(cls, hide): cls.settings["hide_cols_content"] = hide @classmethod def get_pos_cols_file(cls): cls.reload_settings() return cls.settings.get("pos_cols_file", []) @classmethod def set_pos_cols_file(cls, pos): 
cls.reload_settings() cls._set_pos_cols_file(pos) cls.save_settings() @classmethod def _set_pos_cols_file(cls, pos): cls.settings["pos_cols_file"] = pos @classmethod def get_pos_cols_content(cls): cls.reload_settings() return cls.settings.get("pos_cols_content", []) @classmethod def set_pos_cols_content(cls, pos): cls.reload_settings() cls._set_pos_cols_content(pos) cls.save_settings() @classmethod def _set_pos_cols_content(cls, pos): cls.settings["pos_cols_content"] = pos @classmethod def get_international_time(cls): cls.reload_settings() return cls.settings.get("international_time", False) @classmethod def set_international_time(cls, itime): cls.reload_settings() cls._set_international_time(itime) cls.save_settings() @classmethod def _set_international_time(cls, hide): cls.settings["international_time"] = hide @classmethod def get_language(cls): cls.reload_settings() locale = cls.settings.get("locale", "en_US") if locale == "en_US" and not os.path.exists(os.path.join(cls.config_folder, "locale", "en_US")): locale = None return locale @classmethod def set_language(cls, language): cls.reload_settings() cls._set_language(language) cls.save_settings() @classmethod def _set_language(cls, language): cls.settings["locale"] = language @classmethod def get_languages(cls): languages = [] base = localization.locale_path if os.path.exists(base): for file_obj in os.listdir(base): if os.path.isdir(os.path.join(base, file_obj)): languages.append(file_obj) if len(languages) == 0 or "en_US" not in languages: languages.append("en_US") languages.sort() return languages @classmethod def set_debug(cls, enable): cls.reload_settings() cls.settings["debug"] = enable custom_app.set_debug_mode(enable) cls.save_settings() @classmethod def get_times(cls): try: settings_time = os.path.getmtime(cls.settings_file) cache_time = os.path.getmtime(cls.cache_file) cls.settings_time = settings_time cls.cache_time = cache_time except Exception as e: debug(e) error("Could not get timestamp of file!") @classmethod def changed(cls): old_settings = cls.settings_time old_cache = cls.cache_time cls.get_times() try: changed = old_settings != cls.settings_time or old_cache != cls.cache_time except Exception: error("Could not compare timestamp of file!") changed = False return changed @classmethod def setup_setting_files(cls): platform = util.platform() if platform == "windows": folder = os.path.expanduser("~\\.Rummage") if not os.path.exists(folder): os.mkdir(folder) plugin_folder = os.path.join(folder, 'plugins') if not os.path.exists(plugin_folder): os.mkdir(plugin_folder) cls.settings_file = os.path.join(folder, SETTINGS_FILE) cls.cache_file = os.path.join(folder, CACHE_FILE) cls.log = os.path.join(folder, LOG_FILE) cls.fifo = os.path.join(folder, '\\\\.\\pipe\\rummage') cls.config_folder = folder elif platform == "macos": old_folder = os.path.expanduser("~/Library/Application Support/Rummage") folder = os.path.expanduser("~/.Rummage") if os.path.exists(old_folder) and not os.path.exists(folder): import shutil shutil.move(old_folder, folder) if not os.path.exists(folder): os.mkdir(folder) plugin_folder = os.path.join(folder, 'plugins') if not os.path.exists(plugin_folder): os.mkdir(plugin_folder) cls.settings_file = os.path.join(folder, SETTINGS_FILE) cls.cache_file = os.path.join(folder, CACHE_FILE) cls.log = os.path.join(folder, LOG_FILE) cls.fifo = os.path.join(folder, FIFO) cls.config_folder = folder elif platform == "linux": folder = os.path.expanduser("~/.config/Rummage") if not os.path.exists(folder): os.mkdir(folder) 
plugin_folder = os.path.join(folder, 'plugins') if not os.path.exists(plugin_folder): os.mkdir(plugin_folder) cls.settings_file = os.path.join(folder, SETTINGS_FILE) cls.cache_file = os.path.join(folder, CACHE_FILE) cls.log = os.path.join(folder, LOG_FILE) cls.fifo = os.path.join(folder, FIFO) cls.config_folder = folder cls.settings_lock = FileLock(cls.settings_file + '.lock') cls.cache_lock = FileLock(cls.cache_file + '.lock') if not os.path.exists(cls.settings_file): cls.new_settings(cls.settings_file) if not os.path.exists(cls.cache_file): cls.new_cache(cls.cache_file) @classmethod def open_settings(cls): try: with cls.settings_lock.acquire(2): with codecs.open(cls.settings_file, "r", encoding="utf-8") as f: cls.settings = json.loads(f.read()) except Exception: errormsg(cls.ERR_LOAD_SETTINGS_FAILED) cls.update_settings() @classmethod def new_settings(cls, settings): default_settings = {'__format__': SETTINGS_FMT} try: with cls.settings_lock.acquire(2): with codecs.open(settings, "w", encoding="utf-8") as f: f.write(json.dumps(default_settings, sort_keys=True, indent=4, separators=(',', ': '))) except Exception: pass return default_settings @classmethod def update_settings(cls): updated = False settings_format = tuple([int(x) for x in cls.settings.get('__format__').split('.')]) if settings_format < (2, 1, 0): updated = True searches = cls.settings.get("saved_searches", {}) if isinstance(searches, list): searches = cls._update_search_object_to_unique(searches) if "regex_support" in cls.settings: del cls.settings["regex_support"] for k, v in searches.items(): new_search = { "name": v[0], "search": v[1], "replace": v[2], "flags": v[3], "is_regex": v[4], "is_function": v[5] } searches[k] = new_search cls.settings["saved_searches"] = searches backup_type = cls.settings.get('backup_type') cls.settings["backup_type"] = 0 if backup_type is None or backup_type is False else 1 if settings_format < (2, 2, 0): if 'editor' in cls.settings and isinstance(cls.settings['editor'], (list, tuple)): cls.settings['editor'] = "" for k, v in DEFAULT_SETTINGS.items(): if k not in cls.settings: updated = True cls.settings[k] = copy.deepcopy(v) elif k == 'encoding_options': for k1, v1 in v.items(): if k1 not in cls.settings[k]: updated = True cls.settings[k][k1] = copy.copy(v1) if settings_format < tuple([int(x) for x in SETTINGS_FMT.split('.')]): updated = True cls.settings["__format__"] = SETTINGS_FMT if updated: cls.save_settings() @classmethod def open_cache(cls): success = False try: with cls.cache_lock.acquire(2): with codecs.open(cls.cache_file, "r", encoding="utf-8") as f: cls.cache = json.loads(f.read()) success = True except Exception: errormsg(cls.ERR_LOAD_CACHE_FAILED) if success: cls.update_cache() @classmethod def new_cache(cls, cache): default_cache = {'__format__': CACHE_FMT} try: with cls.cache_lock.acquire(2): with codecs.open(cache, "w", encoding="utf-8") as f: f.write(json.dumps(default_cache, sort_keys=True, indent=4, separators=(',', ': '))) except Exception: pass return default_cache @classmethod def update_cache(cls): cache_format = cls.cache.get('__format__') if cache_format is None: cls.cache = cls.new_cache(cls.cache_file) if cache_format == '2.0.0': pass @classmethod def get_config_folder(cls): return cls.config_folder @classmethod def get_fifo(cls): return cls.fifo @classmethod def reload_settings(cls): if cls.changed(): debug("Reloading settings.") settings = None cache = None if cls.settings_file is not None: try: with cls.settings_lock.acquire(2): with 
codecs.open(cls.settings_file, "r", encoding="utf-8") as f: settings = json.loads(f.read()) with cls.cache_lock: with codecs.open(cls.cache_file, "r", encoding="utf-8") as f: cache = json.loads(f.read()) except Exception: pass if settings is not None: cls.settings = settings if cache is not None: cls.cache = cache @classmethod def get_editor(cls, filename=None, line=None, col=None): cls.reload_settings() editor = cls.settings.get("editor", "") if isinstance(editor, dict): editor = editor.get(util.platform(), "") if filename is None or line is None or col is None: return editor if isinstance(editor, (list, tuple)): return [ arg.replace( "{$file}", filename ).replace( "{$line}", str(line) ).replace( "{$col}", str(col) ) for arg in editor ] else: return editor.replace( "{$file}", filename.replace('"', '\\"') ).replace( "{$line}", str(line) ).replace( "{$col}", str(col) ).replace( "{$col0}", str(col - 1) ) @classmethod def set_editor(cls, editor): cls.reload_settings() cls._set_editor(editor) cls.save_settings() @classmethod def _set_editor(cls, editor): cls.settings["editor"] = editor @classmethod def get_single_instance(cls): cls.reload_settings() return cls.settings.get("single_instance", False) @classmethod def set_single_instance(cls, single): cls.reload_settings() cls._set_single_instance(single) cls.save_settings() @classmethod def _set_single_instance(cls, single): cls.settings["single_instance"] = single @classmethod
MIT License
scieloorg/scielo-manager
scielomanager/api/resources_v1.py
AheadPressReleaseResource.build_filters
python
def build_filters(self, filters=None):
    if filters is None:
        filters = {}

    orm_filters = super(AheadPressReleaseResource, self).build_filters(filters)

    if 'article_pid' in filters:
        preleases = AheadPressRelease.objects.filter(
            articles__article_pid=filters['article_pid'])
        orm_filters['pk__in'] = preleases

    elif 'journal_pid' in filters:
        preleases = AheadPressRelease.objects.by_journal_pid(
            filters['journal_pid'])
        orm_filters['pk__in'] = preleases

    return orm_filters
Custom filter that retrieves press releases by article PID or, alternatively, by journal PID.
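A hedged sketch of exercising this filter over HTTP; the host, credentials, and PID value are placeholders, and the /api/v1/ prefix is assumed from the module name.

import requests

resp = requests.get(
    "https://manager.example.org/api/v1/apressreleases/",
    params={
        "article_pid": "S0100-879X2013007500012",   # illustrative PID
        "username": "someuser",                     # tastypie ApiKeyAuthentication credentials
        "api_key": "0123456789abcdef",
        "format": "json"})
print(resp.json().get("objects", []))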
https://github.com/scieloorg/scielo-manager/blob/0945f377376de8ef0ada83c35b4e2312062cdf45/scielomanager/api/resources_v1.py#L444-L463
import logging from django.db.models import Q from django.contrib.auth.models import User from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned from tastypie.resources import ModelResource from tastypie import fields from tastypie.contrib.contenttypes.fields import GenericForeignKeyField from tastypie.authentication import ApiKeyAuthentication from tastypie.authorization import DjangoAuthorization from tastypie.authorization import Authorization from tastypie.exceptions import BadRequest from journalmanager.models import ( Journal, UseLicense, Sponsor, Collection, Issue, Section, RegularPressRelease, AheadPressRelease, PressReleaseTranslation, PressReleaseArticle, SubjectCategory, ) from scielomanager.utils import usercontext logger = logging.getLogger(__name__) def current_user_active_collection(): return usercontext.get_finder().get_current_user_active_collection() def current_user_collections(): return usercontext.get_finder().get_current_user_collections() class ApiKeyAuthMeta: authentication = ApiKeyAuthentication() authorization = DjangoAuthorization() class SectionResource(ModelResource): journal = fields.ForeignKey('api.resources_v1.JournalResource', 'journal') issues = fields.OneToManyField('api.resources_v1.IssueResource', 'issue_set') titles = fields.CharField(readonly=True) class Meta(ApiKeyAuthMeta): queryset = Section.objects.all() resource_name = 'sections' allowed_methods = ['get'] excludes = ['legacy_code'] filtering = { "journal": ('exact'), } def dehydrate_titles(self, bundle): return [(title.language.iso_code, title.title) for title in bundle.obj.titles.all()] class UseLicenseResource(ModelResource): class Meta(ApiKeyAuthMeta): queryset = UseLicense.objects.all() resource_name = 'uselicenses' allowed_methods = ['get', ] class IssueResource(ModelResource): journal = fields.ForeignKey('api.resources_v1.JournalResource', 'journal') sections = fields.ManyToManyField(SectionResource, 'section') thematic_titles = fields.CharField(readonly=True) is_press_release = fields.BooleanField(readonly=True) suppl_volume = fields.CharField(attribute='volume', readonly=True) suppl_number = fields.CharField(attribute='number', readonly=True) use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True, null=True) class Meta(ApiKeyAuthMeta): queryset = Issue.objects.all() resource_name = 'issues' allowed_methods = ['get', ] filtering = { "journal": ('exact'), "is_marked_up": ('exact'), "volume": ('exact'), "number": ('exact'), "publication_year": ('exact'), "suppl_number": ('exact'), "suppl_volume": ('exact') } def build_filters(self, filters=None): if filters is None: filters = {} orm_filters = super(IssueResource, self).build_filters(filters) param_filters = {} if 'collection' in filters: param_filters['journal__collections__name_slug'] = filters['collection'] if 'eletronic_issn' in filters: param_filters['journal__eletronic_issn'] = filters['eletronic_issn'] if 'print_issn' in filters: param_filters['journal__print_issn'] = filters['print_issn'] if 'suppl_number' in filters: param_filters['type'] = 'supplement' param_filters['number'] = filters['suppl_number'] if 'suppl_volume' in filters: param_filters['type'] = 'supplement' param_filters['number'] = '' param_filters['volume'] = filters['suppl_volume'] issues = Issue.objects.filter(**param_filters) orm_filters['pk__in'] = issues return orm_filters def dehydrate_thematic_titles(self, bundle): return dict([title.language.iso_code, title.title] for title in bundle.obj.issuetitle_set.all()) def 
dehydrate_is_press_release(self, bundle): return False def dehydrate_suppl_volume(self, bundle): if bundle.obj.type == 'supplement': return bundle.obj.suppl_text if bundle.obj.volume else '' else: return '' def dehydrate_suppl_number(self, bundle): if bundle.obj.type == 'supplement': return bundle.obj.suppl_text if bundle.obj.number else '' else: return '' class CollectionResource(ModelResource): class Meta(ApiKeyAuthMeta): queryset = Collection.objects.all() resource_name = 'collections' allowed_methods = ['get', ] class SubjectCategoryResource(ModelResource): class Meta(ApiKeyAuthMeta): queryset = SubjectCategory.objects.all() resource_name = 'subjectcategory' allowed_methods = ['get', ] class SponsorResource(ModelResource): class Meta(ApiKeyAuthMeta): queryset = Sponsor.objects.all() resource_name = 'sponsors' allowed_methods = ['get', ] class UserResource(ModelResource): class Meta(ApiKeyAuthMeta): queryset = User.objects.all() resource_name = 'users' allowed_methods = ['get', ] excludes = [ 'email', 'password', 'is_active', 'is_staff', 'is_superuser', ] class JournalResource(ModelResource): missions = fields.CharField(readonly=True) other_titles = fields.CharField(readonly=True) creator = fields.ForeignKey(UserResource, 'creator') abstract_keyword_languages = fields.CharField(readonly=True) languages = fields.CharField(readonly=True) use_license = fields.ForeignKey(UseLicenseResource, 'use_license', full=True) sponsors = fields.ManyToManyField(SponsorResource, 'sponsor') collections = fields.ManyToManyField(CollectionResource, 'collections') issues = fields.OneToManyField(IssueResource, 'issue_set') sections = fields.OneToManyField(SectionResource, 'section_set') subject_categories = fields.ManyToManyField(SubjectCategoryResource, 'subject_categories', readonly=True) pub_status_history = fields.ListField(readonly=True) contact = fields.DictField(readonly=True) study_areas = fields.ListField(readonly=True) pub_status = fields.CharField(readonly=True) pub_status_reason = fields.CharField(readonly=True) national_code = fields.CharField(attribute='ccn_code', readonly=True) previous_title = fields.ForeignKey('self', 'previous_title', null=True) succeeding_title = fields.ForeignKey('self', 'succeeding_title', null=True) class Meta(ApiKeyAuthMeta): queryset = Journal.objects.all().filter() resource_name = 'journals' allowed_methods = ['get', ] filtering = { 'is_trashed': ('exact',), 'eletronic_issn': ('exact',), 'print_issn': ('exact',), } def build_filters(self, filters=None): if filters is None: filters = {} orm_filters = super(JournalResource, self).build_filters(filters) if 'collection' in filters: journals = Journal.objects.filter( collections__name_slug=filters['collection']) orm_filters['pk__in'] = journals if 'pubstatus' in filters: try: j = orm_filters['pk__in'] except KeyError: j = Journal.objects statuses = filters.getlist('pubstatus') journals = j.filter( membership__status__in=statuses) orm_filters['pk__in'] = journals return orm_filters def dehydrate_missions(self, bundle): return [(mission.language.iso_code, mission.description) for mission in bundle.obj.missions.all()] def dehydrate_other_titles(self, bundle): return [(title.category, title.title) for title in bundle.obj.other_titles.all()] def dehydrate_languages(self, bundle): return [language.iso_code for language in bundle.obj.languages.all()] def dehydrate_subject_categories(self, bundle): return [subject_category.term for subject_category in bundle.obj.subject_categories.all()] def dehydrate_pub_status_history(self, 
bundle): return [{'date': event.since, 'status': event.status} for event in bundle.obj.statuses.order_by('-since').all()] def dehydrate_study_areas(self, bundle): return [area.study_area for area in bundle.obj.study_areas.all()] def dehydrate_collections(self, bundle): try: return bundle.data['collections'][0] except IndexError: return '' def dehydrate_pub_status(self, bundle): try: col = bundle.obj.collections.get() except MultipleObjectsReturned: query_collection = bundle.request.GET.get('collection') if query_collection: col = bundle.obj.collections.get(name_slug=query_collection) else: raise BadRequest("missing collection param") return bundle.obj.membership_info(col, 'status') def dehydrate_pub_status_reason(self, bundle): try: col = bundle.obj.collections.get() except MultipleObjectsReturned: query_collection = bundle.request.GET.get('collection') if query_collection: col = bundle.obj.collections.get(name_slug=query_collection) else: raise BadRequest("missing collection param") return bundle.obj.membership_info(col, 'reason') def dehydrate(self, bundle): bundle.data.pop('ccn_code', False) return bundle class PressReleaseTranslationResource(ModelResource): language = fields.CharField(readonly=True) class Meta(ApiKeyAuthMeta): resource_name = 'prtranslations' queryset = PressReleaseTranslation.objects.all() allowed_methods = ['get', ] def dehydrate_language(self, bundle): return bundle.obj.language.iso_code class PressReleaseResource(ModelResource): issue_uri = fields.ForeignKey(IssueResource, 'issue') translations = fields.OneToManyField(PressReleaseTranslationResource, 'translations', full=True) articles = fields.CharField(readonly=True) issue_meta = fields.CharField(readonly=True) class Meta(ApiKeyAuthMeta): resource_name = 'pressreleases' queryset = RegularPressRelease.objects.all() allowed_methods = ['get', ] ordering = ['id'] def build_filters(self, filters=None): if filters is None: filters = {} orm_filters = super(PressReleaseResource, self).build_filters(filters) if 'article_pid' in filters: preleases = RegularPressRelease.objects.filter( articles__article_pid=filters['article_pid']) orm_filters['pk__in'] = preleases elif 'journal_pid' in filters: preleases = RegularPressRelease.objects.by_journal_pid( filters['journal_pid']) orm_filters['pk__in'] = preleases elif 'issue_pid' in filters: preleases = RegularPressRelease.objects.by_issue_pid( filters['issue_pid']) orm_filters['pk__in'] = preleases return orm_filters def dehydrate_articles(self, bundle): return [art.article_pid for art in bundle.obj.articles.all()] def dehydrate_issue_meta(self, bundle): issue = bundle.obj.issue meta_data = { 'scielo_pid': issue.scielo_pid, 'short_title': issue.journal.short_title, 'volume': issue.volume, 'number': issue.number, 'suppl_volume': issue.suppl_text if issue.type == 'supplement' and issue.volume else '', 'suppl_number': issue.suppl_text if issue.type == 'supplement' and issue.number else '', 'publication_start_month': issue.publication_start_month, 'publication_end_month': issue.publication_end_month, 'publication_city': issue.journal.publication_city, 'publication_year': issue.publication_year, } return meta_data class AheadPressReleaseResource(ModelResource): journal_uri = fields.ForeignKey(JournalResource, 'journal') translations = fields.OneToManyField(PressReleaseTranslationResource, 'translations', full=True) articles = fields.CharField(readonly=True) class Meta(ApiKeyAuthMeta): resource_name = 'apressreleases' queryset = AheadPressRelease.objects.all() allowed_methods = ['get', 
] def dehydrate_articles(self, bundle): return [art.article_pid for art in bundle.obj.articles.all()]
BSD 2-Clause Simplified License
maybeshewill-cv/bisenetv2-tensorflow
data_provider/cityscapes/cityscapes_reader.py
_CitySpacesDataset._load_batch_images
python
def _load_batch_images(self, image_paths):
    src_images = []
    label_images = []

    for paths in image_paths:
        src_images.append(cv2.imread(paths[0], cv2.IMREAD_COLOR))
        label_images.append(self._pil_imread(paths[1]))

    return src_images, label_images
:param image_paths: list of (source image path, label image path) pairs
:return: tuple of (src_images, label_images) lists
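A hedged sketch of loading one (image, label) pair; the Cityscapes file paths are placeholders, and the class is instantiated directly only for illustration.

from data_provider.cityscapes.cityscapes_reader import _CitySpacesDataset

pairs = [
    ("./data/leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png",
     "./data/gtFine/train/aachen/aachen_000000_000019_gtFine_labelIds.png"),
]
dataset = _CitySpacesDataset(image_file_paths=pairs)
src_images, label_images = dataset._load_batch_images(pairs)
print(src_images[0].shape, label_images[0].shape)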
https://github.com/maybeshewill-cv/bisenetv2-tensorflow/blob/3f1e1bddcedb73a317cf8db1e734283ebbfe1440/data_provider/cityscapes/cityscapes_reader.py#L51-L64
import os.path as ops import cv2 import numpy as np import tensorflow as tf import tqdm from PIL import Image from local_utils.augment_utils.cityscapes import augmentation_utils as aug from local_utils.config_utils import parse_config_utils CFG = parse_config_utils.cityscapes_cfg_v2 class _CitySpacesDataset(object): def __init__(self, image_file_paths): self._image_file_paths = image_file_paths self._epoch_nums = CFG.TRAIN.EPOCH_NUMS self._batch_size = CFG.TRAIN.BATCH_SIZE self._batch_count = 0 self._sample_nums = len(image_file_paths) self._num_batchs = int(np.ceil(self._sample_nums / self._batch_size)) @staticmethod def _pil_imread(file_path): im = Image.open(file_path) return np.asarray(im)
MIT License
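The loader above reads each pair with OpenCV for the BGR source frame and PIL for the label ids. A minimal stand-alone sketch of that pairing pattern, with placeholder Cityscapes-style paths:

import cv2
import numpy as np
from PIL import Image

def load_pair(src_path, label_path):
    # BGR uint8 frame; cv2.imread returns None for unreadable paths, so real
    # callers should guard against that.
    src = cv2.imread(src_path, cv2.IMREAD_COLOR)
    # label ids kept exactly as stored on disk (single-channel PNG)
    label = np.asarray(Image.open(label_path))
    return src, label

# image, mask = load_pair('leftImg8bit/train/x.png', 'gtFine/train/x_labelIds.png')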
awslabs/aws-data-wrangler
awswrangler/emr.py
get_step_state
python
def get_step_state(cluster_id: str, step_id: str, boto3_session: Optional[boto3.Session] = None) -> str: client_emr: boto3.client = _utils.client(service_name="emr", session=boto3_session) response: Dict[str, Any] = client_emr.describe_step(ClusterId=cluster_id, StepId=step_id) _logger.debug("response: \n%s", pprint.pformat(response)) return cast(str, response["Step"]["Status"]["State"])
Get EMR step state. Possible states: 'PENDING', 'CANCEL_PENDING', 'RUNNING', 'COMPLETED', 'CANCELLED', 'FAILED', 'INTERRUPTED' Parameters ---------- cluster_id : str Cluster ID. step_id : str Step ID. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- str State. Examples -------- >>> import awswrangler as wr >>> state = wr.emr.get_step_state("cluster-id", "step-id")
https://github.com/awslabs/aws-data-wrangler/blob/f82b7e12d4126ec63f739f6f172139ed2e7d73ac/awswrangler/emr.py#L897-L926
import logging import pprint from typing import Any, Dict, List, Optional, Union, cast import boto3 from awswrangler import _utils, exceptions, sts _logger: logging.Logger = logging.getLogger(__name__) def _get_ecr_credentials_refresh_content(region: str) -> str: return f""" import subprocess from pyspark.sql import SparkSession spark = SparkSession.builder.appName("ECR Setup Job").getOrCreate() COMMANDS = [ "sudo -s eval $(aws ecr get-login --region {region} --no-include-email)", "sudo hdfs dfs -put -f /root/.docker/config.json /user/hadoop/" ] for command in COMMANDS: subprocess.run(command.split(" "), timeout=6.0, check=True) print("done!") """ def _get_default_logging_path( subnet_id: Optional[str] = None, account_id: Optional[str] = None, region: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> str: if account_id is None: boto3_session = _utils.ensure_session(session=boto3_session) _account_id: str = sts.get_account_id(boto3_session=boto3_session) else: _account_id = account_id if (region is None) and (subnet_id is not None): _region: str = _utils.get_region_from_session(boto3_session=boto3_session) elif (region is None) and (subnet_id is None): raise exceptions.InvalidArgumentCombination("You must pass region or subnet_id or both.") else: _region = region return f"s3://aws-logs-{_account_id}-{_region}/elasticmapreduce/" def _build_cluster_args(**pars: Any) -> Dict[str, Any]: account_id: str = sts.get_account_id(boto3_session=pars["boto3_session"]) region: str = _utils.get_region_from_session(boto3_session=pars["boto3_session"]) if pars.get("logging_s3_path") is None: pars["logging_s3_path"] = _get_default_logging_path( subnet_id=None, account_id=account_id, region=region, boto3_session=pars["boto3_session"] ) spark_env: Optional[Dict[str, str]] = None yarn_env: Optional[Dict[str, str]] = None livy_env: Optional[Dict[str, str]] = None if pars["spark_pyarrow"] is True: if pars["spark_defaults"] is None: pars["spark_defaults"] = {"spark.sql.execution.arrow.enabled": "true"} else: pars["spark_defaults"]["spark.sql.execution.arrow.enabled"] = "true" spark_env = {"ARROW_PRE_0_15_IPC_FORMAT": "1"} yarn_env = {"ARROW_PRE_0_15_IPC_FORMAT": "1"} livy_env = {"ARROW_PRE_0_15_IPC_FORMAT": "1"} if pars["python3"] is True: if spark_env is None: spark_env = {"PYSPARK_PYTHON": "/usr/bin/python3"} else: spark_env["PYSPARK_PYTHON"] = "/usr/bin/python3" if pars["spark_jars_path"] is not None: paths: str = ",".join(pars["spark_jars_path"]) if pars["spark_defaults"] is None: pars["spark_defaults"] = {"spark.jars": paths} else: pars["spark_defaults"]["spark.jars"] = paths args: Dict[str, Any] = { "Name": pars["cluster_name"], "LogUri": pars["logging_s3_path"], "ReleaseLabel": pars["emr_release"], "VisibleToAllUsers": pars["visible_to_all_users"], "JobFlowRole": pars["emr_ec2_role"], "ServiceRole": pars["emr_role"], "Instances": { "KeepJobFlowAliveWhenNoSteps": pars["keep_cluster_alive_when_no_steps"], "TerminationProtected": pars["termination_protected"], "Ec2SubnetId": pars["subnet_id"], "InstanceFleets": [], }, "StepConcurrencyLevel": pars["step_concurrency_level"], } if pars["custom_ami_id"] is not None: args["CustomAmiId"] = pars["custom_ami_id"] if pars["key_pair_name"] is not None: args["Instances"]["Ec2KeyName"] = pars["key_pair_name"] if pars["security_group_master"] is not None: args["Instances"]["EmrManagedMasterSecurityGroup"] = pars["security_group_master"] if pars["security_groups_master_additional"] is not None: args["Instances"]["AdditionalMasterSecurityGroups"] = 
pars["security_groups_master_additional"] if pars["security_group_slave"] is not None: args["Instances"]["EmrManagedSlaveSecurityGroup"] = pars["security_group_slave"] if pars["security_groups_slave_additional"] is not None: args["Instances"]["AdditionalSlaveSecurityGroups"] = pars["security_groups_slave_additional"] if pars["security_group_service_access"] is not None: args["Instances"]["ServiceAccessSecurityGroup"] = pars["security_group_service_access"] args["Configurations"] = [ {"Classification": "spark-log4j", "Properties": {"log4j.rootCategory": f"{pars['spark_log_level']}, console"}} ] if pars["docker"] is True: if pars.get("extra_registries") is None: extra_registries: List[str] = [] else: extra_registries = pars["extra_registries"] registries: str = f"local,centos,{account_id}.dkr.ecr.{region}.amazonaws.com,{','.join(extra_registries)}" registries = registries[:-1] if registries.endswith(",") else registries args["Configurations"].append( { "Classification": "container-executor", "Properties": {}, "Configurations": [ { "Classification": "docker", "Properties": { "docker.privileged-containers.registries": registries, "docker.trusted.registries": registries, }, "Configurations": [], } ], } ) if spark_env is not None: args["Configurations"].append( { "Classification": "spark-env", "Properties": {}, "Configurations": [{"Classification": "export", "Properties": spark_env, "Configurations": []}], } ) if yarn_env is not None: args["Configurations"].append( { "Classification": "yarn-env", "Properties": {}, "Configurations": [{"Classification": "export", "Properties": yarn_env, "Configurations": []}], } ) if livy_env is not None: args["Configurations"].append( { "Classification": "livy-env", "Properties": {}, "Configurations": [{"Classification": "export", "Properties": livy_env, "Configurations": []}], } ) if pars["spark_glue_catalog"] is True: args["Configurations"].append( { "Classification": "spark-hive-site", "Properties": { "hive.metastore.client.factory.class": "com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory" }, "Configurations": [], } ) if pars["hive_glue_catalog"] is True: hive_conf: Dict[str, Any] = {"Classification": "hive-site", "Properties": {}, "Configurations": []} hive_conf["Properties"][ "hive.metastore.client.factory.class" ] = "com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory" args["Configurations"].append(hive_conf) if pars["presto_glue_catalog"] is True: args["Configurations"].append( { "Classification": "presto-connector-hive", "Properties": {"hive.metastore.glue.datacatalog.enabled": "true"}, "Configurations": [], } ) if pars["consistent_view"] is True: args["Configurations"].append( { "Classification": "emrfs-site", "Properties": { "fs.s3.consistent.retryPeriodSeconds": str(pars.get("consistent_view_retry_seconds", "10")), "fs.s3.consistent": "true", "fs.s3.consistent.retryCount": str(pars.get("consistent_view_retry_count", "5")), "fs.s3.consistent.metadata.tableName": pars.get("consistent_view_table_name", "EmrFSMetadata"), }, } ) if pars["maximize_resource_allocation"] is True: args["Configurations"].append({"Classification": "spark", "Properties": {"maximizeResourceAllocation": "true"}}) if pars["spark_defaults"] is not None: spark_defaults: Dict[str, Union[str, Dict[str, str]]] = { "Classification": "spark-defaults", "Properties": pars["spark_defaults"], } args["Configurations"].append(spark_defaults) if pars.get("custom_classifications") is not None: for c in pars["custom_classifications"]: 
args["Configurations"].append(c) if pars["applications"]: args["Applications"] = [{"Name": x} for x in pars["applications"]] if pars["bootstraps_paths"]: args["BootstrapActions"] = [{"Name": x, "ScriptBootstrapAction": {"Path": x}} for x in pars["bootstraps_paths"]] if (pars["debugging"] is True) or (pars["steps"] is not None): args["Steps"] = [] if pars["debugging"] is True: args["Steps"].append( { "Name": "Setup Hadoop Debugging", "ActionOnFailure": "TERMINATE_CLUSTER", "HadoopJarStep": {"Jar": "command-runner.jar", "Args": ["state-pusher-script"]}, } ) if pars["steps"] is not None: args["Steps"] += pars["steps"] timeout_action_master: str = ( "SWITCH_TO_ON_DEMAND" if pars["spot_timeout_to_on_demand_master"] else "TERMINATE_CLUSTER" ) fleet_master: Dict[str, Any] = { "Name": "MASTER", "InstanceFleetType": "MASTER", "TargetOnDemandCapacity": pars["instance_num_on_demand_master"], "TargetSpotCapacity": pars["instance_num_spot_master"], "InstanceTypeConfigs": [ { "InstanceType": pars["instance_type_master"], "WeightedCapacity": 1, "BidPriceAsPercentageOfOnDemandPrice": pars["spot_bid_percentage_of_on_demand_master"], "EbsConfiguration": { "EbsBlockDeviceConfigs": [ { "VolumeSpecification": {"SizeInGB": pars["instance_ebs_size_master"], "VolumeType": "gp2"}, "VolumesPerInstance": 1, } ], "EbsOptimized": True, }, } ], } if pars["instance_num_spot_master"] > 0: fleet_master["LaunchSpecifications"] = { "SpotSpecification": { "TimeoutDurationMinutes": pars["spot_provisioning_timeout_master"], "TimeoutAction": timeout_action_master, } } args["Instances"]["InstanceFleets"].append(fleet_master) if (pars["instance_num_spot_core"] > 0) or pars["instance_num_on_demand_core"] > 0: timeout_action_core = "SWITCH_TO_ON_DEMAND" if pars["spot_timeout_to_on_demand_core"] else "TERMINATE_CLUSTER" fleet_core: Dict[str, Any] = { "Name": "CORE", "InstanceFleetType": "CORE", "TargetOnDemandCapacity": pars["instance_num_on_demand_core"], "TargetSpotCapacity": pars["instance_num_spot_core"], "InstanceTypeConfigs": [ { "InstanceType": pars["instance_type_core"], "WeightedCapacity": 1, "BidPriceAsPercentageOfOnDemandPrice": pars["spot_bid_percentage_of_on_demand_core"], "EbsConfiguration": { "EbsBlockDeviceConfigs": [ { "VolumeSpecification": { "SizeInGB": pars["instance_ebs_size_core"], "VolumeType": "gp2", }, "VolumesPerInstance": 1, } ], "EbsOptimized": True, }, } ], } if pars["instance_num_spot_core"] > 0: fleet_core["LaunchSpecifications"] = { "SpotSpecification": { "TimeoutDurationMinutes": pars["spot_provisioning_timeout_core"], "TimeoutAction": timeout_action_core, } } args["Instances"]["InstanceFleets"].append(fleet_core) if (pars["instance_num_spot_task"] > 0) or pars["instance_num_on_demand_task"] > 0: timeout_action_task: str = ( "SWITCH_TO_ON_DEMAND" if pars["spot_timeout_to_on_demand_task"] else "TERMINATE_CLUSTER" ) fleet_task: Dict[str, Any] = { "Name": "TASK", "InstanceFleetType": "TASK", "TargetOnDemandCapacity": pars["instance_num_on_demand_task"], "TargetSpotCapacity": pars["instance_num_spot_task"], "InstanceTypeConfigs": [ { "InstanceType": pars["instance_type_task"], "WeightedCapacity": 1, "BidPriceAsPercentageOfOnDemandPrice": pars["spot_bid_percentage_of_on_demand_task"], "EbsConfiguration": { "EbsBlockDeviceConfigs": [ { "VolumeSpecification": { "SizeInGB": pars["instance_ebs_size_task"], "VolumeType": "gp2", }, "VolumesPerInstance": 1, } ], "EbsOptimized": True, }, } ], } if pars["instance_num_spot_task"] > 0: fleet_task["LaunchSpecifications"] = { "SpotSpecification": { 
"TimeoutDurationMinutes": pars["spot_provisioning_timeout_task"], "TimeoutAction": timeout_action_task, } } args["Instances"]["InstanceFleets"].append(fleet_task) if pars["tags"] is not None: args["Tags"] = [{"Key": k, "Value": v} for k, v in pars["tags"].items()] _logger.debug("args: \n%s", pprint.pformat(args)) return args def create_cluster( subnet_id: str, cluster_name: str = "my-emr-cluster", logging_s3_path: Optional[str] = None, emr_release: str = "emr-6.0.0", emr_ec2_role: str = "EMR_EC2_DefaultRole", emr_role: str = "EMR_DefaultRole", instance_type_master: str = "r5.xlarge", instance_type_core: str = "r5.xlarge", instance_type_task: str = "r5.xlarge", instance_ebs_size_master: int = 64, instance_ebs_size_core: int = 64, instance_ebs_size_task: int = 64, instance_num_on_demand_master: int = 1, instance_num_on_demand_core: int = 0, instance_num_on_demand_task: int = 0, instance_num_spot_master: int = 0, instance_num_spot_core: int = 0, instance_num_spot_task: int = 0, spot_bid_percentage_of_on_demand_master: int = 100, spot_bid_percentage_of_on_demand_core: int = 100, spot_bid_percentage_of_on_demand_task: int = 100, spot_provisioning_timeout_master: int = 5, spot_provisioning_timeout_core: int = 5, spot_provisioning_timeout_task: int = 5, spot_timeout_to_on_demand_master: bool = True, spot_timeout_to_on_demand_core: bool = True, spot_timeout_to_on_demand_task: bool = True, python3: bool = True, spark_glue_catalog: bool = True, hive_glue_catalog: bool = True, presto_glue_catalog: bool = True, consistent_view: bool = False, consistent_view_retry_seconds: int = 10, consistent_view_retry_count: int = 5, consistent_view_table_name: str = "EmrFSMetadata", bootstraps_paths: Optional[List[str]] = None, debugging: bool = True, applications: Optional[List[str]] = None, visible_to_all_users: bool = True, key_pair_name: Optional[str] = None, security_group_master: Optional[str] = None, security_groups_master_additional: Optional[List[str]] = None, security_group_slave: Optional[str] = None, security_groups_slave_additional: Optional[List[str]] = None, security_group_service_access: Optional[str] = None, docker: bool = False, extra_public_registries: Optional[List[str]] = None, spark_log_level: str = "WARN", spark_jars_path: Optional[List[str]] = None, spark_defaults: Optional[Dict[str, str]] = None, spark_pyarrow: bool = False, custom_classifications: Optional[List[Dict[str, Any]]] = None, maximize_resource_allocation: bool = False, steps: Optional[List[Dict[str, Any]]] = None, custom_ami_id: Optional[str] = None, step_concurrency_level: int = 1, keep_cluster_alive_when_no_steps: bool = True, termination_protected: bool = False, tags: Optional[Dict[str, str]] = None, boto3_session: Optional[boto3.Session] = None, ) -> str: applications = ["Spark"] if applications is None else applications boto3_session = _utils.ensure_session(session=boto3_session) args: Dict[str, Any] = _build_cluster_args(**locals()) client_emr: boto3.client = _utils.client(service_name="emr", session=boto3_session) response: Dict[str, Any] = client_emr.run_job_flow(**args) _logger.debug("response: \n%s", pprint.pformat(response)) return cast(str, response["JobFlowId"]) def get_cluster_state(cluster_id: str, boto3_session: Optional[boto3.Session] = None) -> str: client_emr: boto3.client = _utils.client(service_name="emr", session=boto3_session) response: Dict[str, Any] = client_emr.describe_cluster(ClusterId=cluster_id) _logger.debug("response: \n%s", pprint.pformat(response)) return cast(str, 
response["Cluster"]["Status"]["State"]) def terminate_cluster(cluster_id: str, boto3_session: Optional[boto3.Session] = None) -> None: client_emr: boto3.client = _utils.client(service_name="emr", session=boto3_session) response: Dict[str, Any] = client_emr.terminate_job_flows(JobFlowIds=[cluster_id]) _logger.debug("response: \n%s", pprint.pformat(response)) def submit_steps( cluster_id: str, steps: List[Dict[str, Any]], boto3_session: Optional[boto3.Session] = None ) -> List[str]: client_emr: boto3.client = _utils.client(service_name="emr", session=boto3_session) response: Dict[str, Any] = client_emr.add_job_flow_steps(JobFlowId=cluster_id, Steps=steps) _logger.debug("response: \n%s", pprint.pformat(response)) return cast(List[str], response["StepIds"]) def submit_step( cluster_id: str, command: str, name: str = "my-step", action_on_failure: str = "CONTINUE", script: bool = False, boto3_session: Optional[boto3.Session] = None, ) -> str: session: boto3.Session = _utils.ensure_session(session=boto3_session) step: Dict[str, Any] = build_step( name=name, command=command, action_on_failure=action_on_failure, script=script, boto3_session=session ) client_emr: boto3.client = _utils.client(service_name="emr", session=session) response: Dict[str, Any] = client_emr.add_job_flow_steps(JobFlowId=cluster_id, Steps=[step]) _logger.debug("response: \n%s", pprint.pformat(response)) return cast(str, response["StepIds"][0]) def build_step( command: str, name: str = "my-step", action_on_failure: str = "CONTINUE", script: bool = False, region: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> Dict[str, Any]: jar: str = "command-runner.jar" if script is True: if region is not None: _region: str = region else: _region = _utils.get_region_from_session(boto3_session=boto3_session, default_region="us-east-1") jar = f"s3://{_region}.elasticmapreduce/libs/script-runner/script-runner.jar" step: Dict[str, Any] = { "Name": name, "ActionOnFailure": action_on_failure, "HadoopJarStep": {"Jar": jar, "Args": command.split(" ")}, } return step
Apache License 2.0
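get_step_state returns a single snapshot, so callers typically poll until EMR reports a terminal state. A hedged sketch of such a loop; the cluster and step ids are placeholders:

import time
import awswrangler as wr

TERMINAL_STATES = {"COMPLETED", "CANCELLED", "FAILED", "INTERRUPTED"}

def wait_for_step(cluster_id, step_id, poll_seconds=30):
    # Re-read the step state until a terminal value comes back.
    while True:
        state = wr.emr.get_step_state(cluster_id, step_id)
        if state in TERMINAL_STATES:
            return state
        time.sleep(poll_seconds)

# final_state = wait_for_step("j-XXXXXXXXXXXXX", "s-XXXXXXXXXXXXX")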
jefflester/minitrino
cli/minitrino/cmd/cmd_provision.py
cli
python
def cli(ctx, modules, no_rollback, docker_native): utils.check_daemon(ctx.docker_client) utils.check_lib(ctx) utils.check_starburst_ver(ctx) modules = append_running_modules(modules) check_compatibility(modules) check_enterprise(modules) if not modules: ctx.logger.log( f"No catalog or security options received. Provisioning " f"standalone Trino container..." ) else: for module in modules: if not ctx.modules.data.get(module, False): raise err.UserError( f"Invalid module: '{module}'. It was not found " f"in the Minitrino library at {ctx.minitrino_lib_dir}" ) try: cmd_chunk = chunk(modules) compose_env = ctx.env.get_section("MODULES") compose_env.update(ctx.env.get_section("EXTRA")) compose_cmd = build_command(docker_native, compose_env, cmd_chunk) ctx.cmd_executor.execute_commands(compose_cmd, environment=compose_env) initialize_containers() containers_to_restart = execute_bootstraps(modules) containers_to_restart = append_user_config(containers_to_restart) check_dup_configs() restart_containers(containers_to_restart) ctx.logger.log(f"Environment provisioning complete.") except Exception as e: rollback_provision(no_rollback) utils.handle_exception(e)
Provision command for Minitrino. If the resulting docker-compose command is unsuccessful, the function exits with a non-zero status code.
https://github.com/jefflester/minitrino/blob/09c1d1d9c6f0539ea53ca46ec2cefa5cc142c2ea/cli/minitrino/cmd/cmd_provision.py#L74-L120
import os import stat import hashlib import time import click import yaml from minitrino.cli import pass_environment from minitrino import utils from minitrino import errors as err from minitrino.settings import RESOURCE_LABEL from minitrino.settings import MODULE_ROOT from minitrino.settings import MODULE_CATALOG from minitrino.settings import MODULE_SECURITY from minitrino.settings import ETC_TRINO from minitrino.settings import TRINO_CONFIG from minitrino.settings import TRINO_JVM_CONFIG from docker.errors import NotFound @click.command( "provision", help=( """Provision an environment based on specified modules. All options are optional and can be left empty.""" ), ) @click.option( "-m", "--module", "modules", default=[], type=str, multiple=True, help=("""A specific module to provision."""), ) @click.option( "-n", "--no-rollback", is_flag=True, default=False, help=( """Do not rollback provisioned resources in the event of an error.""" ), ) @click.option( "-d", "--docker-native", default="", type=str, help=( """Appends native docker-compose commands to the generated docker-compose shell command. Run `docker-compose up --help` to see all available options. Example: minitrino provision --docker-native --build Example: minitrino provision --docker-native '--remove-orphans --force-recreate'""" ), ) @utils.exception_handler @pass_environment
Apache License 2.0
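The provision command above is assembled from three click options: a repeatable -m/--module, a --no-rollback flag and a free-form --docker-native string. A self-contained sketch of that option pattern, decoupled from minitrino's context object (names are illustrative only):

import click

@click.command("provision")
@click.option("-m", "--module", "modules", multiple=True, type=str)
@click.option("-n", "--no-rollback", is_flag=True, default=False)
@click.option("-d", "--docker-native", default="", type=str)
def provision(modules, no_rollback, docker_native):
    # click gathers repeated -m values into a tuple
    click.echo("modules=%s no_rollback=%s docker_native=%r"
               % (list(modules), no_rollback, docker_native))

if __name__ == "__main__":
    provision()      # e.g. python sketch.py -m hive -m ldap -d '--build'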
tensorflow/text
tensorflow_text/python/ops/tokenization.py
Tokenizer.tokenize
python
def tokenize(self, input): raise NotImplementedError("Abstract method")
Tokenizes the input tensor. Splits each string in the input tensor into a sequence of tokens. Tokens generally correspond to short substrings of the source string. Tokens can be encoded using either strings or integer ids. Example: >>> print(tf_text.WhitespaceTokenizer().tokenize("small medium large")) tf.Tensor([b'small' b'medium' b'large'], shape=(3,), dtype=string) Args: input: An N-dimensional UTF-8 string (or optionally integer) `Tensor` or `RaggedTensor`. Returns: An N+1-dimensional UTF-8 string or integer `Tensor` or `RaggedTensor`. For each string from the input tensor, the final, extra dimension contains the tokens that string was split into.
https://github.com/tensorflow/text/blob/56a7619f9709d1cd0e69686be990e4215b3e4993/tensorflow_text/python/ops/tokenization.py#L51-L72
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc from tensorflow.python.module import module from tensorflow_text.python.ops.splitter import Splitter from tensorflow_text.python.ops.splitter import SplitterWithOffsets class Tokenizer(Splitter): @abc.abstractmethod
Apache License 2.0
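tokenize is abstract here; concrete tokenizers such as WhitespaceTokenizer implement it and add one token dimension to the input, as the docstring describes. A small usage sketch, assuming tensorflow and tensorflow-text are installed:

import tensorflow_text as tf_text

tokenizer = tf_text.WhitespaceTokenizer()
# A batch of strings comes back as a RaggedTensor with one extra token axis.
tokens = tokenizer.tokenize(["small medium large", "tiny"])
print(tokens)        # <tf.RaggedTensor [[b'small', b'medium', b'large'], [b'tiny']]>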
fxihub/hummingbird
src/hummingbird.py
main
python
def main(): if(len(sys.argv) == 1): argparser.print_help() args = argparser.parse_args() level = logging.WARNING if args.verbose: level = logging.INFO if args.debug: level = logging.DEBUG logging.basicConfig(format='%(filename)s:%(lineno)d %(message)s', level=level) if args.port < PORT_RANGE[0] or args.port > PORT_RANGE[1]: print("The port must be from {0} to {1}".format(PORT_RANGE[0], PORT_RANGE[1])) exit(0) if(args.backend is not None): if (args.influxdb is not None): from ipc import influx influx.init(args.influxdb) from backend import Worker if(args.backend != True): worker = Worker(args.backend, args.port) else: worker = Worker(None, args.port) if not args.profile: worker.start() else: from pycallgraph import PyCallGraph from pycallgraph.output import GraphvizOutput import ipc.mpi import os graphviz = GraphvizOutput() graphviz.output_file = 'pycallgraph_%d.png' % (ipc.mpi.rank) with PyCallGraph(output=graphviz): worker.start() elif(args.interface is not False): import interface interface.start_interface(args.no_restore) elif(args.reload is not False): import os, signal with open('.pid', 'r') as file: pid = int(file.read()) os.kill(pid, signal.SIGUSR1)
The entry point of the program
https://github.com/fxihub/hummingbird/blob/0b1bdf5023b92090f31d9bc857e0854a805cf2cd/src/hummingbird.py#L18-L63
import sys import logging import socket import imp from utils.cmdline_args import argparser parse_cmdline_args = argparser.parse_args PORT_RANGE = (0, 65535)
BSD 2-Clause Simplified License
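main() above maps --debug/--verbose flags onto logging levels and rejects ports outside PORT_RANGE before starting a worker. A minimal stand-alone sketch of those two checks; the parser here is an illustration, not the project's real argparser:

import argparse
import logging
import sys

PORT_RANGE = (0, 65535)

parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("--debug", action="store_true")
parser.add_argument("-p", "--port", type=int, default=13131)
args = parser.parse_args()

# --debug wins over --verbose, otherwise warnings only, mirroring main() above.
level = logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.WARNING
logging.basicConfig(format="%(filename)s:%(lineno)d %(message)s", level=level)

if not PORT_RANGE[0] <= args.port <= PORT_RANGE[1]:
    sys.exit("The port must be from {0} to {1}".format(*PORT_RANGE))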
redhatqe/pylero
src/pylero/test_run.py
TestRun.get_custom_field
python
def get_custom_field(self, field_name): self._verify_obj() cf = self._custom_fields match = [x for x in cf if x.key == field_name] if match: return match[0].value else: return Custom(field_name, None)
gets custom field values. Args: field_name: name of the custom field Returns: value of the custom field. Note: Polarion WSDL currently does not publish the list of custom fields, so this function cannot do any verification if the field is valid.
https://github.com/redhatqe/pylero/blob/a6a1ebc69736f0546c534386d0415fa91f3198a1/src/pylero/test_run.py#L982-L1001
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import suds from pylero._compatible import basestring from pylero._compatible import classmethod from pylero._compatible import object from pylero._compatible import range from pylero.base_polarion import BasePolarion from pylero.base_polarion import tx_wrapper from pylero.build import Build from pylero.custom import ArrayOfCustom from pylero.custom import Custom from pylero.custom_field_type import CustomFieldType from pylero.document import Document from pylero.enum_custom_field_type import EnumCustomFieldType from pylero.enum_option_id import ArrayOfEnumOptionId from pylero.enum_option_id import EnumOptionId from pylero.exceptions import PyleroLibException from pylero.plan import Plan from pylero.project import Project from pylero.test_record import ArrayOfTestRecord from pylero.test_record import TestRecord from pylero.test_run_attachment import ArrayOfTestRunAttachment from pylero.test_run_attachment import TestRunAttachment from pylero.text import Text from pylero.user import User from pylero.work_item import _WorkItem from pylero.work_item import TestCase def generate_description(test_run, test_case, test_record): tr_html = "<b>Test Run:</b> <span id=\"link\" class= " "\"polarion-rte-link\" data-type=\"testRun\" " "data-item-id=\"{0}\" data-option-id=\"long\">" "</span><br/>".format(test_run.test_run_id) tc_html = "<b>Test Case:</b> <span id=\"link\" class=" "\"polarion-rte-link\" data-type=\"workItem\" " "data-item-id=\"{0}\" data-option-id=\"long\"></span>" "<br/>".format(test_case.work_item_id) table_cell_style = "style=\"text-align: left; padding: 10px; " "vertical-align: top; background-color: #ffffff;\"" table_row_style = "style=\"border-bottom: 1px solid #f0f0f0;\"" columns = ["", "#", "<span title=\"Step\">Step</span>", "<span title=\"Expected Result\">Expected Result</span>", "Actual Result"] test_step_results = {"passed": "<span title=\"Results met expected results" "\"><span style=\"white-space:nowrap;\"><im" "g src=\"/polarion/icons/default/enums/test" "run_status_passed.png\" style=\"vertical-a" "lign:text-bottom;border:0px;margin-right:2" "px;\" class=\"polarion-no-style-cleanup\"/" "></span></span>", "failed": "<span title=\"Results did not meet expecte" "d results\"><span style=\"white-space:nowr" "ap;\"><img src=\"/polarion/icons/default/e" "nums/testrun_status_failed.png\" style=\"v" "ertical-align:text-bottom;border:0px;margi" "n-right:2px;\" class=\"polarion-no-style-c" "leanup\"/></span></span>", "blocked": "<span title=\"Errors in the product preve" "nted test from being executed\"><span sty" "le=\"white-space:nowrap;\"><img src=\"/po" "larion/icons/default/enums/testrun_status" "_blocked.png\" style=\"vertical-align:tex" "t-bottom;border:0px;margin-right:2px;\" c" "lass=\"polarion-no-style-cleanup\"/>" "</span></span>"} table_header = "<table class=\"polarion-no-style-cleanup\" style=\"border" "-collapse: collapse;\"><tr style=\"text-align: left; " "white-space: nowrap; color: #757575; border-bottom: 1px " "solid #d2d7da; background-color: #ffffff;\">{0}</tr>" .format("".join(["<th {0}>{1}</th>".format(table_cell_style, column) for column in columns])) verdict = "</table><table style=\"margin-bottom: 15px; ;border-collapse: " "collapse; width:100%; ;margin-top: 13px;\" class=\"polarion-no" "-style-cleanup\"><tr><th style=\"width: 80%; text-align: left;" " background-color: #ffffff;\">Test Case 
Verdict:</th></tr><tr>" "<td style=\"vertical-align: top;\"><span style=\"font-weight: " "bold;\"><span style=\"color: #C30000;\"><span title=\"Results " "did not meet expected results\"><span style=\"white-space:" "nowrap;\"><img src=\"/polarion/icons/default/enums/testrun_" "status_failed.png\" style=\"vertical-align:text-bottom;border:" "0px;margin-right:2px;\" class=\"polarion-no-style-cleanup\"/>" "</span>Failed</span></span></span><span> {0}</span></td></tr>" "</table>" .format(test_record.comment) table_rows = "" for step in range(len(test_record.test_step_results)): table_rows += "<tr {0}>" "<td {1}>{2}</td>" "<td {1}>{3}</td>" "<td {1}>{4}</td>" "<td {1}>{5}</td>" "<td {1}>{6}</td>" "</tr>".format(table_row_style, table_cell_style, test_step_results. get(test_record.test_step_results[step] .result), step + 1, test_case.test_steps.steps[step].values[0] .content, test_case.test_steps.steps[step].values[1] .content, test_record.test_step_results[step] .comment) content = tr_html + tc_html + table_header + table_rows + verdict return content def create_incident_report(test_run, test_record, test_case): project_id = test_run.project_id status = 'open' project = Project(project_id) tconf = project.get_tests_configuration() defectWorkItemType = tconf.defect_work_item_type title = 'Failed: ' + test_case.title description = generate_description(test_run, test_case, test_record) kwarg_dict = {} for prop in tconf.fields_to_copy_from_test_case_to_defect.property: kwarg_dict[prop.value] = getattr(test_case, prop.key) for prop in tconf.fields_to_copy_from_test_run_to_linked_defect.property: kwarg_dict[prop.value] = getattr(test_run, prop.key) incident_report = _WorkItem.create(project_id, defectWorkItemType, title, description, status, **kwarg_dict) incident_report.add_linked_item(test_case.work_item_id, "triggered_by") return incident_report.work_item_id class TestRun(BasePolarion): _cls_suds_map = { "attachments": {"field_name": "attachments", "is_array": True, "cls": TestRunAttachment, "arr_cls": ArrayOfTestRunAttachment, "inner_field_name": "TestRunAttachment"}, "author": {"field_name": "authorURI", "cls": User, "named_arg": "uri", "sync_field": "uri"}, "created": "created", "document": {"field_name": "document", "cls": Document}, "finished_on": "finishedOn", "group_id": "groupId", "test_run_id": "id", "is_template": "isTemplate", "keep_in_history": "keepInHistory", "location": "location", "project_id": {"field_name": "projectURI", "cls": Project, "named_arg": "uri", "sync_field": "uri"}, "query": "query", "_records": {"field_name": "records", "is_array": True, "cls": TestRecord, "arr_cls": ArrayOfTestRecord, "inner_field_name": "TestRecord"}, "select_test_cases_by": {"field_name": "selectTestCasesBy", "cls": EnumOptionId, "enum_id": "testrun-selectTestCasesBy"}, "status": {"field_name": "status", "cls": EnumOptionId, "enum_id": "testing/testrun-status"}, "summary_defect": {"field_name": "summaryDefectURI", "cls": _WorkItem, "named_arg": "uri", "sync_field": "uri"}, "title": "title", "template": {"field_name": "templateURI", "named_arg": "uri", "sync_field": "uri"}, "type": {"field_name": "type", "cls": EnumOptionId, "enum_id": "testing/testrun-type"}, "updated": "updated", "_custom_fields": {"field_name": "customFields", "is_array": True, "cls": Custom, "arr_cls": ArrayOfCustom, "inner_field_name": "Custom"}, "uri": "_uri", "_unresolvable": "_unresolvable"} _id_field = "test_run_id" _obj_client = "test_management_client" _obj_struct = "tns3:TestRun" _custom_field_cache = {} @property def 
records(self): self._verify_obj() if "dynamic" not in self.select_test_cases_by: return self._records if "Doc" in self.select_test_cases_by: cases = self.document.get_work_items(None, True) elif "Query" in self.select_test_cases_by: cases = _WorkItem.query( self.query + " AND project.id:" + self.project_id) else: raise PyleroLibException("Only Test Runs based on Docs or" " Queries can be dynamic") executed_ids = [rec.test_case_id for rec in self._records] test_recs = self._records for case in cases: if case.work_item_id not in executed_ids and case.type != "heading": test_recs.append( TestRecord(self.project_id, case.work_item_id)) return test_recs @records.setter def records(self, val): self._records = val @classmethod @tx_wrapper def create(cls, project_id, test_run_id=None, template=None, title=None, **kwargs): if not template: raise PyleroLibException("Template is required") if title: uri = cls.session.test_management_client.service. createTestRunWithTitle( project_id, test_run_id or title, title, template) else: uri = cls.session.test_management_client.service.createTestRun( project_id, test_run_id, template) if uri: run = cls(uri=uri) run.verify_params(**kwargs) for field in kwargs: setattr(run, field, kwargs[field]) run.update() return run else: raise PyleroLibException("Test Run was not created") @classmethod @tx_wrapper def create_template(cls, project_id, template_id, parent_template_id="Empty", select_test_cases_by=None, query=None, doc_with_space=None, **kwargs): tr = cls.create(project_id, template_id, parent_template_id, **kwargs) tr.is_template = True if select_test_cases_by: tr.select_test_cases_by = select_test_cases_by elif doc_with_space: tr.select_test_cases_by = "dynamicLiveDoc" elif query: tr.select_test_cases_by = "dynamicQueryResult" if query: tr.query = query if doc_with_space: tr.document = Document(project_id, doc_with_space) tr.update() return TestRun(tr.test_run_id, project_id=project_id) @classmethod def search(cls, query, fields=["test_run_id"], sort="test_run_id", limit=-1, search_templates=False, project_id=None): project_id = project_id or cls.default_project query += " AND project.id:%s" % (project_id) TestRun(project_id=project_id) function_name = "search" p_sort = cls._cls_suds_map[sort] if not isinstance( cls._cls_suds_map[sort], dict) else cls._cls_suds_map[sort]["field_name"] parms = [query, p_sort] if search_templates: function_name += "TestRunTemplates" else: function_name += "TestRuns" p_fields = cls._convert_obj_fields_to_polarion(fields) if p_fields: function_name += "WithFieldsLimited" parms.append(p_fields) parms.append(limit) test_runs = [] results = getattr(cls.session.test_management_client.service, function_name)(*parms) for suds_obj in results: tr = TestRun(suds_object=suds_obj) test_runs.append(tr) return test_runs def __init__(self, test_run_id=None, suds_object=None, project_id=None, uri=None): self._add_custom_fields(project_id) super(self.__class__, self).__init__(test_run_id, suds_object) if test_run_id: if not project_id: raise PyleroLibException("When test_run_id is passed in, " "project_id is required") self._suds_object = self.session.test_management_client.service. getTestRunById(project_id, test_run_id) elif uri: self._suds_object = self.session.test_management_client.service. 
getTestRunByUri(uri) if test_run_id or uri: if getattr(self._suds_object, "_unresolvable", True): raise PyleroLibException( "The Test Run {0} was not found.".format(test_run_id)) def _fix_circular_refs(self): self._cls_suds_map["template"]["cls"] = self.__class__ def _custom_field_types(self, field_type): if field_type == "text": return Text field_type = field_type.split("[")[0] if field_type.startswith("@"): return [globals()[x] for x in globals() if x.lower() == field_type[1:].lower()][0] else: return field_type @classmethod def get_defined_custom_field_types(cls, project_id): if not cls._custom_field_cache[project_id]: cfts = cls.session.test_management_client.service. getDefinedTestRunCustomFieldTypes(project_id) else: cfts = cls._custom_field_cache[project_id] results = [CustomFieldType(suds_object=item) if isinstance(item, CustomFieldType()._suds_object.__class__) else EnumCustomFieldType(suds_object=item) for item in cfts] return results def _cache_custom_fields(self, project_id): self._custom_field_cache[project_id] = {} results = self.get_defined_custom_field_types(project_id) for result in results: f = getattr(result, "enum_id", None) key = result.cft_id f_type = None if f: f_type = self._custom_field_types(f) self._custom_field_cache[project_id][key] = {} self._custom_field_cache[project_id][key]["type"] = f_type self._custom_field_cache[project_id][key]["required"] = getattr( result, "required", False) self._custom_field_cache[project_id][key]["multi"] = getattr( result, "multi", False) def _add_custom_fields(self, project_id): self._changed_fields = {} self.session if not project_id: project_id = self.default_project if project_id not in self._custom_field_cache: self._cache_custom_fields(project_id) cache = self._custom_field_cache[project_id] self._required_fields = [] for field in cache: if cache[field]["required"]: self._required_fields.append(field) self._cls_suds_map[field] = {} self._cls_suds_map[field]["field_name"] = field self._cls_suds_map[field]["is_custom"] = True if cache[field]["type"] == Text: self._cls_suds_map[field]["cls"] = Text elif cache[field]["type"]: if cache[field]["multi"]: self._cls_suds_map[field]["cls"] = ArrayOfEnumOptionId self._cls_suds_map[field]["is_array"] = True else: self._cls_suds_map[field]["cls"] = EnumOptionId self._cls_suds_map[field]["enum_id"] = cache[field]["type"] if isinstance(cache[field]["type"], type) and "project_id" in cache[field]["type"].__init__.__code__.co_varnames[ :cache[field]["type"].__init__.__code__.co_argcount]: self._cls_suds_map[field]["additional_parms"] = {"project_id": project_id} def _get_index_of_test_record(self, test_case_id): index = -1 for test_record in self._records: if test_record.executed: index += 1 if test_case_id in test_record._suds_object.testCaseURI: return index raise PyleroLibException("The Test Case is either not part of " "this TestRun or has not been executed") def _status_change(self): check_tr = TestRun(uri=self.uri) results = [rec.result for rec in check_tr.records if rec.result] if not results: status = "notrun" check_tr.finished_on = None elif len(results) == len(check_tr.records): status = "finished" else: status = "inprogress" check_tr.finished_on = None if status != check_tr.status: check_tr.status = status check_tr.update() def _verify_record_count(self, record_index): self._verify_obj() if record_index > (len(self.records) - 1): raise PyleroLibException("There are only {0} test records". 
format(len(self.records))) def _verify_test_step_count(self, record_index, test_step_index): self._verify_record_count(record_index) if test_step_index > (len(self.records[record_index].test_step_results) - 1): raise PyleroLibException("There are only {0} test records". format(len(self.records))) def add_attachment_to_test_record(self, test_case_id, path, title): record_index = self._get_index_of_test_record(test_case_id) self._verify_record_count(record_index) data = self._get_file_data(path) filename = os.path.basename(path) self.session.test_management_client.service. addAttachmentToTestRecord(self.uri, record_index, filename, title, data) def add_attachment(self, path, title): self._verify_obj() data = self._get_file_data(path) filename = os.path.basename(path) self.session.test_management_client.service. addAttachmentToTestRun(self.uri, filename, title, data) def add_attachment_to_test_step(self, test_case_id, test_step_index, path, title): record_index = self._get_index_of_test_record(test_case_id) self._verify_test_step_count(record_index, test_step_index) data = self._get_file_data(path) filename = os.path.basename(path) self.session.test_management_client.service.addAttachmentToTestStep( self.uri, record_index, test_step_index, filename, title, data) def _check_test_record_exists(self, test_case_id): check_tr = TestRun.search( 'project.id:%s AND id:"%s" AND %s' % (self.project_id, self.test_run_id, test_case_id)) if len(check_tr) not in [0, 1]: raise PyleroLibException( "The search function did not work as expected. Please report.") elif len(check_tr) == 1: raise PyleroLibException( "This test case is already part of the test run") else: return None @tx_wrapper def add_test_record_by_fields(self, test_case_id, test_result, test_comment, executed_by, executed, duration, defect_work_item_id=None): self._verify_obj() self.check_valid_field_values(test_result, "result", {}) if not executed or not test_result: raise PyleroLibException( "executed and test_result require values") testrec = TestRecord(self.project_id, test_case_id) testrec.result = test_result testrec.comment = test_comment testrec.executed_by = executed_by testrec.executed = executed testrec.duration = duration if defect_work_item_id: testrec.defect_case_id = defect_work_item_id self.add_test_record_by_object(testrec) @tx_wrapper def add_test_record_by_object(self, test_record): self._verify_obj() test_case_id = test_record.test_case_id self._check_test_record_exists(test_case_id) if isinstance(test_record, TestRecord): suds_object = test_record._suds_object elif isinstance(test_record, TestRecord()._suds_object.__class__): suds_object = test_record if test_record.result == "failed" and not test_record.defect_case_id: test_record.defect_case_id = create_incident_report(self, test_record, TestCase(work_item_id=test_case_id)) self.session.test_management_client.service.addTestRecordToTestRun( self.uri, suds_object) self._status_change() def create_summary_defect(self, defect_template_id=None): self._verify_obj() defect_template_uri = None if defect_template_id: suds_defect_template = _WorkItem(work_item_id=defect_template_id, project_id=self.project_id) defect_template_uri = suds_defect_template._uri wi_uri = self.session.test_management_client.service. 
createSummaryDefect(self.uri, defect_template_uri) return _WorkItem(uri=wi_uri) def delete_attachment_from_test_record(self, test_case_id, filename): record_index = self._get_index_of_test_record(test_case_id) self._verify_record_count(record_index) self.session.test_management_client.service. deleteAttachmentFromTestRecord(self.uri, record_index, filename) def delete_attachment_from_test_step(self, test_case_id, test_step_index, filename): record_index = self._get_index_of_test_record(test_case_id) self._verify_test_step_count(record_index, test_step_index) self.session.test_management_client.service. deleteAttachmentFromTestStep(self.uri, record_index, test_step_index, filename) def delete_attachment(self, filename): self._verify_obj() self.session.test_management_client.service. deleteTestRunAttachment(self.uri, filename) def get_attachment(self, filename): self._verify_obj() suds_attach = self.session.test_management_client.service. getTestRunAttachment(self.uri, filename) return TestRunAttachment(suds_object=suds_attach) def get_attachments(self): self._verify_obj() lst_suds_attach = self.session.test_management_client.service. getTestRunAttachments(self.uri) lst_attach = [TestRunAttachment(suds_object=suds_attach) for suds_attach in lst_suds_attach] return lst_attach
MIT License
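get_custom_field returns the stored value when the key exists in _custom_fields and an empty Custom(field_name, None) otherwise, so callers have to handle both shapes. A hedged usage sketch; it needs a configured Polarion connection, and the project, run and field names are placeholders:

from pylero.custom import Custom
from pylero.test_run import TestRun

run = TestRun("example_run_1", project_id="EXAMPLEPROJ")   # placeholder ids
result = run.get_custom_field("arch")                       # hypothetical field key
if isinstance(result, Custom):
    print("field not set")          # missing key: empty Custom(field_name, None)
else:
    print(result)                   # present key: the stored value itself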
morgan-stanley/treadmill
lib/python/treadmill/tests/appcfg_test.py
AppCfgTest._write_app_yaml
python
def _write_app_yaml(event, manifest_str): fs.write_safe( event, lambda f: f.write(manifest_str), mode='w', )
Helper method to create an app.yaml file in the event directory.
https://github.com/morgan-stanley/treadmill/blob/f18267c665baf6def4374d21170198f63ff1cde4/lib/python/treadmill/tests/appcfg_test.py#L29-L36
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import shutil import tempfile import unittest from treadmill import appcfg from treadmill import fs class AppCfgTest(unittest.TestCase): def setUp(self): self.root = tempfile.mkdtemp() def tearDown(self): if self.root and os.path.isdir(self.root): shutil.rmtree(self.root) @staticmethod
Apache License 2.0
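The test helper above delegates to fs.write_safe(path, callable, mode='w'). As an illustration only (this is not treadmill's implementation), a common "safe write" pattern writes to a temp file in the target directory and then atomically replaces the destination:

import os
import tempfile

def write_safe(path, write_cb, mode="w"):
    # Write into a temp file next to the target, then atomically swap it in.
    dirname = os.path.dirname(path) or "."
    fd, tmp = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, mode) as f:
            write_cb(f)
        os.replace(tmp, path)
    except BaseException:
        os.unlink(tmp)
        raise

# write_safe("app.yaml", lambda f: f.write("services: []\n"))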
olitheolix/aiokubernetes
aiokubernetes/models/v1beta2_replica_set.py
V1beta2ReplicaSet.metadata
python
def metadata(self): return self._metadata
Gets the metadata of this V1beta2ReplicaSet. # noqa: E501 If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata # noqa: E501 :return: The metadata of this V1beta2ReplicaSet. # noqa: E501 :rtype: V1ObjectMeta
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1beta2_replica_set.py#L120-L128
import pprint import re from aiokubernetes.models.v1_object_meta import V1ObjectMeta from aiokubernetes.models.v1beta2_replica_set_spec import V1beta2ReplicaSetSpec from aiokubernetes.models.v1beta2_replica_set_status import V1beta2ReplicaSetStatus class V1beta2ReplicaSet(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'spec': 'V1beta2ReplicaSetSpec', 'status': 'V1beta2ReplicaSetStatus' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'spec': 'spec', 'status': 'status' } def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None): self._api_version = None self._kind = None self._metadata = None self._spec = None self._status = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if spec is not None: self.spec = spec if status is not None: self.status = status @property def api_version(self): return self._api_version @api_version.setter def api_version(self, api_version): self._api_version = api_version @property def kind(self): return self._kind @kind.setter def kind(self, kind): self._kind = kind @property
Apache License 2.0
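A hedged sketch of exercising the generated model above; it only uses the plain property getters and setters, needs no running cluster, and the name/label values are made up:

from aiokubernetes.models.v1_object_meta import V1ObjectMeta
from aiokubernetes.models.v1beta2_replica_set import V1beta2ReplicaSet

rs = V1beta2ReplicaSet(api_version="apps/v1beta2", kind="ReplicaSet")
rs.metadata = V1ObjectMeta(name="example-rs", labels={"app": "example"})   # plain setter/getter
print(rs.metadata.name, rs.metadata.labels)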
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/sensor/tellduslive.py
TelldusLiveSensor._value_as_temperature
python
def _value_as_temperature(self): return round(float(self._value), 1)
Return the value as temperature.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/sensor/tellduslive.py#L73-L75
import logging from homeassistant.components.tellduslive import TelldusLiveEntity from homeassistant.const import ( DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS) _LOGGER = logging.getLogger(__name__) SENSOR_TYPE_TEMPERATURE = 'temp' SENSOR_TYPE_HUMIDITY = 'humidity' SENSOR_TYPE_RAINRATE = 'rrate' SENSOR_TYPE_RAINTOTAL = 'rtot' SENSOR_TYPE_WINDDIRECTION = 'wdir' SENSOR_TYPE_WINDAVERAGE = 'wavg' SENSOR_TYPE_WINDGUST = 'wgust' SENSOR_TYPE_UV = 'uv' SENSOR_TYPE_WATT = 'watt' SENSOR_TYPE_LUMINANCE = 'lum' SENSOR_TYPE_DEW_POINT = 'dewp' SENSOR_TYPE_BAROMETRIC_PRESSURE = 'barpress' SENSOR_TYPES = { SENSOR_TYPE_TEMPERATURE: ['Temperature', TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE], SENSOR_TYPE_HUMIDITY: ['Humidity', '%', None, DEVICE_CLASS_HUMIDITY], SENSOR_TYPE_RAINRATE: ['Rain rate', 'mm/h', 'mdi:water', None], SENSOR_TYPE_RAINTOTAL: ['Rain total', 'mm', 'mdi:water', None], SENSOR_TYPE_WINDDIRECTION: ['Wind direction', '', '', None], SENSOR_TYPE_WINDAVERAGE: ['Wind average', 'm/s', '', None], SENSOR_TYPE_WINDGUST: ['Wind gust', 'm/s', '', None], SENSOR_TYPE_UV: ['UV', 'UV', '', None], SENSOR_TYPE_WATT: ['Power', 'W', '', None], SENSOR_TYPE_LUMINANCE: ['Luminance', 'lx', None, DEVICE_CLASS_ILLUMINANCE], SENSOR_TYPE_DEW_POINT: ['Dew Point', TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE], SENSOR_TYPE_BAROMETRIC_PRESSURE: ['Barometric Pressure', 'kPa', '', None], } def setup_platform(hass, config, add_devices, discovery_info=None): if discovery_info is None: return add_devices(TelldusLiveSensor(hass, sensor) for sensor in discovery_info) class TelldusLiveSensor(TelldusLiveEntity): @property def device_id(self): return self._id[0] @property def _type(self): return self._id[1] @property def _value(self): return self.device.value(*self._id[1:]) @property
MIT License
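_value_as_temperature simply converts the raw Telldus Live reading to a float and rounds to one decimal. A tiny illustration of that conversion with made-up sample values:

def value_as_temperature(raw):
    # Telldus Live reports sensor values as strings; round to one decimal.
    return round(float(raw), 1)

for raw in ("21.4567", "-3.05", "0"):
    print(raw, "->", value_as_temperature(raw))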
cisco-en-programmability/dnacentersdk
dnacentersdk/api/v2_1_2/tag.py
Tag.delete_tag
python
def delete_tag(self, id, headers=None, **request_parameters): check_type(headers, dict) check_type(id, basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/${id}') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.delete(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.delete(endpoint_full_url, params=_params) return self._object_factory('bpm_429c28154bdaa13d_v2_1_2', json_data)
Deletes a tag specified by id. Args: id(basestring): Tag ID. headers(dict): Dictionary of HTTP Headers to send with the Request . **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: MyDict: JSON response. Access the object's properties by using the dot notation or the bracket notation. Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the DNA Center cloud returns an error.
https://github.com/cisco-en-programmability/dnacentersdk/blob/ef2adde6113e7a6acd28a287007eb470fa39d31f/dnacentersdk/api/v2_1_2/tag.py#L315-L368
from __future__ import ( absolute_import, division, print_function, unicode_literals, ) from builtins import * from past.builtins import basestring from ...restsession import RestSession from ...utils import ( check_type, dict_from_items_with_values, apply_path_params, dict_of_str, ) class Tag(object): def __init__(self, session, object_factory, request_validator): check_type(session, RestSession) super(Tag, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator def add_members_to_the_tag(self, id, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) check_type(id, basestring, may_be_none=False) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } _payload = { } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_00a2fa6146089317_v2_1_2') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/${id}/member') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_00a2fa6146089317_v2_1_2', json_data) def create_tag(self, description=None, dynamicRules=None, id=None, instanceTenantId=None, name=None, systemTag=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'systemTag': systemTag, 'description': description, 'dynamicRules': dynamicRules, 'name': name, 'id': id, 'instanceTenantId': instanceTenantId, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_1399891c42a8be64_v2_1_2') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_1399891c42a8be64_v2_1_2', json_data) def get_tag_member_count(self, id, member_type, level='0', member_association_type=None, headers=None, **request_parameters): check_type(headers, dict) check_type(member_type, basestring, may_be_none=False) check_type(member_association_type, basestring) check_type(level, basestring) check_type(id, 
basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { 'memberType': member_type, 'memberAssociationType': member_association_type, 'level': level, } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/tag/${id}/member/count') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_2e9db85840fbb1cf_v2_1_2', json_data)
MIT License
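A hedged sketch of reaching the wrapped endpoint through the SDK's top-level client. The controller URL, credentials and tag id are placeholders, and the version string must match one supported by the installed dnacentersdk release:

from dnacentersdk import DNACenterAPI

dnac = DNACenterAPI(base_url="https://dnac.example.com",      # placeholder controller
                    username="admin", password="password",     # placeholder credentials
                    version="2.1.2", verify=False)
response = dnac.tag.delete_tag(id="00000000-0000-0000-0000-000000000000")  # placeholder tag id
print(response)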
axerunners/electrum-axe
contrib/sign-releases.py
SignApp.sign_file_name
python
def sign_file_name(self, name, detach=True): with open(name, 'rb') as fdrb: signed_data = self.gpg.sign_file(fdrb, keyid=self.keyid, passphrase=self.passphrase, detach=detach) with open('%s.asc' % name, 'wb') as fdw: fdw.write(signed_data.data)
Sign file with self.keyid, place signature in detached .asc file
https://github.com/axerunners/electrum-axe/blob/7ef05088c0edaf0688fb167df353d6da619ebf2f/contrib/sign-releases.py#L450-L458
# Helper for signing Axe Electrum releases: GPG signing of GitHub release
# assets, jarsigner signing of Android APKs, and Launchpad PPA preparation.
import os
import os.path
import re
import sys
import time
import getpass
import shutil
import hashlib
import tempfile
import json
import zipfile
from subprocess import check_call, CalledProcessError
from functools import cmp_to_key
from time import localtime, strftime

try:
    import click
    import certifi
    import gnupg
    import dateutil.parser
    import colorama
    from colorama import Fore, Style
    from github_release import (get_releases, gh_asset_download,
                                gh_asset_upload, gh_asset_delete,
                                gh_release_edit)
    from urllib3 import PoolManager
except ImportError as e:
    print('Import error:', e)
    print('To run script install required packages with the next command:\n\n'
          'pip install githubrelease python-gnupg pyOpenSSL cryptography idna'
          ' certifi python-dateutil click colorama requests LinkHeader')
    sys.exit(1)


HTTP = PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())

FNULL = open(os.devnull, 'w')
HOME_DIR = os.path.expanduser('~')
CONFIG_NAME = '.sign-releases'
SEARCH_COUNT = 1
SHA_FNAME = 'SHA256SUMS.txt'

PPA_SERIES = {
    'xenial': '16.04.1',
    'bionic': '18.04.1',
    'eoan': '19.10.1',
    'focal': '20.04.1',
}

PEP440_PUBVER_PATTERN = re.compile('^((\d+)!)?'
                                   '((\d+)(\.\d+)*)'
                                   '([a-zA-Z]+\d+)?'
                                   '((\.[a-zA-Z]+\d+)*)$')
REL_NOTES_PATTERN = re.compile('^#.+?(^[^#].+?)^#.+?', re.M | re.S)
SDIST_NAME_PATTERN = re.compile('^Axe-Electrum-(.*).tar.gz$')
SDIST_DIR_TEMPLATE = 'Axe-Electrum-{version}'
PPA_SOURCE_NAME = 'electrum-axe'
PPA_ORIG_NAME_TEMPLATE = '%s_{version}.orig.tar.gz' % PPA_SOURCE_NAME

CHANGELOG_TEMPLATE = """%s ({ppa_version}) {series}; urgency=medium

{changes}

 -- {uid}  {time}""" % PPA_SOURCE_NAME

PPA_FILES_TEMPLATE = '%s_{0}{1}' % PPA_SOURCE_NAME
LP_API_URL = 'https://api.launchpad.net/1.0'
LP_SERIES_TEMPLATE = '%s/ubuntu/{0}' % LP_API_URL
LP_ARCHIVES_TEMPLATE = '%s/~{user}/+archive/ubuntu/{ppa}' % LP_API_URL

JKS_KEYSTORE = os.path.join(HOME_DIR, '.jks/keystore')
JKS_ALIAS = 'axerunners.com'
JKS_STOREPASS = 'JKS_STOREPASS'
JKS_KEYPASS = 'JKS_KEYPASS'
KEYTOOL_ARGS = ['keytool', '-list', '-storepass:env', JKS_STOREPASS]
JARSIGNER_ARGS = [
    'jarsigner', '-verbose',
    '-tsa', 'http://sha256timestamp.ws.symantec.com/sha256/timestamp',
    '-sigalg', 'SHA1withRSA', '-digestalg', 'SHA1',
    '-sigfile', 'axe-electrum',
    '-storepass:env', JKS_STOREPASS,
    '-keypass:env', JKS_KEYPASS,
]
UNSIGNED_APK_PATTERN = re.compile('^Electrum_AXE(_Testnet)?-(.*)-release-unsigned.apk$')
SIGNED_APK_TEMPLATE = 'Axe-Electrum{testnet}-{version}-release.apk'

os.environ['QUILT_PATCHES'] = 'debian/patches'


def pep440_to_deb(version):
    """Convert a PEP 440 public version to a Debian upstream version."""
    ver_match = PEP440_PUBVER_PATTERN.match(version)
    if not ver_match:
        raise Exception('Version "%s" does not comply with PEP 440' % version)

    g = ver_match.group
    deb_ver = ''
    deb_ver += ('%s:' % g(2)) if g(1) else ''
    deb_ver += g(3)
    deb_ver += ('~%s' % g(6)) if g(6) else ''
    deb_ver += ('%s' % g(7)) if g(7) else ''

    return deb_ver


def compare_published_times(a, b):
    """Comparator for releases: most recently published first."""
    a = a['published_at']
    b = b['published_at']

    if not a and not b:
        return 0
    elif not a:
        return -1
    elif not b:
        return 1

    a = dateutil.parser.parse(a)
    b = dateutil.parser.parse(b)

    if a > b:
        return -1
    elif b > a:
        return 1
    else:
        return 0


def sha256_checksum(filename, block_size=65536):
    """Compute the SHA256 hex digest of a file, reading it block by block."""
    sha256 = hashlib.sha256()
    with open(filename, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b''):
            sha256.update(block)
    return sha256.hexdigest()


def read_config():
    """Read and parse the JSON config file ~/.sign-releases."""
    config_path = os.path.join(HOME_DIR, CONFIG_NAME)
    if not os.path.isfile(config_path):
        return {}

    try:
        with open(config_path, 'r') as f:
            data = f.read()
            return json.loads(data)
    except Exception as e:
        print('Error: Cannot read config file:', e)
        return {}


def get_next_ppa_num(ppa, source_package_name, ppa_upstr_version, series_name):
    """Query Launchpad for the next ppaN revision of an upstream version."""
    user, ppa_name = ppa.split('/')
    archives_url = LP_ARCHIVES_TEMPLATE.format(user=user, ppa=ppa_name)
    series_url = LP_SERIES_TEMPLATE.format(series_name)
    query = {
        'ws.op': 'getPublishedSources',
        'distro_series': series_url,
        'order_by_date': 'true',
        'source_name': source_package_name,
    }
    resp = HTTP.request('GET', archives_url, fields=query)
    if resp.status != 200:
        raise Exception('Launchpad API error %s %s'
                        % (resp.status, resp.reason))
    data = json.loads(resp.data.decode('utf-8'))
    entries = data['entries']
    if len(entries) == 0:
        return 1

    for e in entries:
        ppa_version = e['source_package_version']
        version_match = re.match('%s-0ppa(\d+)~ubuntu' % ppa_upstr_version,
                                 ppa_version)
        if version_match:
            return int(version_match.group(1)) + 1

    return 1


class ChdirTemporaryDirectory(object):
    """Context manager: create a temporary directory and chdir into it."""

    def __enter__(self):
        self.prev_wd = os.getcwd()
        self.name = tempfile.mkdtemp()
        os.chdir(self.name)
        return self.name

    def __exit__(self, exc_type, exc_value, traceback):
        os.chdir(self.prev_wd)
        shutil.rmtree(self.name)


class SignApp(object):

    def __init__(self, **kwargs):
        """Merge CLI options with ~/.sign-releases and check credentials."""
        ask_passphrase = kwargs.pop('ask_passphrase', None)
        self.sign_drafts = kwargs.pop('sign_drafts', False)
        self.force = kwargs.pop('force', False)
        self.tag_name = kwargs.pop('tag_name', None)
        self.repo = kwargs.pop('repo', None)
        self.ppa = kwargs.pop('ppa', None)
        self.ppa_upstream_suffix = kwargs.pop('ppa_upstream_suffix', None)
        self.token = kwargs.pop('token', None)
        self.keyid = kwargs.pop('keyid', None)
        self.count = kwargs.pop('count', None)
        self.dry_run = kwargs.pop('dry_run', False)
        self.no_ppa = kwargs.pop('no_ppa', False)
        self.verbose = kwargs.pop('verbose', False)
        self.jks_keystore = kwargs.pop('jks_keystore', False)
        self.jks_alias = kwargs.pop('jks_alias', False)
        self.zipalign_path = kwargs.pop('zipalign_path', False)
        self.config = {}

        config_data = read_config()

        default_repo = config_data.get('default_repo', None)
        if default_repo:
            if not self.repo:
                self.repo = default_repo

            for config in config_data.get('repos', []):
                config_repo = config.get('repo', None)
                if config_repo and config_repo == self.repo:
                    self.config = config
                    break
        else:
            self.config = config_data

        if self.config:
            self.repo = self.repo or self.config.get('repo', None)
            self.ppa = self.ppa or self.config.get('ppa', None)
            self.token = self.token or self.config.get('token', None)
            self.keyid = self.keyid or self.config.get('keyid', None)
            self.count = (self.count or self.config.get('count', None)
                          or SEARCH_COUNT)
            self.sign_drafts = (self.sign_drafts
                                or self.config.get('sign_drafts', False))
            self.no_ppa = self.no_ppa or self.config.get('no_ppa', False)
            self.verbose = self.verbose or self.config.get('verbose', None)
            self.jks_keystore = (self.jks_keystore
                                 or self.config.get('jks_keystore',
                                                    JKS_KEYSTORE))
            self.jks_alias = (self.jks_alias
                              or self.config.get('jks_alias', JKS_ALIAS))
            self.zipalign_path = (self.zipalign_path
                                  or self.config.get('zipalign_path', None))

        if not self.repo:
            print('no repo found, exit')
            sys.exit(1)

        if self.token:
            os.environ['GITHUB_TOKEN'] = self.token

        if not os.environ.get('GITHUB_TOKEN', None):
            print('GITHUB_TOKEN environment var not set, exit')
            sys.exit(1)

        if self.keyid:
            self.keyid = self.keyid.split('/')[-1]

        self.passphrase = None
        self.gpg = gnupg.GPG()

        if not self.keyid:
            print('no keyid set, exit')
            sys.exit(1)

        keylist = self.gpg.list_keys(True, keys=[self.keyid])
        if not keylist:
            print('no key with keyid %s found, exit' % self.keyid)
            sys.exit(1)

        self.uid = ', '.join(keylist[0].get('uids', ['No uid found']))

        if ask_passphrase:
            while not self.passphrase:
                self.read_passphrase()
        elif not self.check_key():
            while not self.passphrase:
                self.read_passphrase()

        if self.zipalign_path:
            # Check that zipalign is callable (a non-zero exit status with no
            # arguments is fine), then ask for the APK signing credentials.
            try:
                check_call(self.zipalign_path, stderr=FNULL)
            except CalledProcessError:
                pass

            self.read_jks_storepass()
            self.read_jks_keypass()

    def read_jks_storepass(self):
        """Read the JKS keystore password and verify it with keytool."""
        while JKS_STOREPASS not in os.environ:
            storepass = getpass.getpass('%sInput %s keystore password:%s '
                                        % (Fore.GREEN, self.jks_keystore,
                                           Style.RESET_ALL))
            os.environ[JKS_STOREPASS] = storepass
            try:
                check_call(KEYTOOL_ARGS + ['-keystore', self.jks_keystore],
                           stdout=FNULL, stderr=FNULL)
            except CalledProcessError:
                print('%sWrong keystore password%s'
                      % (Fore.RED, Style.RESET_ALL))
                del os.environ[JKS_STOREPASS]

    def read_jks_keypass(self):
        """Read the key alias password and verify it by signing a test zip."""
        while JKS_KEYPASS not in os.environ:
            keypass = getpass.getpass('%sInput alias password for <%s> '
                                      '[Enter if same as for keystore]:%s '
                                      % (Fore.YELLOW, self.jks_alias,
                                         Style.RESET_ALL))
            if not keypass:
                os.environ[JKS_KEYPASS] = os.environ[JKS_STOREPASS]
            else:
                os.environ[JKS_KEYPASS] = keypass

            with ChdirTemporaryDirectory() as tmpdir:
                test_file = 'testfile.txt'
                test_zipfile = 'testzip.zip'
                with open(test_file, 'w') as fdw:
                    fdw.write('testcontent')

                test_zf = zipfile.ZipFile(test_zipfile, mode='w')
                test_zf.write(test_file)
                test_zf.close()

                sign_args = ['-keystore', self.jks_keystore,
                             test_zipfile, self.jks_alias]
                try:
                    check_call(JARSIGNER_ARGS + sign_args, stdout=FNULL)
                except CalledProcessError:
                    print('%sWrong key alias password%s'
                          % (Fore.RED, Style.RESET_ALL))
                    del os.environ[JKS_KEYPASS]

    def read_passphrase(self):
        """Read the GPG passphrase and keep it only if it unlocks the key."""
        passphrase = getpass.getpass('%sInput passphrase for Key: %s %s:%s '
                                     % (Fore.GREEN, self.keyid, self.uid,
                                        Style.RESET_ALL))
        if self.check_key(passphrase):
            self.passphrase = passphrase

    def check_key(self, passphrase=None):
        """Sign and verify a test message to validate the key passphrase."""
        signed_data = self.gpg.sign('test message to check passphrase',
                                    keyid=self.keyid, passphrase=passphrase)
        if signed_data.data and self.gpg.verify(signed_data.data).valid:
            return True

        print('%sWrong passphrase!%s' % (Fore.RED, Style.RESET_ALL))
        return False
MIT License