repository_name|function_path|function_identifier|language|function|docstring|function_url|context|license|
---|---|---|---|---|---|---|---|---|
shivamsarodia/shivyc
|
shivyc/asm_gen.py
|
ASMGen._get_global_spotmap
|
python
|
def _get_global_spotmap(self):
    global_spotmap = {}
    EXTERNAL = self.symbol_table.EXTERNAL
    DEFINED = self.symbol_table.DEFINED
    num = 0
    for value in (set(self.il_code.literals.keys()) |
                  set(self.il_code.string_literals.keys()) |
                  set(self.symbol_table.storage.keys())):
        num += 1
        spot = self._get_nondynamic_spot(value, num)
        if spot: global_spotmap[value] = spot
    externs = self.symbol_table.linkages[EXTERNAL].values()
    for v in externs:
        if self.symbol_table.def_state.get(v) == DEFINED:
            self.asm_code.add_global(self.symbol_table.names[v])
    return global_spotmap
|
Generate global spotmap and add global values to ASM.
This function generates a spotmap for variables which are not
specific to a single function. This includes literals and variables
with static storage duration.
|
https://github.com/shivamsarodia/shivyc/blob/e7d72eff237e1ef49ec70333497348baf86be425/shivyc/asm_gen.py#L412-L438
|
import itertools
import shivyc.asm_cmds as asm_cmds
import shivyc.spots as spots
from shivyc.spots import Spot, RegSpot, MemSpot, LiteralSpot
class ASMCode:
def __init__(self):
self.lines = []
self.comm = []
self.globals = []
self.data = []
self.string_literals = []
def add(self, cmd):
self.lines.append(cmd)
label_num = 0
@staticmethod
def get_label():
ASMCode.label_num += 1
return f"__shivyc_label{ASMCode.label_num}"
def add_global(self, name):
self.globals.append(f"\t.global {name}")
def add_data(self, name, size, init):
self.data.append(f"{name}:")
size_strs = {1: "byte",
2: "word",
4: "int",
8: "quad"}
if init:
self.data.append(f"\t.{size_strs[size]} {init}")
else:
self.data.append(f"\t.zero {size}")
def add_comm(self, name, size, local):
if local:
self.comm.append(f"\t.local {name}")
self.comm.append(f"\t.comm {name} {size}")
def add_string_literal(self, name, chars):
self.string_literals.append(f"{name}:")
data = ",".join(str(char) for char in chars)
self.string_literals.append(f"\t.byte {data}")
def full_code(self):
header = ["\t.intel_syntax noprefix"]
header += self.comm
if self.string_literals or self.data:
header += ["\t.section .data"]
header += self.data
header += self.string_literals
header += [""]
header += ["\t.section .text"] + self.globals
header += [str(line) for line in self.lines]
return "\n".join(header + ["\t.att_syntax noprefix", ""])
class NodeGraph:
def __init__(self, nodes=None):
self._real_nodes = nodes or []
self._all_nodes = self._real_nodes[:]
self._conf = {n: [] for n in self._all_nodes}
self._pref = {n: [] for n in self._all_nodes}
def is_node(self, n):
return n in self._conf and n in self._pref
def add_dummy_node(self, v):
self._all_nodes.append(v)
self._conf[v] = []
self._pref[v] = []
for n in self._all_nodes:
if n not in self._real_nodes and n != v:
self.add_conflict(n, v)
def add_conflict(self, n1, n2):
if n2 not in self._conf[n1]:
self._conf[n1].append(n2)
if n1 not in self._conf[n2]:
self._conf[n2].append(n1)
def add_pref(self, n1, n2):
if n2 not in self._pref[n1]:
self._pref[n1].append(n2)
if n1 not in self._pref[n2]:
self._pref[n2].append(n1)
def pop(self, n):
del self._conf[n]
del self._pref[n]
if n in self._real_nodes:
self._real_nodes.remove(n)
self._all_nodes.remove(n)
for v in self._conf:
if n in self._conf[v]:
self._conf[v].remove(n)
for v in self._pref:
if n in self._pref[v]:
self._pref[v].remove(n)
return n
def merge(self, n1, n2):
total_conf = self._conf[n1][:]
for c in self._conf[n2]:
if c not in total_conf:
total_conf.append(c)
self._conf[n1] = total_conf
for c in self._conf[n1]:
if n2 in self._conf[c]:
self._conf[c].remove(n2)
if n1 not in self._conf[c]:
self._conf[c].append(n1)
total_pref = self._pref[n1][:]
for p in self._pref[n2]:
if p not in total_pref:
total_pref.append(p)
if n1 in total_pref: total_pref.remove(n1)
if n2 in total_pref: total_pref.remove(n2)
self._pref[n1] = total_pref
for c in self._pref[n1]:
if n2 in self._pref[c]:
self._pref[c].remove(n2)
if n1 not in self._pref[c]:
self._pref[c].append(n1)
del self._conf[n2]
del self._pref[n2]
self._real_nodes.remove(n2)
self._all_nodes.remove(n2)
def remove_pref(self, n1, n2):
self._pref[n1].remove(n2)
self._pref[n2].remove(n1)
def prefs(self, n):
return self._pref[n]
def confs(self, n):
return self._conf[n]
def nodes(self):
return self._real_nodes
def all_nodes(self):
return self._all_nodes
def copy(self):
g = NodeGraph()
g._real_nodes = self._real_nodes[:]
g._all_nodes = self._all_nodes[:]
for n in self._all_nodes:
g._conf[n] = self._conf[n][:]
g._pref[n] = self._pref[n][:]
return g
def __str__(self):
return ("Conf\n" +
"\n".join(str((v, self._conf[v])) for v in self._all_nodes)
+ "\nPref\n" +
"\n".join(str((v, self._pref[v])) for v in self._all_nodes))
class ASMGen:
alloc_registers = spots.registers
all_registers = alloc_registers
def __init__(self, il_code, symbol_table, asm_code, arguments):
self.il_code = il_code
self.symbol_table = symbol_table
self.asm_code = asm_code
self.arguments = arguments
self.offset = 0
def make_asm(self):
global_spotmap = self._get_global_spotmap()
for func in self.il_code.commands:
self.asm_code.add(asm_cmds.Label(func))
self._make_asm(self.il_code.commands[func], global_spotmap)
def _make_asm(self, commands, global_spotmap):
free_values = self._get_free_values(commands, global_spotmap)
move_to_mem = []
for command in commands:
refs = command.references().values()
for line in refs:
for v in line:
if v not in refs:
move_to_mem.append(v)
for v in free_values:
if v.ctype.size not in {1, 2, 4, 8}:
move_to_mem.append(v)
for v in move_to_mem:
if v in free_values:
self.offset += v.ctype.size
global_spotmap[v] = MemSpot(spots.RBP, -self.offset)
free_values.remove(v)
live_vars = self._get_live_vars(commands, free_values)
g_bak = self._generate_graph(commands, free_values, live_vars)
spilled_nodes = []
while True:
g = g_bak.copy()
for n in spilled_nodes:
g.pop(n)
removed_nodes = []
merged_nodes = {}
while True:
while True:
simplified = self._simplify_all(removed_nodes, g)
merged = self._coalesce_all(merged_nodes, g)
if not simplified and not merged: break
if not self._freeze(g):
break
if not g.nodes():
break
else:
n = max(g.nodes(), key=lambda n: len(g.confs(n)))
spilled_nodes.append(n)
while g.all_nodes():
removed_nodes.append(g.pop(g.all_nodes()[0]))
spotmap = self._generate_spotmap(removed_nodes, merged_nodes, g_bak)
for v in spilled_nodes:
self.offset += v.ctype.size
spotmap[v] = MemSpot(spots.RBP, -self.offset)
for v in global_spotmap:
spotmap[v] = global_spotmap[v]
if self.arguments.show_reg_alloc_perf:
total_prefs = 0
matched_prefs = 0
for n1, n2 in itertools.combinations(g_bak.all_nodes(), 2):
if n2 in g_bak.prefs(n1):
total_prefs += 1
if spotmap[n1] == spotmap[n2]:
matched_prefs += 1
print("total prefs", total_prefs)
print("matched prefs", matched_prefs)
print("total ILValues", len(g_bak.nodes()))
print("register ILValues", len(g_bak.nodes()) - len(spilled_nodes))
self._generate_asm(commands, live_vars, spotmap)
|
MIT License
|
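Below is a hedged sketch of the `NodeGraph` interference/preference graph shown in the shivyc context above, the structure `ASMGen` colours during register allocation. It assumes `shivyc` is importable and uses plain strings in place of IL values.

```python
# Hedged sketch: exercises NodeGraph from the context above with string nodes
# standing in for ILValue objects (assumes shivyc is installed/importable).
from shivyc.asm_gen import NodeGraph

g = NodeGraph(["a", "b", "c"])
g.add_conflict("a", "b")   # a and b are live simultaneously, so they need different spots
g.add_pref("a", "c")       # a and c would ideally share a register
print(g.confs("a"))        # ['b']
g.merge("a", "c")          # coalesce the preference pair into one node
print(g.nodes())           # ['a', 'b']
```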
reconnaissanceblindchess/reconchess
|
reconchess/history.py
|
Turn.next
|
python
|
def next(self):
    return Turn(not self.color, self.turn_number + (0 if self.color == chess.WHITE else 1))
|
:return: The :class:`Turn` that happens immediately after this, which is the other player's next turn.
|
https://github.com/reconnaissanceblindchess/reconchess/blob/ad5666004a564a1a8ce79538df64337fc190bd99/reconchess/history.py#L22-L26
|
import chess
from .types import *
from typing import Callable, TypeVar, Iterable, Mapping
import json
import math
from .utilities import ChessJSONEncoder, ChessJSONDecoder
T = TypeVar('T')
class Turn(object):
def __init__(self, color: Color, turn_number: int):
self.color = color
self.turn_number = turn_number
@property
|
BSD 3-Clause New or Revised License
|
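A hedged usage sketch for `Turn.next` from the reconchess row above. The trailing `@property` in the context suggests `next` is exposed as a property, and the colour constants come from `python-chess`.

```python
import chess
from reconchess.history import Turn

turn = Turn(chess.WHITE, 5)
after = turn.next            # assumed to be a property, per the decorator in the context
print(after.color)           # False, i.e. chess.BLACK: the other player moves next
print(after.turn_number)     # 5: the number only increments after Black's move
```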
hyperledger/sawtooth-core
|
validator/sawtooth_validator/journal/block_store.py
|
BlockStore.get_batch_count
|
python
|
def get_batch_count(self):
    return self._get_count('commit_store_get_batch_count')
|
Returns the count of batches in the block store.
Returns:
Integer: The count of batches
|
https://github.com/hyperledger/sawtooth-core/blob/2ab9b8fb8383887dc33c67a72e194c30f7b3f7dc/validator/sawtooth_validator/journal/block_store.py#L419-L425
|
import ctypes
from enum import IntEnum
from sawtooth_validator.journal.block_wrapper import BlockWrapper
from sawtooth_validator.protobuf.block_pb2 import Block
from sawtooth_validator.protobuf.batch_pb2 import Batch
from sawtooth_validator.protobuf.transaction_pb2 import Transaction
from sawtooth_validator.state.merkle import INIT_ROOT_KEY
from sawtooth_validator import ffi
class ErrorCode(IntEnum):
Success = ffi.CommonErrorCode.Success
NullPointerProvided = ffi.CommonErrorCode.NullPointerProvided
InvalidArgument = 2
DatabaseError = 0x10
NotFound = 0x11
StopIteration = 0x20
def _check_error(return_code):
if return_code == ErrorCode.Success:
return
if return_code == ErrorCode.NullPointerProvided:
raise TypeError("Provided null pointer(s)")
if return_code == ErrorCode.InvalidArgument:
raise TypeError("An invalid argument was provided")
if return_code == ErrorCode.DatabaseError:
raise RuntimeError("A database error occurred")
if return_code == ErrorCode.NotFound:
raise ValueError("Unable to find requested item")
if return_code == ErrorCode.StopIteration:
raise StopIteration()
raise RuntimeError("An unknown error occurred: {}".format(return_code))
def _libexec(name, *args):
_check_error(ffi.LIBRARY.call(name, *args))
def _pylibexec(name, *args):
_check_error(ffi.PY_LIBRARY.call(name, *args))
class _PutEntry(ctypes.Structure):
_fields_ = [('block_bytes', ctypes.c_char_p),
('block_bytes_len', ctypes.c_size_t)]
@staticmethod
def new(block_bytes):
return _PutEntry(
block_bytes,
len(block_bytes)
)
class BlockStore(ffi.OwnedPointer):
def __init__(self, database):
super().__init__('commit_store_drop')
_libexec(
'commit_store_new',
database.pointer,
ctypes.byref(self.pointer))
def _get_data_by_num(self, object_id, ffi_fn_name):
(vec_ptr, vec_len, vec_cap) = ffi.prepare_vec_result()
_pylibexec(ffi_fn_name,
self.pointer,
ctypes.c_ulonglong(object_id),
ctypes.byref(vec_ptr),
ctypes.byref(vec_len),
ctypes.byref(vec_cap))
return ffi.from_rust_vec(vec_ptr, vec_len, vec_cap)
def _get_data_by_id(self, object_id, ffi_fn_name):
(vec_ptr, vec_len, vec_cap) = ffi.prepare_vec_result()
_pylibexec(ffi_fn_name,
self.pointer,
ctypes.c_char_p(object_id.encode()),
ctypes.byref(vec_ptr),
ctypes.byref(vec_len),
ctypes.byref(vec_cap))
return ffi.from_rust_vec(vec_ptr, vec_len, vec_cap)
def _get_block_by_num(self, object_id, ffi_fn_name):
return self.deserialize_block(
self._get_data_by_num(object_id, ffi_fn_name))
def _get_block_by_id(self, object_id, ffi_fn_name):
return self.deserialize_block(
self._get_data_by_id(object_id, ffi_fn_name))
def __getitem__(self, key):
try:
return self._get_block_by_id(key, 'commit_store_get_by_block_id')
except ValueError:
raise KeyError("Unable to find block id: %s" % key) from ValueError
def put_blocks(self, blocks):
c_put_items = (ctypes.POINTER(_PutEntry) * len(blocks))()
for (i, block) in enumerate(blocks):
c_put_items[i] = ctypes.pointer(_PutEntry.new(
block.SerializeToString(),
))
_libexec('commit_store_put_blocks',
self.pointer,
c_put_items, ctypes.c_size_t(len(blocks)))
def _contains_id(self, object_id, fn_name):
contains = ctypes.c_bool(False)
_pylibexec(fn_name,
self.pointer,
ctypes.c_char_p(object_id.encode()),
ctypes.byref(contains))
return contains.value
def __contains__(self, block_id):
return self._contains_id(block_id, 'commit_store_contains_block')
def __iter__(self):
return self.get_block_iter()
@staticmethod
def create_index_configuration():
return ['index_batch', 'index_transaction', 'index_block_num']
@staticmethod
def deserialize_block(value):
block = Block()
block.ParseFromString(value)
return BlockWrapper(
block=block)
@property
def chain_head(self):
(vec_ptr, vec_len, vec_cap) = ffi.prepare_vec_result()
try:
_libexec(
'commit_store_get_chain_head',
self.pointer,
ctypes.byref(vec_ptr),
ctypes.byref(vec_len),
ctypes.byref(vec_cap))
except ValueError:
return None
return self.deserialize_block(
ffi.from_rust_vec(vec_ptr, vec_len, vec_cap))
def chain_head_state_root(self):
chain_head = self.chain_head
if chain_head is not None:
return chain_head.state_root_hash
return INIT_ROOT_KEY
def get_predecessor_iter(self, starting_block=None):
return self.get_block_iter(start_block=starting_block)
def get_block_iter(self, start_block=None, start_block_num=None,
reverse=True):
start = None
if start_block_num:
if len(start_block_num) < 2:
raise ValueError("Invalid start block num")
if start_block_num[:2] != "0x":
raise ValueError("Invalid start block num")
start = int(start_block_num, 16)
elif start_block:
start = start_block.block_num
return _BlockStoreIter(
self.pointer,
start,
reverse)
def get_blocks(self, block_ids):
return list(
filter(
lambda b: b is not None,
map(self._get_block_by_id_or_none, block_ids)))
def _get_block_by_id_or_none(self, block_id):
try:
return self[block_id]
except KeyError:
return None
def get_block_by_transaction_id(self, txn_id):
return self._get_block_by_id(
txn_id, 'commit_store_get_by_transaction_id')
def get_block_by_number(self, block_num):
try:
return self._get_block_by_num(
block_num, 'commit_store_get_by_block_num')
except ValueError:
raise KeyError(
"Unable to find block number: %s" % repr(
block_num)) from ValueError
def has_transaction(self, txn_id):
return self._contains_id(txn_id, 'commit_store_contains_transaction')
def get_block_by_batch_id(self, batch_id):
return self._get_block_by_id(
batch_id, 'commit_store_get_by_batch_id')
def has_batch(self, batch_id):
return self._contains_id(batch_id, 'commit_store_contains_batch')
def get_batch_by_transaction(self, transaction_id):
payload = self._get_data_by_id(
transaction_id, 'commit_store_get_batch_by_transaction')
batch = Batch()
batch.ParseFromString(payload)
return batch
def get_batch(self, batch_id):
payload = self._get_data_by_id(batch_id, 'commit_store_get_batch')
batch = Batch()
batch.ParseFromString(payload)
return batch
def get_transaction(self, transaction_id):
payload = self._get_data_by_id(
transaction_id, 'commit_store_get_transaction')
txn = Transaction()
txn.ParseFromString(payload)
return txn
def _get_count(self, fn_name):
count = ctypes.c_size_t(0)
_libexec(fn_name, self.pointer, ctypes.byref(count))
return count.value
def get_transaction_count(self):
return self._get_count('commit_store_get_transaction_count')
|
Apache License 2.0
|
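`get_batch_count` defers to `_get_count`, which follows the usual ctypes out-parameter pattern: allocate a C-sized integer in Python, pass it by reference across the FFI boundary, then read `.value`. A standalone toy illustration follows; the fake function merely stands in for the Rust call, which is not reproduced here.

```python
import ctypes

def fake_commit_store_get_batch_count(out_count):
    # A real FFI call would write the committed-batch count through the pointer
    # and return an error code; 0 mirrors ErrorCode.Success in the context above.
    out_count.contents.value = 42
    return 0

count = ctypes.c_size_t(0)
assert fake_commit_store_get_batch_count(ctypes.pointer(count)) == 0
print(count.value)  # 42
```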
azure/autorest.az
|
test/scenarios/msgraphuser/output/users_v1_0/azext_users_v1_0/vendored_sdks/users/operations/_users_outlook_operations.py
|
UsersOutlookOperations.list_master_categories
|
python
|
def list_master_categories(
    self,
    user_id,
    orderby=None,
    select=None,
    expand=None,
    **kwargs
):
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list_master_categories.metadata['url']
            path_format_arguments = {
                'user-id': self._serialize.url("user_id", user_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}
            if self._config.top is not None:
                query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
            if self._config.skip is not None:
                query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
            if self._config.search is not None:
                query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
            if self._config.filter is not None:
                query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
            if self._config.count is not None:
                query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        deserialized = self._deserialize('CollectionOfOutlookCategory', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.odata_next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.OdataError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
|
Get masterCategories from users.
Get masterCategories from users.
:param user_id: key: id of user.
:type user_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~users.models.Enum83]
:param select: Select properties to be returned.
:type select: list[str or ~users.models.Enum84]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfOutlookCategory or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~users.models.CollectionOfOutlookCategory]
:raises: ~azure.core.exceptions.HttpResponseError
|
https://github.com/azure/autorest.az/blob/b000db70f608c64918d04a0e0f5b50bb5468baa0/test/scenarios/msgraphuser/output/users_v1_0/azext_users_v1_0/vendored_sdks/users/operations/_users_outlook_operations.py#L48-L141
|
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsersOutlookOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
|
MIT License
|
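`list_master_categories` returns `ItemPaged(get_next, extract_data)`. The sketch below shows that azure-core pagination contract with in-memory pages instead of HTTP; it requires `azure-core`, and the page contents are invented.

```python
from azure.core.paging import ItemPaged

# continuation token -> (items, next continuation token); None means "first page"
PAGES = {None: ([1, 2], "page2"), "page2": ([3], None)}

def get_next(continuation_token):
    # In the operation above this builds and sends the HTTP request.
    return PAGES[continuation_token]

def extract_data(page):
    # In the operation above this deserializes CollectionOfOutlookCategory and
    # returns (odata_next_link, iterator over .value).
    items, next_token = page
    return next_token, iter(items)

print(list(ItemPaged(get_next, extract_data)))  # [1, 2, 3]
```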
thejessleigh/betterreads
|
betterreads/author.py
|
GoodreadsAuthor.fans_count
|
python
|
def fans_count(self):
    return int(self._author_dict["fans_count"]["#text"])
|
Number of fans
|
https://github.com/thejessleigh/betterreads/blob/70dff47a1e0cc76adaa89184bc04278cb39d6663/betterreads/author.py#L55-L57
|
from datetime import datetime
class GoodreadsAuthor:
def __init__(self, author_dict, client):
self._author_dict = author_dict
self._client = client
def __repr__(self):
return self.name
@property
def gid(self):
return int(self._author_dict["id"])
@property
def name(self):
return self._author_dict["name"]
@property
def about(self):
return self._author_dict["about"]
@property
def books(self):
from .book import GoodreadsBook
if type(self._author_dict["books"]["book"]) == list:
return [
GoodreadsBook(book_dict, self._client)
for book_dict in self._author_dict["books"]["book"]
]
else:
return [GoodreadsBook(self._author_dict["books"]["book"], self._client)]
@property
def born_at(self):
born_date = self._author_dict["born_at"]
return datetime.strptime(born_date, "%Y/%m/%d")
@property
def died_at(self):
died_date = self._author_dict["died_at"]
return datetime.strptime(died_date, "%Y/%m/%d")
@property
|
MIT License
|
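A hedged sketch for the betterreads row above, feeding `GoodreadsAuthor` a hand-built dict shaped like the parsed Goodreads XML. The keys are inferred from the accessors in the context, and `fans_count` is assumed to be wrapped in `@property` like its neighbours.

```python
from betterreads.author import GoodreadsAuthor

author = GoodreadsAuthor(
    {"id": "12345", "name": "A. Writer", "fans_count": {"#text": "987"}},
    client=None,  # no API calls are needed for these simple accessors
)
print(author.gid)         # 12345
print(author.name)        # A. Writer
print(author.fans_count)  # 987
```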
stencila/hub
|
manager/projects/models/reviews.py
|
Review.extract_callback
|
python
|
def extract_callback(self, job: Job):
    json = job.result
    if not json:
        self.status = ReviewStatus.FAILED.name
    else:
        self.review = Node.objects.create(
            project=self.project, creator=job.creator, app="hub.reviews", json=json
        )
        authors = json.get("authors", [])
        if len(authors) > 0:
            self.review_author_name = authors[0].get("name")
        self.review_date = (
            json.get("datePublished")
            or json.get("dateModified")
            or json.get("dateCreated")
        )
        self.review_title = node_text_content(json.get("title"))
        self.review_description = node_text_content(
            json.get("description") or json.get("content")
        )
        self.review_comments = len(json.get("comments", []))
        self.status = ReviewStatus.EXTRACTED.name
    self.save()
|
Store the extracted review.
|
https://github.com/stencila/hub/blob/e696c39213156bb43a098f81286197e919379cdf/manager/projects/models/reviews.py#L382-L413
|
import re
from datetime import datetime
from typing import Dict, List, Optional, Tuple
import shortuuid
from django.db import models
from dois.models import Doi
from jobs.models import Job
from manager.helpers import EnumChoice
from manager.nodes import node_text_content
from projects.models.nodes import Node
from projects.models.projects import Project, ProjectAgent, ProjectRole
from projects.models.sources import Source
from users.models import User
class ReviewStatus(EnumChoice):
PENDING = "PENDING"
REQUESTED = "REQUESTED"
CANCELLED = "CANCELLED"
ACCEPTED = "ACCEPTED"
DECLINED = "DECLINED"
COMPLETED = "COMPLETED"
EXTRACTING = "EXTRACTING"
EXTRACTED = "EXTRACTED"
FAILED = "FAILED"
REGISTERED = "REGISTERED"
@staticmethod
def as_choices() -> List[Tuple[str, str]]:
return [
(ReviewStatus.PENDING.name, "Pending"),
(ReviewStatus.REQUESTED.name, "Requested"),
(ReviewStatus.CANCELLED.name, "Cancelled"),
(ReviewStatus.ACCEPTED.name, "Accepted"),
(ReviewStatus.DECLINED.name, "Declined"),
(ReviewStatus.COMPLETED.name, "Completed"),
(ReviewStatus.EXTRACTING.name, "Retrieval in progress"),
(ReviewStatus.EXTRACTED.name, "Retrieved"),
(ReviewStatus.FAILED.name, "Retrieval failed"),
(ReviewStatus.REGISTERED.name, "Registered"),
]
@classmethod
def get_description(cls, status: str) -> Optional[str]:
choices = cls.as_choices()
for choice in choices:
if status == choice[0]:
return choice[1]
return None
def generate_review_key():
return shortuuid.ShortUUID().random(length=32)
class Review(models.Model):
project = models.ForeignKey(
Project,
null=False,
blank=False,
on_delete=models.CASCADE,
related_name="reviews",
help_text="The project that the review is for.",
)
creator = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="reviews_created",
help_text="The user who created the review.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the review was created."
)
updated = models.DateTimeField(
auto_now=True, help_text="The time the review was last updated."
)
status = models.CharField(
max_length=16,
choices=ReviewStatus.as_choices(),
default=ReviewStatus.PENDING.name,
help_text="The status of the review.",
)
reviewer = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="reviews_authored",
help_text="The user who authored the review.",
)
reviewer_email = models.EmailField(
null=True, blank=True, help_text="The email address of the reviewer.",
)
key = models.CharField(
default=generate_review_key,
max_length=64,
help_text="A unique, and very difficult to guess, key for the reviewer "
"to access the review if they are not a user.",
)
request_message = models.TextField(
null=True,
blank=True,
help_text="The message sent to the reviewer in the request to review.",
)
response_message = models.TextField(
null=True,
blank=True,
help_text="The message provided by the reviewer when accepting or declining to review.",
)
cancel_message = models.TextField(
null=True,
blank=True,
help_text="The message sent to the reviewer when the review was cancelled.",
)
source = models.ForeignKey(
Source,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="reviews",
help_text="The source for this review.",
)
job = models.ForeignKey(
Job,
null=True,
blank=True,
on_delete=models.PROTECT,
related_name="reviews",
help_text="The job that extracted the review from the source.",
)
subject = models.ForeignKey(
Node,
null=True,
blank=True,
on_delete=models.PROTECT,
related_name="reviews",
help_text="The node, usually a `CreativeWork`, that is the subject of the review.",
)
review = models.ForeignKey(
Node,
null=True,
blank=True,
on_delete=models.PROTECT,
help_text="The node, of type `Review`, representing the actual review.",
)
review_author_name = models.CharField(
max_length=128, null=True, blank=True, help_text="The name of the reviewer.",
)
review_date = models.DateTimeField(
null=True,
blank=True,
help_text="The date of the review e.g it's `datePublished`.",
)
review_title = models.TextField(
null=True, blank=True, help_text="The title of the review.",
)
review_description = models.TextField(
null=True, blank=True, help_text="The description of the review.",
)
review_comments = models.IntegerField(
null=True, blank=True, help_text="The number of comments that the review has."
)
def get_status(self) -> Optional[str]:
return ReviewStatus.get_description(self.status)
def get_date(self) -> datetime:
return self.review_date or self.updated or self.created()
def get_reviewer_name(self) -> Optional[str]:
return (
self.reviewer.personal_account.display_name or self.reviewer.username
if self.reviewer
else self.review_author_name
)
def get_reviewer_image(self) -> Optional[str]:
return self.reviewer.personal_account.image.medium if self.reviewer else None
def get_doi(self) -> Optional[str]:
return self.review and self.review.dois.order_by("-created").first()
def get_comments(self) -> Optional[int]:
return self.review_comments
def request(self):
if self.reviewer_email or self.reviewer:
self.status = ReviewStatus.REQUESTED.name
self.save()
def update(
self,
status: str,
response_message: Optional[str] = None,
cancel_message: Optional[str] = None,
user: Optional[User] = None,
filters: Dict = {},
):
if (
status == ReviewStatus.CANCELLED.name
and self.status == ReviewStatus.REQUESTED.name
):
self.cancel_message = cancel_message or None
elif (
status == ReviewStatus.ACCEPTED.name
and self.status == ReviewStatus.REQUESTED.name
):
self.reviewer = user
self.response_message = response_message or None
try:
agent = ProjectAgent.objects.get(project_id=self.project, user=user)
except ProjectAgent.DoesNotExist:
ProjectAgent.objects.create(
project_id=self.project, user=user, role=ProjectRole.REVIEWER.name,
)
else:
if agent.role not in ProjectRole.and_above(ProjectRole.REVIEWER):
agent.role = ProjectRole.REVIEWER.name
agent.save()
elif (
status == ReviewStatus.DECLINED.name
and self.status == ReviewStatus.REQUESTED.name
):
self.reviewer = user
self.response_message = response_message or None
elif status == ReviewStatus.COMPLETED.name and self.status in (
ReviewStatus.PENDING.name,
ReviewStatus.ACCEPTED.name,
ReviewStatus.FAILED.name,
):
return self.extract(user, filters)
elif (
status == ReviewStatus.REGISTERED.name
and self.status == ReviewStatus.EXTRACTED.name
):
return self.register(user)
else:
raise ValueError(
f"Review can not be updated from {self.status} to {status}."
)
self.status = status
self.save()
def extract(self, user: User, filters: Dict = {}):
if self.source.type_name == "Github":
match = re.match(
r"https:\/\/github.com\/(?:\w+)\/(?:\w+)\/pull\/(\d+)#pullrequestreview-(\d+)",
filters.get("filter_a", ""),
)
if match:
filters = dict(
pr_number=int(match.group(1)), review_id=int(match.group(2))
)
elif self.source.type_name.startswith("Google"):
filters = dict(name=filters.get("filter_a"))
job = self.source.extract(review=self, user=user, filters=filters)
job.dispatch()
self.job = job
self.status = ReviewStatus.EXTRACTING.name
self.save()
|
Apache License 2.0
|
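`extract_callback` reads only a handful of fields from the job result. Below is a minimal sketch of the JSON shape it expects, with key names taken from the accessor calls above and values invented for illustration.

```python
# Invented example payload; only the keys read by extract_callback are shown.
job_result = {
    "authors": [{"name": "R. Viewer"}],          # -> review_author_name
    "datePublished": "2021-03-01",               # -> review_date (first date present wins)
    "title": "Review of the manuscript",         # -> review_title (via node_text_content)
    "description": "Sound, minor revisions.",    # -> review_description
    "comments": [{"content": "Fix figure 2."}],  # -> review_comments = len(...)
}
```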
spacepatcher/firehol-ip-aggregator
|
app/api.py
|
search_get
|
python
|
def search_get(v: hug.types.text):
    if General.validate_request(v):
        payload = [v]
    else:
        return {"errors": "Data validation error in '%s'" % v}
    return FeedsStorage.search(payload)
|
Search for an IP object in all available feeds. Input: HTTP GET with parameter containing a single IP address
|
https://github.com/spacepatcher/firehol-ip-aggregator/blob/f96290330d9e632126049c8815b43f0e52938bfd/app/api.py#L40-L49
|
import hug
from modules.db_sync import FeedsStorage
from modules.general import General
FeedsStorage = FeedsStorage()
General = General()
General.logger.info("API instance successfully started")
@hug.post("/search", output=hug.output_format.json, version=1)
def search(body):
try:
payload = body.read().decode("utf-8")
except AttributeError:
payload = body
payload = payload.split(",")
if isinstance(payload, list):
for item in payload:
if General.validate_request(item):
pass
else:
return {"errors": "Data validation error in '%s'" % item}
else:
return {"errors": "Got an unrecognized structure"}
return FeedsStorage.search(list(set(payload)))
@hug.get("/search/ip", output=hug.output_format.json, examples="v=8.8.8.8", version=1)
|
Apache License 2.0
|
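A hedged example of calling the versioned GET route defined above once the hug app is being served; hug prefixes version-1 routes with `/v1`, and the host and port here are assumptions.

```python
import requests

resp = requests.get("http://127.0.0.1:8000/v1/search/ip", params={"v": "8.8.8.8"})
print(resp.json())  # feed entries for 8.8.8.8, or {"errors": ...} on bad input
```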
asheshjain399/tensormodels
|
tensormodels/data_reader/data_feeder.py
|
DataFeeder._launch_pipeline
|
python
|
def _launch_pipeline(self):
    self.data_processes = []
    queue_size = 2 * self.num_preprocess_threads + 2 * self.num_gpu_towers
    self.data_queue = Queue(queue_size)
    image_files = open(self.data_file, 'r').readlines()
    labels = open(self.label_file, 'r').readlines()
    print 'Size of queue: ', queue_size
    self.filename_queue = Queue(len(image_files))
    p = Process(target=self._create_filename_queue, args=(self.filename_queue, image_files, labels, self.num_epochs))
    p.start()
    self.data_processes.append(p)
    print 'Data feeder started'
    for each_worker in range(self.num_preprocess_threads):
        p = Process(target=self._each_worker_process, args=(self.data_queue,))
        p.start()
        self.data_processes.append(p)
|
This method creates two queues.
filename_queue: stores the list of filenames in data_file and label_file
data_queue: stores the mini-batch
|
https://github.com/asheshjain399/tensormodels/blob/d808dc8d34067238f98d8a35372b2bb6cb344b31/tensormodels/data_reader/data_feeder.py#L46-L69
|
from multiprocessing import Queue, Process
import multiprocessing as mp
import numpy as np
from numpy.random import RandomState
rand = RandomState(123)
class DataFeeder():
def __init__(self,
data_file,
label_file,
reader_handle,
data_list_shapes,
label_list_shapes,
batch_size=32,
num_preprocess_threads=int(0.3 * mp.cpu_count()),
num_gpu_towers=1,
num_epochs=-1,
data_type=np.uint8,
label_type=np.int32
):
self.data_file = data_file
self.label_file = label_file
self.data_list_shapes = data_list_shapes
self.label_list_shapes = label_list_shapes
self.reader_handle = reader_handle
self.num_preprocess_threads = num_preprocess_threads
self.num_gpu_towers = num_gpu_towers
self.batch_size = batch_size
self.num_epochs = num_epochs
self.data_type = data_type
self.label_type = label_type
self._launch_pipeline()
|
MIT License
|
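The docstring above describes a two-queue pipeline: one process fills a filename queue, and worker processes emit mini-batches onto a bounded data queue. A standalone Python 3 sketch of that pattern follows (the original module is Python 2, and stand-in strings replace the real image preprocessing).

```python
from multiprocessing import Process, Queue

def fill_filenames(filename_queue, names):
    for name in names:
        filename_queue.put(name)
    filename_queue.put(None)  # sentinel: no more work

def worker(filename_queue, data_queue):
    while True:
        name = filename_queue.get()
        if name is None:
            data_queue.put(None)
            break
        data_queue.put(f"batch-from-{name}")  # stand-in for real preprocessing

if __name__ == "__main__":
    filename_q, data_q = Queue(), Queue(2)   # data queue is bounded, as above
    Process(target=fill_filenames, args=(filename_q, ["a.jpg", "b.jpg"])).start()
    Process(target=worker, args=(filename_q, data_q)).start()
    for _ in range(3):
        print(data_q.get())  # two batches, then the sentinel
```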
square/connect-python-sdk
|
squareconnect/models/v1_merchant.py
|
V1Merchant.business_name
|
python
|
def business_name(self):
    return self._business_name
|
Gets the business_name of this V1Merchant.
The name of the merchant's business.
:return: The business_name of this V1Merchant.
:rtype: str
|
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/v1_merchant.py#L276-L284
|
from pprint import pformat
from six import iteritems
import re
class V1Merchant(object):
def __init__(self, id=None, name=None, email=None, account_type=None, account_capabilities=None, country_code=None, language_code=None, currency_code=None, business_name=None, business_address=None, business_phone=None, business_type=None, shipping_address=None, location_details=None, market_url=None):
self.swagger_types = {
'id': 'str',
'name': 'str',
'email': 'str',
'account_type': 'str',
'account_capabilities': 'list[str]',
'country_code': 'str',
'language_code': 'str',
'currency_code': 'str',
'business_name': 'str',
'business_address': 'Address',
'business_phone': 'V1PhoneNumber',
'business_type': 'str',
'shipping_address': 'Address',
'location_details': 'V1MerchantLocationDetails',
'market_url': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'email': 'email',
'account_type': 'account_type',
'account_capabilities': 'account_capabilities',
'country_code': 'country_code',
'language_code': 'language_code',
'currency_code': 'currency_code',
'business_name': 'business_name',
'business_address': 'business_address',
'business_phone': 'business_phone',
'business_type': 'business_type',
'shipping_address': 'shipping_address',
'location_details': 'location_details',
'market_url': 'market_url'
}
self._id = id
self._name = name
self._email = email
self._account_type = account_type
self._account_capabilities = account_capabilities
self._country_code = country_code
self._language_code = language_code
self._currency_code = currency_code
self._business_name = business_name
self._business_address = business_address
self._business_phone = business_phone
self._business_type = business_type
self._shipping_address = shipping_address
self._location_details = location_details
self._market_url = market_url
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def email(self):
return self._email
@email.setter
def email(self, email):
self._email = email
@property
def account_type(self):
return self._account_type
@account_type.setter
def account_type(self, account_type):
self._account_type = account_type
@property
def account_capabilities(self):
return self._account_capabilities
@account_capabilities.setter
def account_capabilities(self, account_capabilities):
self._account_capabilities = account_capabilities
@property
def country_code(self):
return self._country_code
@country_code.setter
def country_code(self, country_code):
self._country_code = country_code
@property
def language_code(self):
return self._language_code
@language_code.setter
def language_code(self, language_code):
self._language_code = language_code
@property
def currency_code(self):
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
self._currency_code = currency_code
@property
|
Apache License 2.0
|
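A short hedged sketch for the Square row above. The generated model stores constructor arguments on underscored attributes, and `business_name` is assumed to be exposed through an `@property` getter like the other accessors shown in the context.

```python
from squareconnect.models.v1_merchant import V1Merchant

merchant = V1Merchant(business_name="Acme Coffee", country_code="US")
print(merchant.business_name)  # Acme Coffee
print(merchant.country_code)   # US
```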
riotkit-org/riotkit-harbor
|
src/rkd_harbor/tasks/maintenance.py
|
BaseMaintenanceManagementTask.run
|
python
|
def run(self, context: ExecutionContext) -> bool:
    is_global = context.get_arg('--global')
    domain = context.get_arg('--domain')
    service = context.get_arg('--service')
    directory = self.get_data_path(context) + '/maintenance-mode'
    if not self._validate_switches(is_global, domain, service):
        self.io().error_msg('Cannot use together --global, --domain and --service switch. Pick one of them.')
        return False
    try:
        if is_global:
            return self.act([directory + '/on'], 'globally')
        elif service:
            return self.act_for_service(directory, service, context)
        elif domain:
            return self.act_for_domain(directory, domain, context)
        else:
            self.io().error_msg('Must specify --global or --domain switch')
            return False
    except PermissionError as e:
        self.io().error_msg('No write permissions. Set permissions or use sudo? %s' % str(e))
        return False
|
Validate parameters and select action
|
https://github.com/riotkit-org/riotkit-harbor/blob/f6b47b74fc0b1655f4e98968a3f104ceb6d7c682/src/rkd_harbor/tasks/maintenance.py#L21-L49
|
import os
from typing import List
from argparse import ArgumentParser
from abc import abstractmethod
from rkd.api.contract import ExecutionContext
from .base import HarborBaseTask
from ..formatting import prod_formatting
from ..exception import ServiceNotFoundInYamlLookedByCriteria
from ..exception import ServiceNotFoundInYaml
class BaseMaintenanceManagementTask(HarborBaseTask):
def get_group_name(self) -> str:
return ':harbor:maintenance'
def configure_argparse(self, parser: ArgumentParser):
parser.add_argument('--domain', '-d', help='Domain name', default='')
parser.add_argument('--service', '-s', help='Service name', default='')
parser.add_argument('--global', '-g', help='Set maintenance for all domains', action='store_true')
|
Apache License 2.0
|
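The task above first rejects combinations of `--global`, `--domain` and `--service`. The real `_validate_switches` is not shown in the row, so the standalone sketch below is an assumption reconstructed from the error message.

```python
def validate_switches(is_global: bool, domain: str, service: str) -> bool:
    # At most one way of selecting the maintenance scope may be used at a time
    # (mirrors the "Pick one of them" error message; not the library's own code).
    return sum(map(bool, (is_global, domain, service))) <= 1

print(validate_switches(True, "", ""))              # True: only --global
print(validate_switches(True, "example.org", ""))   # False: --global with --domain
```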
jhuapl-boss/boss
|
django/boss/views.py
|
Metric._get_admin_user
|
python
|
def _get_admin_user(self):
    return User.objects.get(username=ADMIN_USER)
|
Lookup the admin user
Returns: the User object for the Admin user
|
https://github.com/jhuapl-boss/boss/blob/c2e26d272bd7b8d54abdc2948193163537e31291/django/boss/views.py#L164-L170
|
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import permissions
from django.contrib.auth.mixins import LoginRequiredMixin
from bosscore.error import BossHTTPError, ErrorCodes
from django.conf import settings
from bossutils.logger import bossLogger
import socket
version = settings.BOSS_VERSION
class Ping(APIView):
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
renderer_classes = (JSONRenderer, )
def get(self, request):
content = {'ip': socket.gethostbyname(socket.gethostname())}
return Response(content)
class Unsupported(APIView):
authentication_classes = ()
permission_classes = (permissions.AllowAny,)
renderer_classes = (JSONRenderer, )
def get(self, request):
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
def post(self, request):
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
def delete(self, request):
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
def put(self, request):
return BossHTTPError(" This API version is unsupported. Update to version {}".format(version),
ErrorCodes.UNSUPPORTED_VERSION)
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import View
from rest_framework.authtoken.models import Token as TokenModel
class Token(LoginRequiredMixin, View):
def get(self, request):
action = request.GET.get('action', None)
try:
token = TokenModel.objects.get(user = request.user)
if action == "Revoke":
token.delete()
token = None
except:
if action == "Generate":
token = TokenModel.objects.create(user = request.user)
else:
token = None
if token is None:
content = ""
button = "Generate"
else:
content = "<textarea>{}</textarea>".format(token)
button = "Revoke"
html = """
<html>
<head><title>BOSS Token Management</title></head>
<body>
{1}
<a href="{0}?action={2}">{2}</a>
</body>
</html>
""".format(request.path_info, content, button)
return HttpResponse(html)
from boss.throttling import MetricDatabase
from bosscore.constants import ADMIN_USER
from django.contrib.auth.models import User
from bosscore.models import ThrottleMetric, ThrottleThreshold, ThrottleUsage
class Metric(LoginRequiredMixin, APIView):
renderer_classes = (JSONRenderer, )
def __init__(self):
self.blog = bossLogger()
self.metricdb = MetricDatabase()
|
Apache License 2.0
|
kraymer/flinck
|
flinck/confit.py
|
config_dirs
|
python
|
def config_dirs():
    paths = []
    if platform.system() == 'Darwin':
        paths.append(MAC_DIR)
        paths.append(UNIX_DIR_FALLBACK)
        if UNIX_DIR_VAR in os.environ:
            paths.append(os.environ[UNIX_DIR_VAR])
    elif platform.system() == 'Windows':
        paths.append(WINDOWS_DIR_FALLBACK)
        if WINDOWS_DIR_VAR in os.environ:
            paths.append(os.environ[WINDOWS_DIR_VAR])
    else:
        paths.append(UNIX_DIR_FALLBACK)
        if UNIX_DIR_VAR in os.environ:
            paths.append(os.environ[UNIX_DIR_VAR])
    out = []
    for path in paths:
        path = os.path.abspath(os.path.expanduser(path))
        if path not in out:
            out.append(path)
    return out
|
Return a platform-specific list of candidates for user
configuration directories on the system.
The candidates are in order of priority, from highest to lowest. The
last element is the "fallback" location to be used when no
higher-priority config file exists.
|
https://github.com/kraymer/flinck/blob/b3d0076e2b3ab74c0a8f4a3c8abf5631ee362438/flinck/confit.py#L559-L592
|
from __future__ import division, absolute_import, print_function
import platform
import os
import pkgutil
import sys
import yaml
import types
import collections
import re
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
UNIX_DIR_VAR = 'XDG_CONFIG_HOME'
UNIX_DIR_FALLBACK = '~/.config'
WINDOWS_DIR_VAR = 'APPDATA'
WINDOWS_DIR_FALLBACK = '~\\AppData\\Roaming'
MAC_DIR = '~/Library/Application Support'
CONFIG_FILENAME = 'config.yaml'
DEFAULT_FILENAME = 'config_default.yaml'
ROOT_NAME = 'root'
YAML_TAB_PROBLEM = "found character '\\t' that cannot start any token"
REDACTED_TOMBSTONE = 'REDACTED'
PY3 = sys.version_info[0] == 3
STRING = str if PY3 else unicode
BASESTRING = str if PY3 else basestring
NUMERIC_TYPES = (int, float) if PY3 else (int, float, long)
TYPE_TYPES = (type,) if PY3 else (type, types.ClassType)
def iter_first(sequence):
it = iter(sequence)
try:
if PY3:
return next(it)
else:
return it.next()
except StopIteration:
raise ValueError()
class ConfigError(Exception):
class NotFoundError(ConfigError):
class ConfigValueError(ConfigError):
class ConfigTypeError(ConfigValueError):
class ConfigTemplateError(ConfigError):
class ConfigReadError(ConfigError):
def __init__(self, filename, reason=None):
self.filename = filename
self.reason = reason
message = u'file {0} could not be read'.format(filename)
if isinstance(reason, yaml.scanner.ScannerError) and reason.problem == YAML_TAB_PROBLEM:
message += u': found tab character at line {0}, column {1}'.format(
reason.problem_mark.line + 1,
reason.problem_mark.column + 1,
)
elif reason:
message += u': {0}'.format(reason)
super(ConfigReadError, self).__init__(message)
class ConfigSource(dict):
def __init__(self, value, filename=None, default=False):
super(ConfigSource, self).__init__(value)
if filename is not None and not isinstance(filename, BASESTRING):
raise TypeError(u'filename must be a string or None')
self.filename = filename
self.default = default
def __repr__(self):
return 'ConfigSource({0!r}, {1!r}, {2!r})'.format(
super(ConfigSource, self),
self.filename,
self.default,
)
@classmethod
def of(self, value):
if isinstance(value, ConfigSource):
return value
elif isinstance(value, dict):
return ConfigSource(value)
else:
raise TypeError(u'source value must be a dict')
class ConfigView(object):
name = None
def resolve(self):
raise NotImplementedError
def first(self):
pairs = self.resolve()
try:
return iter_first(pairs)
except ValueError:
raise NotFoundError(u"{0} not found".format(self.name))
def exists(self):
try:
self.first()
except NotFoundError:
return False
return True
def add(self, value):
raise NotImplementedError
def set(self, value):
raise NotImplementedError
def root(self):
raise NotImplementedError
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.name)
def __iter__(self):
try:
keys = self.keys()
for key in keys:
yield key
except ConfigTypeError:
collection = self.get()
if not isinstance(collection, (list, tuple)):
raise ConfigTypeError(
u'{0} must be a dictionary or a list, not {1}'.format(
self.name, type(collection).__name__
)
)
for index in range(len(collection)):
yield self[index]
def __getitem__(self, key):
return Subview(self, key)
def __setitem__(self, key, value):
self.set({key: value})
def __contains__(self, key):
return self[key].exists()
def set_args(self, namespace):
args = {}
for key, value in namespace.__dict__.items():
if value is not None:
args[key] = value
self.set(args)
def __str__(self):
if PY3:
return self.__unicode__()
else:
return bytes(self.get())
def __unicode__(self):
return STRING(self.get())
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return bool(self.get())
def keys(self):
keys = []
for dic, _ in self.resolve():
try:
cur_keys = dic.keys()
except AttributeError:
raise ConfigTypeError(
u'{0} must be a dict, not {1}'.format(
self.name, type(dic).__name__
)
)
for key in cur_keys:
if key not in keys:
keys.append(key)
return keys
def items(self):
for key in self.keys():
yield key, self[key]
def values(self):
for key in self.keys():
yield self[key]
def all_contents(self):
for collection, _ in self.resolve():
try:
it = iter(collection)
except TypeError:
raise ConfigTypeError(
u'{0} must be an iterable, not {1}'.format(
self.name, type(collection).__name__
)
)
for value in it:
yield value
def flatten(self, redact=False):
od = OrderedDict()
for key, view in self.items():
if redact and view.redact:
od[key] = REDACTED_TOMBSTONE
else:
try:
od[key] = view.flatten(redact=redact)
except ConfigTypeError:
od[key] = view.get()
return od
def get(self, template=None):
return as_template(template).value(self, template)
def as_filename(self):
return self.get(Filename())
def as_choice(self, choices):
return self.get(Choice(choices))
def as_number(self):
return self.get(Number())
def as_str_seq(self):
return self.get(StrSeq())
@property
def redact(self):
return () in self.get_redactions()
@redact.setter
def redact(self, flag):
self.set_redaction((), flag)
def set_redaction(self, path, flag):
raise NotImplementedError()
def get_redactions(self):
raise NotImplementedError()
class RootView(ConfigView):
def __init__(self, sources):
self.sources = list(sources)
self.name = ROOT_NAME
self.redactions = set()
def add(self, obj):
self.sources.append(ConfigSource.of(obj))
def set(self, value):
self.sources.insert(0, ConfigSource.of(value))
def resolve(self):
return ((dict(s), s) for s in self.sources)
def clear(self):
del self.sources[:]
self.redactions.clear()
def root(self):
return self
def set_redaction(self, path, flag):
if flag:
self.redactions.add(path)
elif path in self.redactions:
self.redactions.remove(path)
def get_redactions(self):
return self.redactions
class Subview(ConfigView):
def __init__(self, parent, key):
self.parent = parent
self.key = key
if isinstance(self.parent, RootView):
self.name = ''
else:
self.name = self.parent.name
if not isinstance(self.key, int):
self.name += '.'
if isinstance(self.key, int):
self.name += u'#{0}'.format(self.key)
elif isinstance(self.key, BASESTRING):
if isinstance(self.key, bytes):
self.name += self.key.decode('utf8')
else:
self.name += self.key
else:
self.name += repr(self.key)
def resolve(self):
for collection, source in self.parent.resolve():
try:
value = collection[self.key]
except IndexError:
continue
except KeyError:
continue
except TypeError:
raise ConfigTypeError(
u"{0} must be a collection, not {1}".format(
self.parent.name, type(collection).__name__
)
)
yield value, source
def set(self, value):
self.parent.set({self.key: value})
def add(self, value):
self.parent.add({self.key: value})
def root(self):
return self.parent.root()
def set_redaction(self, path, flag):
self.parent.set_redaction((self.key,) + path, flag)
def get_redactions(self):
return (kp[1:] for kp in self.parent.get_redactions()
if kp and kp[0] == self.key)
def _package_path(name):
loader = pkgutil.get_loader(name)
if loader is None or name == b'__main__':
return None
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(name)
else:
__import__(name)
filepath = sys.modules[name].__file__
return os.path.dirname(os.path.abspath(filepath))
|
MIT License
|
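`config_dirs()` only inspects `platform.system()` and the environment, so it can be exercised directly. A hedged usage sketch, importing it from the module path given in the row:

```python
import os
from flinck.confit import config_dirs

# The function reads the environment, so influence it the same way it does.
os.environ["XDG_CONFIG_HOME"] = os.path.expanduser("~/.config")
for path in config_dirs():
    print(path)  # highest-priority candidate first, the fallback last
```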
pimoroni/grow-python
|
library/grow/__init__.py
|
Piezo.beep
|
python
|
def beep(self, frequency=440, timeout=0.1, blocking=True, force=False):
    if blocking:
        self.start(frequency=frequency)
        time.sleep(timeout)
        self.stop()
        return True
    else:
        if self._timeout is not None:
            if self._timeout.is_alive():
                if force:
                    self._timeout.cancel()
                else:
                    return False
        self._timeout = threading.Timer(timeout, self.stop)
        self.start(frequency=frequency)
        self._timeout.start()
        return True
|
Beep the piezo for timeout seconds.
:param frequency: Frequency, in hertz, of the piezo
:param timeout: Time, in seconds, of the piezo beep
:param blocking: If true, function will block until the piezo has stopped
https://github.com/pimoroni/grow-python/blob/9ddb040294da4ac0184e09fb0765d46c71edd5d1/library/grow/__init__.py#L45-L68
|
__version__ = '0.0.2'
import time
import atexit
import threading
import RPi.GPIO as GPIO
class Piezo():
def __init__(self, gpio_pin=13):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_pin, GPIO.OUT, initial=GPIO.LOW)
self.pwm = GPIO.PWM(gpio_pin, 440)
self.pwm.start(0)
self._timeout = None
atexit.register(self._exit)
def frequency(self, value):
self.pwm.ChangeFrequency(value)
def start(self, frequency=None):
if frequency is not None:
self.frequency(frequency)
self.pwm.ChangeDutyCycle(1)
def stop(self):
self.pwm.ChangeDutyCycle(0)
|
MIT License
|
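A hedged usage sketch for `Piezo.beep`; it assumes a Raspberry Pi with `RPi.GPIO` available and a piezo wired to the default BCM pin 13 shown in the context.

```python
from grow import Piezo  # requires RPi.GPIO, i.e. a Raspberry Pi

piezo = Piezo()                                          # BCM pin 13 by default
piezo.beep(frequency=880, timeout=0.2)                   # blocking: returns after ~0.2 s
piezo.beep(frequency=440, timeout=0.5, blocking=False)   # returns immediately;
                                                         # a Timer calls stop() later
```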
trusat/trusat-backend
|
test_API_snapshots.py
|
api_get
|
python
|
def api_get(endpoint, is_json, params):
    return api_request(endpoint, is_json, False, params)
|
GETs a resource from an API endpoint, decoding it and prettifying it if JSON.
Also prints timing info.
|
https://github.com/trusat/trusat-backend/blob/8ae226dbe52231fac42ff7d799f484ffa6e888ed/test_API_snapshots.py#L54-L59
|
import requests
import json
from snapshottest.file import FileSnapshot
import time
from enum import Enum
API_BASE_URL = "http://127.0.0.1:8080"
TEST_OBJECT_IDENTIFIER = {'norad_number': 40538}
class RequestMethod(Enum):
GET = 1
POST = 2
def api_request(endpoint, expect_json_response, post_data, get_params):
start = time.time()
if post_data:
r = requests.post(API_BASE_URL + endpoint, json=post_data)
else:
r = requests.get(API_BASE_URL + endpoint, params=get_params, headers={'Accept': 'Cache-Control'})
end = time.time()
print(f"{'POST' if post_data else 'GET'} {endpoint} took {end - start} seconds")
if (expect_json_response):
output = json.loads(r.content)
formatted = json.dumps(output, sort_keys=True, indent=4, separators=(',', ': '))
else:
formatted = r.content.decode('utf-8')
headers = r.headers
headers['Date'] = 'XXX'
return f"STATUS: {r.status_code}\n\nHEADERS:{headers}\n\nCONTENT:{formatted}"
def api_post(endpoint, data, expect_json_response=True):
return api_request(endpoint, expect_json_response, data,{})
|
Apache License 2.0
|
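A hedged sketch of driving the helper above against a locally running backend at `API_BASE_URL`; the endpoint path used here is invented for illustration.

```python
from test_API_snapshots import api_get, TEST_OBJECT_IDENTIFIER

# "/object/info" is a made-up endpoint; the test module uses its own list.
snapshot = api_get("/object/info", True, TEST_OBJECT_IDENTIFIER)
print(snapshot)  # "STATUS: ...\n\nHEADERS:...\n\nCONTENT:..." string with timing printed
```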
mitmproxy/mitmproxy
|
mitmproxy/tools/console/layoutwidget.py
|
LayoutWidget.layout_pushed
|
python
|
def layout_pushed(self, prev):
    pass
|
We have just pushed a window onto the stack.
|
https://github.com/mitmproxy/mitmproxy/blob/667d4e04749a4bc2212f58fa2b8c31cd1d91fc7b/mitmproxy/tools/console/layoutwidget.py#L36-L40
|
class LayoutWidget:
title = ""
keyctx = ""
def key_responder(self):
return self
def focus_changed(self):
pass
def view_changed(self):
pass
def layout_popping(self):
pass
|
MIT License
|
skoltechrobotics/hokuyolx
|
hokuyolx/hokuyo.py
|
HokuyoLX._get_info
|
python
|
def _get_info(self, cmd):
    status, data = self._send_req(cmd)
    if status != '00':
        raise HokuyoStatusException(status)
    return dict(self._process_info_line(line) for line in data if line)
|
Generic method for receiving and decoding sensor information;
accepts the following commands: II, VV and PP
|
https://github.com/skoltechrobotics/hokuyolx/blob/07bb42d76db5b458001f65fe9eb7109ed1995330/hokuyolx/hokuyo.py#L790-L796
|
import socket
import logging
import time
import numpy as np
from codecs import encode, decode
from .exceptions import HokuyoException, HokuyoStatusException
from .exceptions import HokuyoChecksumMismatch
from .statuses import activation_statuses, laser_states, tsync_statuses
class HokuyoLX(object):
addr = ('192.168.0.10', 10940)
dmin = 20
dmax = 30000
ares = 1440
amin = 0
amax = 1080
aforw = 540
scan_freq = 40
model = 'UST-10LX'
tzero = 0
tn = 0
convert_time = True
_sock = None
_logger = None
def __init__(self, activate=True, info=True, tsync=True, addr=None,
buf=512, timeout=5, time_tolerance=300, logger=None,
convert_time=True):
super(HokuyoLX, self).__init__()
if addr is not None:
self.addr = addr
self.buf = buf
self.timeout = timeout
self._logger = logging.getLogger('hokuyo') if logger is None else logger
self.time_tolerance = time_tolerance
self.convert_time = convert_time
self._connect_to_laser(False)
if tsync:
self.time_sync()
if info:
self.update_info()
if activate:
self.activate()
@staticmethod
def _check_sum(msg, cc=None):
if cc is None:
cmsg, cc = msg[:-1], msg[-1:]
else:
cmsg = msg
conv_msg = cmsg if isinstance(cmsg, bytes) else encode(cmsg, 'ascii')
conv_sum = decode(cc, 'ascii') if isinstance(msg, bytes) else cc
calc_sum = chr((sum(bytearray(conv_msg)) & 0x3f) + 0x30)
if calc_sum != conv_sum:
raise HokuyoChecksumMismatch(
'For message %s sum mismatch: %s vs %s' %
(decode(conv_msg, 'ascii'), calc_sum, cc))
return cmsg
@staticmethod
def _convert2int(chars):
return sum([(ord(char) - 0x30) << (6*(len(chars) - i - 1))
for i, char in enumerate(chars)])
def _convert2ts(self, chars, convert=None):
ts = self._convert2int(self._check_sum(chars))
if not (self.convert_time if convert is None else convert):
return ts
logging.debug('Sensor timestamp: %d', ts)
t = self.tzero + ts + self._tn*(1 << 24)
logging.debug('Converted timestamp: %d (t0: %d, tn: %d)',
t, self.tzero, self._tn)
dt = int(time.time()*1000) - t
logging.debug('Delta t with local time: %d', dt)
if abs(dt) > self.time_tolerance:
diff = (1 << 24) - self.time_tolerance
if dt > diff and self.tzero != 0:
self._logger.warning('Timestamp overflow detected, '
'%d -- %d' % (dt, diff))
self._tn += 1
else:
self._logger.warning(
'Time difference %d is too big. Resyncing...', dt)
self.time_sync()
return self._convert2ts(chars)
return t
def _connect_to_laser(self, close=True):
if close:
self.close()
self._logger.info('Connecting to the laser')
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(self.timeout)
try:
self._sock.connect(self.addr)
except socket.timeout:
raise HokuyoException('Failed to connect to the sensor')
def _send_cmd(self, cmd, params='', string=''):
if not (len(cmd) == 2 or (cmd[0] == '%' and len(cmd) == 3)):
raise HokuyoException(
'Command must be two chars string '
'or three chars starting with %%, got %d chars' % len(cmd))
self._logger.debug(
'Sending command to the sensor; '
'cmd: %s, params: %s, string: %s', cmd, params, string)
req = cmd + params
if string:
req += ';' + string
if self._sock is None:
raise HokuyoException('Not connected to the laser')
n = self._sock.send(encode(req, 'ascii') + b'\n')
if len(req) + 1 != n:
raise HokuyoException('Failed to send all data to the sensor')
return req
def _recv(self, header=None):
self._logger.debug('Recieving data from sensor')
if self._sock is None:
raise HokuyoException('Not connected to the laser')
try:
while True:
data = b''
while not data.endswith(b'\n\n'):
data += self._sock.recv(self.buf)
self._logger.debug('Recieved data: %s' % data)
split_data = decode(data[:-2], 'ascii').split('\n')
if header is not None and split_data[0] != header:
self._logger.warning(
'Discarded data due header mismatch: %s' % data)
continue
break
except socket.timeout:
raise HokuyoException('Connection timeout')
return split_data
def _send_req(self, cmd, params='', string=''):
self._logger.debug(
'Performing request; cmd: %s, params: %s, string: %s',
cmd, params, string)
header = self._send_cmd(cmd, params, string)
resp = self._recv(header)
if resp.pop(0) != header:
raise HokuyoException('Response header mismatch')
status_str = resp.pop(0)
status = self._check_sum(status_str)
self._logger.debug('Got response with status %s', status)
return status, resp
def get_angles(self, start=None, end=None, grouping=0):
start = self.amin if start is None else start
end = self.amax if end is None else end
grouping = 1 if grouping == 0 else grouping
num = self.amax - self.amin + 1
space = np.linspace(self.amin, self.amax, num) - self.aforw
angles = 2*np.pi*space/self.ares
return angles[start:end+1:grouping]
def _process_scan_data(self, data, with_intensity):
raw_data = ''.join([self._check_sum(block) for block in data])
if len(raw_data) % 3 != 0:
raise HokuyoException('Wrong length of scan data')
scan = np.array([
self._convert2int(raw_data[3*i:3*i+3])
for i in range(len(raw_data)//3)], np.uint32)
if with_intensity:
return scan.reshape((len(scan)//2, 2))
return scan
def _filter(self, scan, start=None, end=None, grouping=0,
dmin=None, dmax=None, imin=None, imax=None):
angles = self.get_angles(start, end, grouping)
if scan.ndim == 1:
tpl = (angles, scan)
elif scan.ndim == 2:
tpl = (angles, scan[:, 0], scan[:, 1])
else:
raise HokuyoException('Unexpected scan dimensions')
data = np.vstack(tpl).T
dmin = self.dmin if dmin is None else dmin
dmax = self.dmax if dmax is None else dmax
data = data[(data[:, 1] >= dmin) & (data[:, 1] <= dmax)]
if imin is not None:
data = data[data[:, 2] >= imin]
if imax is not None:
data = data[data[:, 2] <= imax]
return data
def _force_standby(self):
state, description = self.laser_state()
if state in (3, 4, 5):
self.standby()
elif state == 2:
self.tsync_exit()
elif state != 0:
raise HokuyoException('Unexpected laser state: %s' % description)
def activate(self):
self._logger.info('Activating sensor')
status, _ = self._send_req('BM')
if status not in activation_statuses:
raise HokuyoStatusException(status)
return int(status), activation_statuses[status]
def standby(self):
self._logger.info('Switching sensor to the standby state')
status, _ = self._send_req('QT')
if status != '00':
raise HokuyoStatusException(status)
def sleep(self):
self._logger.info('Switching sensor to the sleep state')
self._force_standby()
status, _ = self._send_req('%SL')
if status != '00':
raise HokuyoStatusException(status)
def _single_measurment(self, with_intensity, start, end, grouping):
start = self.amin if start is None else start
end = self.amax if end is None else end
params = '%0.4d%0.4d%0.2d' % (start, end, grouping)
cmd = 'GE' if with_intensity else 'GD'
status, data = self._send_req(cmd, params)
if status != '00':
raise HokuyoStatusException(status)
timestamp = self._convert2ts(data.pop(0))
scan = self._process_scan_data(data, with_intensity)
return timestamp, scan
def get_dist(self, start=None, end=None, grouping=0):
return self._single_measurment(False, start, end, grouping)
def get_intens(self, start=None, end=None, grouping=0):
return self._single_measurment(True, start, end, grouping)
def get_filtered_dist(self, start=None, end=None, grouping=0,
dmin=None, dmax=None):
ts, scan = self.get_dist(start, end, grouping)
return ts, self._filter(scan, start, end, grouping, dmin, dmax)
def get_filtered_intens(self, start=None, end=None, grouping=0,
dmin=None, dmax=None, imin=None, imax=None):
ts, scan = self.get_intens(start, end, grouping)
return ts, self._filter(scan, start, end, grouping,
dmin, dmax, imin, imax)
def _iter_meas(self, with_intensity, scans, start, end, grouping, skips):
        self._logger.info('Initializing continuous measurement')
start = self.amin if start is None else start
end = self.amax if end is None else end
params = '%0.4d%0.4d%0.2d%0.1d%0.2d' % (start, end, grouping,
skips, scans)
cmd = 'ME' if with_intensity else 'MD'
status, _ = self._send_req(cmd, params)
if status != '00':
raise HokuyoStatusException(status)
self._logger.info('Starting scan response cycle')
while True:
data = self._recv()
            self._logger.debug('Received data in the scan response cycle: %s' %
data)
header = data.pop(0)
req = cmd + params[:-2]
if not header.startswith(req):
raise HokuyoException('Header mismatch in the scan '
'response message')
pending = int(header[len(req):len(req) + 2])
status = self._check_sum(data.pop(0))
if status == '0M':
self._logger.warning('Unstable scanner condition')
continue
elif status != '99':
raise HokuyoStatusException(status)
timestamp = self._convert2ts(data.pop(0))
scan = self._process_scan_data(data, with_intensity)
self._logger.info('Got new scan, yielding...')
yield (scan, timestamp, pending)
if pending == 0 and scans != 0:
                self._logger.info('Last scan received, exiting generator')
break
def iter_dist(self, scans=0, start=None, end=None, grouping=0, skips=0):
return self._iter_meas(False, scans, start, end, grouping, skips)
def iter_intens(self, scans=0, start=None, end=None, grouping=0, skips=0):
return self._iter_meas(True, scans, start, end, grouping, skips)
def iter_filtered_dist(self, scans=0, start=None, end=None, grouping=0,
skips=0, dmin=None, dmax=None):
gen = self.iter_dist(scans, start, end, grouping, skips)
for scan, timestamp, pending in gen:
scan = self._filter(scan, start, end, grouping, dmin, dmax)
yield (scan, timestamp, pending)
def iter_filtered_intens(self, scans=0, start=None, end=None, grouping=0,
skips=0, dmin=None, dmax=None,
imin=None, imax=None):
gen = self.iter_intens(scans, start, end, grouping, skips)
for scan, timestamp, pending in gen:
scan = self._filter(scan, start, end, grouping,
dmin, dmax, imin, imax)
yield (scan, timestamp, pending)
def _tsync_cmd(self, code):
status, data = self._send_req('TM', str(code))
if status not in tsync_statuses:
raise HokuyoStatusException(status)
if data:
return status, tsync_statuses[status], data[0]
else:
return status, tsync_statuses[status]
def tsync_enter(self):
self._logger.info('Entering time sync mode')
return self._tsync_cmd(0)
def tsync_get(self):
resp = self._tsync_cmd(1)
if resp[0] != '00':
raise HokuyoException(
'Failed to get sensor time: %s (%s)' %
(resp[1], resp[0]))
return self._convert2ts(resp[2], False)
def tsync_exit(self):
self._logger.info('Exiting time sync mode')
return self._tsync_cmd(2)
def time_sync(self, N=10, dt=0.1):
self._logger.info('Starting time synchronization.')
self._force_standby()
code, description = self.tsync_enter()
if code != '00':
self._logger.info(
'Failed to enter time sync mode: %s (%s)' %
(description, code))
self._logger.info('Collecting timestamps...')
tzero_list = []
for _ in range(N):
tzero_list.append(time.time()*1000 - self.tsync_get())
time.sleep(dt)
self.tzero = int(np.mean(np.rint(np.array(tzero_list))))
self._tn = 0
self._logger.info('Time sync done, t0: %d ms' % self.tzero)
code, description = self.tsync_exit()
if code != '00':
self._logger.info(
'Failed to exit time sync mode: %s (%s)' %
(description, code))
def _process_info_line(self, line):
key, value = self._check_sum(line[:-2], line[-1:]).split(':')
return key, int(value) if value.isdigit() else value
|
MIT License
|
shaunduncan/nosqlite
|
nosqlite.py
|
_eq
|
python
|
def _eq(field, value, document):
try:
return document.get(field, None) == value
except TypeError:
return False
|
Returns True if the value of a document field is equal to a given value
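A minimal standalone sketch of how _eq behaves, including the TypeError branch (triggered here by an unhashable field name); the function is redefined so the snippet runs without importing nosqlite, and the sample document is illustrative:
def _eq(field, value, document):
    try:
        return document.get(field, None) == value
    except TypeError:
        return False
doc = {"_id": 1, "age": 30}
print(_eq("age", 30, doc))      # True
print(_eq("name", "bob", doc))  # False: a missing field compares as None
print(_eq(["age"], 30, doc))    # False: unhashable field name raises TypeError inside get()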
|
https://github.com/shaunduncan/nosqlite/blob/3033c029b7c8290c66a8b36dc512e560505d4c85/nosqlite.py#L430-L437
|
import json
import re
import sqlite3
import sys
import warnings
from functools import partial
from itertools import starmap
try:
from itertools import ifilter as filter, imap as map
except ImportError:
pass
class MalformedQueryException(Exception):
pass
class Connection(object):
def __init__(self, *args, **kwargs):
self._collections = {}
self.connect(*args, **kwargs)
def connect(self, *args, **kwargs):
self.db = sqlite3.connect(*args, **kwargs)
self.db.isolation_level = None
def close(self):
if self.db is not None:
self.db.close()
def __getitem__(self, name):
if name not in self._collections:
self._collections[name] = Collection(self.db, name)
return self._collections[name]
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return self[name]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_traceback):
self.close()
return False
def drop_collection(self, name):
self.db.execute("drop table if exists %s" % name)
class Collection(object):
def __init__(self, db, name, create=True):
self.db = db
self.name = name
if create:
self.create()
def clear(self):
self.db.execute("delete from %s" % self.name)
def exists(self):
return self._object_exists('table', self.name)
def _object_exists(self, type, name):
row = self.db.execute(
"select count(1) from sqlite_master where type = ? and name = ?",
(type, name.strip('[]'))
).fetchone()
return int(row[0]) > 0
def create(self):
self.db.execute("""
create table if not exists %s (
id integer primary key autoincrement,
data text not null
)
""" % self.name)
def insert(self, document):
if '_id' in document:
return self.update(document)
cursor = self.db.execute("""
insert into %s(data) values (?)
""" % self.name, (json.dumps(document),))
document['_id'] = cursor.lastrowid
return document
def update(self, document):
if '_id' not in document:
return self.insert(document)
copy = document.copy()
del copy['_id']
self.db.execute("""
update %s set data = ? where id = ?
""" % self.name, (json.dumps(copy), document['_id']))
return document
def remove(self, document):
assert '_id' in document, 'Document must have an id'
self.db.execute("delete from %s where id = ?" % self.name, (document['_id'],))
def save(self, document):
return self.update(document)
def delete(self, document):
return self.remove(document)
def _load(self, id, data):
if isinstance(data, bytes):
data = data.decode('utf-8')
document = json.loads(data)
document['_id'] = id
return document
def find(self, query=None, limit=None):
results = []
query = query or {}
cursor = self.db.execute("select id, data from %s" % self.name)
apply = partial(self._apply_query, query)
for match in filter(apply, starmap(self._load, cursor.fetchall())):
results.append(match)
if limit and len(results) == limit:
return results
return results
def _apply_query(self, query, document):
matches = []
reapply = lambda q: self._apply_query(q, document)
for field, value in query.items():
if field == '$and':
matches.append(all(map(reapply, value)))
elif field == '$or':
matches.append(any(map(reapply, value)))
elif field == '$nor':
matches.append(not any(map(reapply, value)))
elif field == '$not':
matches.append(not self._apply_query(value, document))
elif isinstance(value, dict):
for operator, arg in value.items():
if not self._get_operator_fn(operator)(field, arg, document):
matches.append(False)
break
else:
matches.append(True)
elif value != document.get(field, None):
if '.' in field:
nodes = field.split('.')
document_section = document
try:
for path in nodes[:-1]:
document_section = document_section.get(path, None)
except AttributeError:
document_section = None
if document_section is None:
matches.append(False)
else:
if value != document_section.get(nodes[-1], None):
matches.append(False)
else:
matches.append(False)
return all(matches)
def _get_operator_fn(self, op):
if not op.startswith('$'):
raise MalformedQueryException("Operator '%s' is not a valid query operation" % op)
try:
return getattr(sys.modules[__name__], op.replace('$', '_'))
except AttributeError:
raise MalformedQueryException("Operator '%s' is not currently implemented" % op)
def find_one(self, query=None):
try:
return self.find(query=query, limit=1)[0]
except (sqlite3.OperationalError, IndexError):
return None
def find_and_modify(self, query=None, update=None):
update = update or {}
for document in self.find(query=query):
document.update(update)
self.update(document)
def count(self, query=None):
return len(self.find(query=query))
def rename(self, new_name):
new_collection = Collection(self.db, new_name, create=False)
assert not new_collection.exists()
self.db.execute("alter table %s rename to %s" % (self.name, new_name))
self.name = new_name
def distinct(self, key):
return set(d[key] for d in filter(lambda d: key in d, self.find()))
def create_index(self, key, reindex=True, sparse=False):
warnings.warn('Index support is currently very alpha and is not guaranteed')
if isinstance(key, (list, tuple)):
index_name = ','.join(key)
index_columns = ', '.join('%s text' % f for f in key)
else:
index_name = key
index_columns = '%s text' % key
table_name = '[%s{%s}]' % (self.name, index_name)
reindex = reindex or not self._object_exists('table', table_name)
self.db.execute("""
create table if not exists {table} (
id integer primary key,
{columns},
foreign key(id) references {collection}(id) on delete cascade on update cascade
)
""".format(
table=table_name,
collection=self.name,
columns=index_columns
))
self.db.execute("""
create index if not exists [idx.{collection}{{index}}] on {table}({index})
""".format(
collection=self.name,
index=index_name,
table=table_name,
))
if reindex:
self.reindex(key)
def ensure_index(self, key, sparse=False):
self.create_index(key, reindex=False, sparse=False)
def reindex(self, table, sparse=False):
warnings.warn('Index support is currently very alpha and is not guaranteed')
index = re.findall(r'^\[.*\{(.*)\}\]$', table)[0].split(',')
update = "update {table} set {key} = ? where id = ?"
insert = "insert into {table}({index}) values({q})"
count = "select count(1) from {table} where id = ?"
qs = ('?,' * len(index)).rstrip(',')
for document in self.find():
row = self.db.execute(count.format(table=table), (document['_id'],)).fetchone()
if int(row[0]) == 0:
self.db.execute(insert.format(table=table, index=index, q=qs),
[None for x in index])
for key in index:
if key not in document and sparse:
continue
self.db.execute(update.format(table=table, key=key),
(document.get(key, None), document['_id']))
def drop_index(self):
warnings.warn('Index support is currently very alpha and is not guaranteed')
pass
def drop_indexes(self):
warnings.warn('Index support is currently very alpha and is not guaranteed')
pass
|
MIT License
|
thalesgroup/pycryptoki
|
pycryptoki/ca_extensions/hsm_info.py
|
ca_retrieve_license_list
|
python
|
def ca_retrieve_license_list(slot):
license_len = c_ulong()
ret = CA_RetrieveLicenseList(slot, byref(license_len), None)
if ret == CKR_OK:
licenses = (c_ulong * license_len.value)()
ret = CA_RetrieveLicenseList(slot, license_len, cast(licenses, POINTER(c_ulong)))
LOG.info("Getting license id. slot=%s", slot)
if ret != CKR_OK:
return ret, []
else:
return ret, []
return ret, [(licenses[x], licenses[x + 1]) for x in range(0, license_len.value, 2)]
|
Gets the license info for a given slot
:param int slot: Slot index from which to get the license IDs
:returns: Tuple of the CKR return code and a Python list of license ID pairs
:rtype: tuple
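A hedged usage sketch; it assumes an initialized Luna HSM environment, uses slot 0 purely as an example, and the interpretation of each pair element is an assumption:
from pycryptoki.defines import CKR_OK
from pycryptoki.ca_extensions.hsm_info import ca_retrieve_license_list
ret, licenses = ca_retrieve_license_list(0)   # slot 0 is an illustrative slot id
if ret == CKR_OK:
    for first, second in licenses:            # each entry is a pair of c_ulong values
        print(first, second)
else:
    print("CA_RetrieveLicenseList failed with return code 0x%08x" % ret)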
|
https://github.com/thalesgroup/pycryptoki/blob/b1c97389b9db11c8bd96722db5347cc54a051602/pycryptoki/ca_extensions/hsm_info.py#L20-L38
|
import logging
from ctypes import c_ulong, byref, cast, POINTER
from pycryptoki.cryptoki import (
CK_ULONG,
CA_GetNumberOfAllowedContainers,
CA_RetrieveLicenseList,
CA_GetHSMStorageInformation,
CA_GetTSV,
CA_GetCVFirmwareVersion,
)
from pycryptoki.exceptions import make_error_handle_function
from pycryptoki.defines import CKR_OK
LOG = logging.getLogger(__name__)
|
Apache License 2.0
|
ibm/low-resource-text-classification-framework
|
lrtc_lib/oracle_data_access/oracle_data_access_api.py
|
get_gold_labels
|
python
|
def get_gold_labels(dataset_name: str, text_element_uris: Sequence[str], category_name: str = None) -> List[Tuple[str, Mapping[str, Label]]]:
gold_labels = oracle_utils.get_gold_labels(dataset_name, category_name)
return [(uri, gold_labels[uri]) for uri in text_element_uris if gold_labels[uri]]
|
Return the gold labels information for the given TextElements uris, keeping the same order, for the given dataset.
If no gold labels information was added for this dataset, an empty dict is returned.
:param dataset_name: the name of the dataset from which the gold labels should be retrieved
:param text_element_uris:
:param category_name: the name of the category for which label information is needed. Default is None, meaning all
categories.
:return: a list of tuples of TextElement uri and a dictionary of categories to Labels. The order of tuples is the
same order as the order of the TextElement uris given as input.
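A hedged usage sketch; the dataset name, URIs, and category name are illustrative, and gold labels must already have been registered (for example via add_gold_labels) for the call to return anything:
from lrtc_lib.oracle_data_access.oracle_data_access_api import get_gold_labels
labels = get_gold_labels("my_dataset", ["my_dataset-doc0-0", "my_dataset-doc0-1"],
                         category_name="my_category")
for uri, category_to_label in labels:
    print(uri, category_to_label)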
|
https://github.com/ibm/low-resource-text-classification-framework/blob/4380755a65b35265e84ecbf4b87e872d79e8f079/lrtc_lib/oracle_data_access/oracle_data_access_api.py#L37-L52
|
import dataclasses
import os
import ujson as json
from typing import Sequence, List, Mapping, Tuple, Set
import lrtc_lib.oracle_data_access.core.utils as oracle_utils
from lrtc_lib.data_access.core.data_structs import Label
from lrtc_lib.orchestrator.orchestrator_api import LABEL_POSITIVE, LABEL_NEGATIVE
def add_gold_labels(dataset_name: str, text_and_gold_labels: List[Tuple[str, Mapping[str, Label]]]):
oracle_utils.gold_labels_per_dataset = (dataset_name, dict(text_and_gold_labels))
simplified_labels = {k: {str(category): label.to_dict() for category, label in v.items()}
for k, v in oracle_utils.gold_labels_per_dataset[1].items()}
gold_labels_encoded = json.dumps(simplified_labels)
os.makedirs(oracle_utils.get_gold_labels_dump_dir(), exist_ok=True)
with open(oracle_utils.get_labels_dump_filename(dataset_name), 'w') as f:
f.write(gold_labels_encoded)
|
Apache License 2.0
|
lscsoft/bilby
|
bilby/core/result.py
|
Result.nested_samples
|
python
|
def nested_samples(self):
if self._nested_samples is not None:
return self._nested_samples
else:
raise ValueError("Result object has no stored nested samples")
|
An array of unweighted samples
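A hedged usage sketch; the result file path is illustrative, and the property raises ValueError when the sampler did not store nested samples:
from bilby.core.result import read_in_result
result = read_in_result(filename="outdir/label_result.json")  # illustrative path
try:
    print(result.nested_samples)
except ValueError:
    print("this result has no stored nested samples")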
|
https://github.com/lscsoft/bilby/blob/b1e02f1dfae03d4939cae9c95eff300c22919689/bilby/core/result.py#L628-L633
|
import inspect
import json
import os
from collections import OrderedDict, namedtuple
from copy import copy
from importlib import import_module
from itertools import product
import numpy as np
import pandas as pd
import scipy.stats
from . import utils
from .utils import (
logger, infer_parameters_from_function,
check_directory_exists_and_if_not_mkdir,
latex_plot_format, safe_save_figure,
BilbyJsonEncoder, load_json,
move_old_file, get_version_information,
decode_bilby_json, docstring,
recursively_save_dict_contents_to_group,
recursively_load_dict_contents_from_group,
recursively_decode_bilby_json,
)
from .prior import Prior, PriorDict, DeltaFunction, ConditionalDeltaFunction
def result_file_name(outdir, label, extension='json', gzip=False):
if extension == 'pickle':
extension = 'pkl'
if extension in ['json', 'hdf5', 'pkl']:
if extension == 'json' and gzip:
return os.path.join(outdir, '{}_result.{}.gz'.format(label, extension))
else:
return os.path.join(outdir, '{}_result.{}'.format(label, extension))
else:
raise ValueError("Extension type {} not understood".format(extension))
def _determine_file_name(filename, outdir, label, extension, gzip):
if filename is not None:
return filename
else:
if (outdir is None) and (label is None):
raise ValueError("No information given to load file")
else:
return result_file_name(outdir, label, extension, gzip)
def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False):
filename = _determine_file_name(filename, outdir, label, extension, gzip)
extension = os.path.splitext(filename)[1].lstrip('.')
if extension == 'gz':
extension = os.path.splitext(os.path.splitext(filename)[0])[1].lstrip('.')
if 'json' in extension:
result = Result.from_json(filename=filename)
elif ('hdf5' in extension) or ('h5' in extension):
result = Result.from_hdf5(filename=filename)
elif ("pkl" in extension) or ("pickle" in extension):
result = Result.from_pickle(filename=filename)
elif extension is None:
raise ValueError("No filetype extension provided")
else:
raise ValueError("Filetype {} not understood".format(extension))
return result
def get_weights_for_reweighting(
result, new_likelihood=None, new_prior=None, old_likelihood=None,
old_prior=None, resume_file=None, n_checkpoint=5000):
from tqdm.auto import tqdm
nposterior = len(result.posterior)
if (resume_file is not None) and os.path.exists(resume_file):
old_log_likelihood_array, old_log_prior_array, new_log_likelihood_array, new_log_prior_array = np.genfromtxt(resume_file)
starting_index = np.argmin(np.abs(old_log_likelihood_array))
logger.info(f'Checkpoint resuming from {starting_index}.')
else:
old_log_likelihood_array = np.zeros(nposterior)
old_log_prior_array = np.zeros(nposterior)
new_log_likelihood_array = np.zeros(nposterior)
new_log_prior_array = np.zeros(nposterior)
starting_index = 0
for ii, sample in tqdm(result.posterior.iloc[starting_index:].iterrows()):
par_sample = {key: sample[key] for key in result.posterior}
if old_likelihood is not None:
old_likelihood.parameters.update(par_sample)
old_log_likelihood_array[ii] = old_likelihood.log_likelihood()
else:
old_log_likelihood_array[ii] = sample["log_likelihood"]
if new_likelihood is not None:
new_likelihood.parameters.update(par_sample)
new_log_likelihood_array[ii] = new_likelihood.log_likelihood()
else:
new_log_likelihood_array[ii] = old_log_likelihood_array[ii]
if old_prior is not None:
old_log_prior_array[ii] = old_prior.ln_prob(par_sample)
else:
old_log_prior_array[ii] = sample["log_prior"]
if new_prior is not None:
new_log_prior_array[ii] = new_prior.ln_prob(par_sample)
else:
new_log_prior_array[ii] = old_log_prior_array[ii]
if (ii % (n_checkpoint) == 0) and (resume_file is not None):
checkpointed_index = np.argmin(np.abs(old_log_likelihood_array))
logger.info(f'Checkpointing with {checkpointed_index} samples')
np.savetxt(
resume_file,
[old_log_likelihood_array, old_log_prior_array, new_log_likelihood_array, new_log_prior_array])
ln_weights = (
new_log_likelihood_array + new_log_prior_array - old_log_likelihood_array - old_log_prior_array)
return ln_weights, new_log_likelihood_array, new_log_prior_array, old_log_likelihood_array, old_log_prior_array
def rejection_sample(posterior, weights):
keep = weights > np.random.uniform(0, max(weights), weights.shape)
return posterior[keep]
def reweight(result, label=None, new_likelihood=None, new_prior=None,
old_likelihood=None, old_prior=None, conversion_function=None, npool=1,
verbose_output=False, resume_file=None, n_checkpoint=5000,
use_nested_samples=False):
from scipy.special import logsumexp
result = copy(result)
if use_nested_samples:
result.posterior = result.nested_samples
nposterior = len(result.posterior)
logger.info("Reweighting posterior with {} samples".format(nposterior))
ln_weights, new_log_likelihood_array, new_log_prior_array, old_log_likelihood_array, old_log_prior_array = get_weights_for_reweighting(
result, new_likelihood=new_likelihood, new_prior=new_prior,
old_likelihood=old_likelihood, old_prior=old_prior,
resume_file=resume_file, n_checkpoint=n_checkpoint)
weights = np.exp(ln_weights)
if use_nested_samples:
weights *= result.posterior['weights']
result.posterior["log_likelihood"] = new_log_likelihood_array
result.posterior["log_prior"] = new_log_prior_array
result.posterior = rejection_sample(result.posterior, weights=weights)
result.posterior = result.posterior.reset_index(drop=True)
logger.info("Rejection sampling resulted in {} samples".format(len(result.posterior)))
result.meta_data["reweighted_using_rejection_sampling"] = True
if use_nested_samples:
result.log_evidence += np.log(np.sum(weights))
else:
result.log_evidence += logsumexp(ln_weights) - np.log(nposterior)
if new_prior is not None:
for key, prior in new_prior.items():
result.priors[key] = prior
if conversion_function is not None:
data_frame = result.posterior
if "npool" in inspect.getargspec(conversion_function).args:
data_frame = conversion_function(data_frame, new_likelihood, new_prior, npool=npool)
else:
data_frame = conversion_function(data_frame, new_likelihood, new_prior)
result.posterior = data_frame
if label:
result.label = label
else:
result.label += "_reweighted"
if verbose_output:
return result, weights, new_log_likelihood_array, new_log_prior_array, old_log_likelihood_array, old_log_prior_array
else:
return result
class Result(object):
def __init__(self, label='no_label', outdir='.', sampler=None,
search_parameter_keys=None, fixed_parameter_keys=None,
constraint_parameter_keys=None, priors=None,
sampler_kwargs=None, injection_parameters=None,
meta_data=None, posterior=None, samples=None,
nested_samples=None, log_evidence=np.nan,
log_evidence_err=np.nan, information_gain=np.nan,
log_noise_evidence=np.nan, log_bayes_factor=np.nan,
log_likelihood_evaluations=None,
log_prior_evaluations=None, sampling_time=None, nburn=None,
num_likelihood_evaluations=None, walkers=None,
max_autocorrelation_time=None, use_ratio=None,
parameter_labels=None, parameter_labels_with_unit=None,
version=None):
self.label = label
self.outdir = os.path.abspath(outdir)
self.sampler = sampler
self.search_parameter_keys = search_parameter_keys
self.fixed_parameter_keys = fixed_parameter_keys
self.constraint_parameter_keys = constraint_parameter_keys
self.parameter_labels = parameter_labels
self.parameter_labels_with_unit = parameter_labels_with_unit
self.priors = priors
self.sampler_kwargs = sampler_kwargs
self.meta_data = meta_data
self.injection_parameters = injection_parameters
self.posterior = posterior
self.samples = samples
self.nested_samples = nested_samples
self.walkers = walkers
self.nburn = nburn
self.use_ratio = use_ratio
self.log_evidence = log_evidence
self.log_evidence_err = log_evidence_err
self.information_gain = information_gain
self.log_noise_evidence = log_noise_evidence
self.log_bayes_factor = log_bayes_factor
self.log_likelihood_evaluations = log_likelihood_evaluations
self.log_prior_evaluations = log_prior_evaluations
self.num_likelihood_evaluations = num_likelihood_evaluations
self.sampling_time = sampling_time
self.version = version
self.max_autocorrelation_time = max_autocorrelation_time
self.prior_values = None
self._kde = None
@classmethod
def _from_hdf5_old(cls, filename=None, outdir=None, label=None):
import deepdish
filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
if os.path.isfile(filename):
dictionary = deepdish.io.load(filename)
if len(dictionary) == 1 and 'data' in dictionary:
dictionary = dictionary['data']
if "priors" in dictionary:
if not isinstance(dictionary["priors"], PriorDict):
try:
priordict = PriorDict()
for key, value in dictionary["priors"].items():
if key not in ["__module__", "__name__", "__prior_dict__"]:
try:
priordict[key] = decode_bilby_json(value)
except AttributeError:
continue
dictionary["priors"] = priordict
except Exception as e:
raise IOError(
"Unable to parse priors from '{}':\n{}".format(
filename, e,
)
)
try:
if isinstance(dictionary.get('posterior', None), dict):
dictionary['posterior'] = pd.DataFrame(dictionary['posterior'])
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
_load_doctstring = """ Read in a saved .{format} data file
Parameters
==========
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
=======
result: bilby.core.result.Result
Raises
=======
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
@staticmethod
@docstring(_load_doctstring.format(format="pickle"))
def from_pickle(filename=None, outdir=None, label=None):
filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
import dill
with open(filename, "rb") as ff:
return dill.load(ff)
@classmethod
@docstring(_load_doctstring.format(format="hdf5"))
def from_hdf5(cls, filename=None, outdir=None, label=None):
import h5py
filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
with h5py.File(filename, "r") as ff:
data = recursively_load_dict_contents_from_group(ff, '/')
if list(data.keys()) == ["data"]:
return cls._from_hdf5_old(filename=filename)
data["posterior"] = pd.DataFrame(data["posterior"])
data["priors"] = PriorDict._get_from_json_dict(
json.loads(data["priors"], object_hook=decode_bilby_json)
)
try:
cls = getattr(import_module(data['__module__']), data['__name__'])
except ImportError:
logger.debug(
"Module {}.{} not found".format(data["__module__"], data["__name__"])
)
except KeyError:
logger.debug("No class specified, using base Result.")
for key in ["__module__", "__name__"]:
if key in data:
del data[key]
return cls(**data)
@classmethod
@docstring(_load_doctstring.format(format="json"))
def from_json(cls, filename=None, outdir=None, label=None, gzip=False):
filename = _determine_file_name(filename, outdir, label, 'json', gzip)
if os.path.isfile(filename):
dictionary = load_json(filename, gzip)
try:
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
def __str__(self):
if getattr(self, 'posterior', None) is not None:
if getattr(self, 'log_noise_evidence', None) is not None:
return ("nsamples: {:d}\n"
"ln_noise_evidence: {:6.3f}\n"
"ln_evidence: {:6.3f} +/- {:6.3f}\n"
"ln_bayes_factor: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_noise_evidence, self.log_evidence,
self.log_evidence_err, self.log_bayes_factor,
self.log_evidence_err))
else:
return ("nsamples: {:d}\n"
"ln_evidence: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_evidence, self.log_evidence_err))
else:
return ''
@property
def meta_data(self):
return self._meta_data
@meta_data.setter
def meta_data(self, meta_data):
if meta_data is None:
meta_data = dict()
meta_data = recursively_decode_bilby_json(meta_data)
self._meta_data = meta_data
@property
def priors(self):
if self._priors is not None:
return self._priors
else:
raise ValueError('Result object has no priors')
@priors.setter
def priors(self, priors):
if isinstance(priors, dict):
if isinstance(priors, PriorDict):
self._priors = priors
else:
self._priors = PriorDict(priors)
if self.parameter_labels is None:
self.parameter_labels = [self.priors[k].latex_label for k in
self.search_parameter_keys]
if self.parameter_labels_with_unit is None:
self.parameter_labels_with_unit = [
self.priors[k].latex_label_with_unit for k in
self.search_parameter_keys]
elif priors is None:
self._priors = priors
self.parameter_labels = self.search_parameter_keys
self.parameter_labels_with_unit = self.search_parameter_keys
else:
raise ValueError("Input priors not understood")
@property
def samples(self):
if self._samples is not None:
return self._samples
else:
raise ValueError("Result object has no stored samples")
@samples.setter
def samples(self, samples):
self._samples = samples
@property
def num_likelihood_evaluations(self):
if self._num_likelihood_evaluations is not None:
return self._num_likelihood_evaluations
else:
raise ValueError("Result object has no stored likelihood evaluations")
@num_likelihood_evaluations.setter
def num_likelihood_evaluations(self, num_likelihood_evaluations):
self._num_likelihood_evaluations = num_likelihood_evaluations
@property
|
MIT License
|
rcscott/ynab-python
|
ynab/models/month_summaries_wrapper.py
|
MonthSummariesWrapper.__eq__
|
python
|
def __eq__(self, other):
if not isinstance(other, MonthSummariesWrapper):
return False
return self.__dict__ == other.__dict__
|
Returns true if both objects are equal
|
https://github.com/rcscott/ynab-python/blob/a4ef77dd9680f16fcb6e0983771c47dcfb28239c/ynab/models/month_summaries_wrapper.py#L106-L111
|
import pprint
import re
import six
from ynab.models.month_summary import MonthSummary
class MonthSummariesWrapper(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'months': 'list[MonthSummary]'
}
attribute_map = {
'months': 'months'
}
def __init__(self, months=None):
self._months = None
self.discriminator = None
self.months = months
@property
def months(self):
return self._months
@months.setter
def months(self, months):
if months is None:
raise ValueError("Invalid value for `months`, must not be `None`")
self._months = months
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
|
Apache License 2.0
|
dvska/gdata-python3
|
src/gdata/apps/organization/data.py
|
OrgUnitEntry.SetOrgUnitBlockInheritance
|
python
|
def SetOrgUnitBlockInheritance(self, value):
self._SetProperty(ORG_UNIT_BLOCK_INHERITANCE, value)
|
Set the block_inheritance flag of the OrganizationUnit object.
Args:
value: [string] The new block_inheritance flag to give this object.
|
https://github.com/dvska/gdata-python3/blob/a34c35901473e4ba7223ea4607136141301fbe88/src/gdata/apps/organization/data.py#L200-L206
|
import gdata.data
pyproperty = property
ORG_UNIT_NAME = 'name'
ORG_UNIT_PATH = 'orgUnitPath'
PARENT_ORG_UNIT_PATH = 'parentOrgUnitPath'
ORG_UNIT_DESCRIPTION = 'description'
ORG_UNIT_BLOCK_INHERITANCE = 'blockInheritance'
USER_EMAIL = 'orgUserEmail'
USERS_TO_MOVE = 'usersToMove'
MOVED_USERS = 'usersMoved'
CUSTOMER_ID = 'customerId'
CUSTOMER_ORG_UNIT_NAME = 'customerOrgUnitName'
CUSTOMER_ORG_UNIT_DESCRIPTION = 'customerOrgUnitDescription'
OLD_ORG_UNIT_PATH = 'oldOrgUnitPath'
class CustomerIdEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
def GetCustomerId(self):
return self._GetProperty(CUSTOMER_ID)
customer_id = pyproperty(GetCustomerId)
def GetOrgUnitName(self):
return self._GetProperty(ORG_UNIT_NAME)
org_unit_name = pyproperty(GetOrgUnitName)
def GetCustomerOrgUnitName(self):
return self._GetProperty(CUSTOMER_ORG_UNIT_NAME)
customer_org_unit_name = pyproperty(GetCustomerOrgUnitName)
def GetOrgUnitDescription(self):
return self._GetProperty(ORG_UNIT_DESCRIPTION)
org_unit_description = pyproperty(GetOrgUnitDescription)
def GetCustomerOrgUnitDescription(self):
return self._GetProperty(CUSTOMER_ORG_UNIT_DESCRIPTION)
customer_org_unit_description = pyproperty(GetCustomerOrgUnitDescription)
class OrgUnitEntry(gdata.apps.apps_property_entry.AppsPropertyEntry):
def GetOrgUnitName(self):
return self._GetProperty(ORG_UNIT_NAME)
def SetOrgUnitName(self, value):
self._SetProperty(ORG_UNIT_NAME, value)
org_unit_name = pyproperty(GetOrgUnitName, SetOrgUnitName)
def GetOrgUnitPath(self):
return self._GetProperty(ORG_UNIT_PATH)
def SetOrgUnitPath(self, value):
self._SetProperty(ORG_UNIT_PATH, value)
org_unit_path = pyproperty(GetOrgUnitPath, SetOrgUnitPath)
def GetParentOrgUnitPath(self):
return self._GetProperty(PARENT_ORG_UNIT_PATH)
def SetParentOrgUnitPath(self, value):
self._SetProperty(PARENT_ORG_UNIT_PATH, value)
parent_org_unit_path = pyproperty(GetParentOrgUnitPath, SetParentOrgUnitPath)
def GetOrgUnitDescription(self):
return self._GetProperty(ORG_UNIT_DESCRIPTION)
def SetOrgUnitDescription(self, value):
self._SetProperty(ORG_UNIT_DESCRIPTION, value)
org_unit_description = pyproperty(GetOrgUnitDescription,
SetOrgUnitDescription)
def GetOrgUnitBlockInheritance(self):
return self._GetProperty(ORG_UNIT_BLOCK_INHERITANCE)
|
Apache License 2.0
|
blacklight/platypush
|
platypush/plugins/camera/__init__.py
|
CameraPlugin.wait_capture
|
python
|
def wait_capture(self, camera: Camera) -> None:
if camera.capture_thread and camera.capture_thread.is_alive() and threading.get_ident() != camera.capture_thread.ident:
try:
camera.capture_thread.join(timeout=camera.info.capture_timeout)
except Exception as e:
self.logger.warning('Error on FFmpeg capture wait: {}'.format(str(e)))
|
Wait until a capture session terminates.
:param camera: Camera object. ``camera.info.capture_timeout`` is used as a capture thread termination timeout
if set.
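A minimal standalone sketch of the same guard, not importing platypush: join a worker thread with a timeout, but never from inside that thread itself; the 20-second default mirrors the plugin's capture_timeout default:
import threading
import time
def wait_capture(worker, timeout=20.0):
    if worker and worker.is_alive() and threading.get_ident() != worker.ident:
        try:
            worker.join(timeout=timeout)
        except Exception as e:
            print('capture wait failed: %s' % e)
worker = threading.Thread(target=time.sleep, args=(0.1,))  # stand-in for the capture thread
worker.start()
wait_capture(worker)  # returns once the fake capture finishes (or after the timeout)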
|
https://github.com/blacklight/platypush/blob/a5f1dc2638d7c6308325e0ca39dc7d5e262836aa/platypush/plugins/camera/__init__.py#L201-L213
|
import io
import os
import pathlib
import socket
import threading
import time
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime
from multiprocessing import Process
from queue import Queue
from typing import Optional, Union, Dict, Tuple, IO
from platypush.config import Config
from platypush.message.event.camera import CameraRecordingStartedEvent, CameraPictureTakenEvent, CameraRecordingStoppedEvent, CameraVideoRenderedEvent
from platypush.plugins import Plugin, action
from platypush.plugins.camera.model.camera import CameraInfo, Camera
from platypush.plugins.camera.model.exceptions import CameraException, CaptureAlreadyRunningException
from platypush.plugins.camera.model.writer import VideoWriter, StreamWriter
from platypush.plugins.camera.model.writer.ffmpeg import FFmpegFileWriter
from platypush.plugins.camera.model.writer.preview import PreviewWriter, PreviewWriterFactory
from platypush.utils import get_plugin_name_by_class
__all__ = ['Camera', 'CameraInfo', 'CameraException', 'CameraPlugin', 'CaptureAlreadyRunningException',
'StreamWriter']
class CameraPlugin(Plugin, ABC):
_camera_class = Camera
_camera_info_class = CameraInfo
_video_writer_class = FFmpegFileWriter
def __init__(self, device: Optional[Union[int, str]] = None, resolution: Tuple[int, int] = (640, 480),
frames_dir: Optional[str] = None, warmup_frames: int = 5, warmup_seconds: Optional[float] = 0.,
capture_timeout: Optional[float] = 20.0, scale_x: Optional[float] = None,
scale_y: Optional[float] = None, rotate: Optional[float] = None, grayscale: Optional[bool] = None,
color_transform: Optional[Union[int, str]] = None, fps: float = 16, horizontal_flip: bool = False,
vertical_flip: bool = False, input_format: Optional[str] = None, output_format: Optional[str] = None,
stream_format: str = 'mjpeg', listen_port: Optional[int] = 5000, bind_address: str = '0.0.0.0',
ffmpeg_bin: str = 'ffmpeg', input_codec: Optional[str] = None, output_codec: Optional[str] = None,
**kwargs):
super().__init__(**kwargs)
self.workdir = os.path.join(Config.get('workdir'), get_plugin_name_by_class(self))
pathlib.Path(self.workdir).mkdir(mode=0o755, exist_ok=True, parents=True)
self.camera_info = self._camera_info_class(device, color_transform=color_transform, warmup_frames=warmup_frames,
warmup_seconds=warmup_seconds, rotate=rotate, scale_x=scale_x,
scale_y=scale_y, capture_timeout=capture_timeout, fps=fps,
input_format=input_format, output_format=output_format,
stream_format=stream_format, resolution=resolution,
grayscale=grayscale, listen_port=listen_port,
horizontal_flip=horizontal_flip, vertical_flip=vertical_flip,
ffmpeg_bin=ffmpeg_bin, input_codec=input_codec,
output_codec=output_codec, bind_address=bind_address,
frames_dir=os.path.abspath(
os.path.expanduser(frames_dir or
os.path.join(self.workdir, 'frames'))))
self._devices: Dict[Union[int, str], Camera] = {}
self._streams: Dict[Union[int, str], Camera] = {}
def _merge_info(self, **info) -> CameraInfo:
merged_info = self.camera_info.clone()
merged_info.set(**info)
return merged_info
def open_device(self, device: Optional[Union[int, str]] = None, stream: bool = False, **params) -> Camera:
info = self._merge_info(**params)
if device is None:
device = info.device
elif device not in self._devices:
info.device = device
else:
info = self._devices[device].info.clone()
assert device is not None, 'No device specified/configured'
if device in self._devices:
camera = self._devices[device]
if camera.capture_thread and camera.capture_thread.is_alive() and camera.start_event.is_set():
raise CaptureAlreadyRunningException(device)
camera.start_event.clear()
camera.capture_thread = None
else:
camera = self._camera_class(info=info)
camera.info.set(**params)
camera.object = self.prepare_device(camera)
if stream:
writer_class = StreamWriter.get_class_by_name(camera.info.stream_format)
camera.stream = writer_class(camera=camera, plugin=self)
if camera.info.frames_dir:
pathlib.Path(os.path.abspath(os.path.expanduser(camera.info.frames_dir))).mkdir(
mode=0o755, exist_ok=True, parents=True)
self._devices[device] = camera
return camera
def close_device(self, camera: Camera, wait_capture: bool = True) -> None:
name = camera.info.device
self.stop_preview(camera)
self.release_device(camera)
camera.start_event.clear()
if wait_capture:
self.wait_capture(camera)
if name in self._devices:
del self._devices[name]
|
MIT License
|
schettino72/pyregurgitator
|
pyreg/astview.py
|
AstNode.tree
|
python
|
def tree(cls, stream, filename):
ct = ast.parse(stream.read(), filename)
stream.seek(0)
lines = stream.readlines()
cls.line_list = lines
return cls(ct, '', [l for l in lines], None)
|
Build the whole AST for a module from its source stream.
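The classmethod only needs a readable, seekable text stream; a minimal standalone sketch of the same calls it performs, without importing pyreg:
import ast
import io
source = "x = 1\nprint(x)\n"
stream = io.StringIO(source)
module = ast.parse(stream.read(), "<example>")  # same parse call tree() makes
stream.seek(0)                                  # rewind so the lines can be re-read
lines = stream.readlines()
print(len(lines))        # 2
print(ast.dump(module))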
|
https://github.com/schettino72/pyregurgitator/blob/988f507d5e2a02c38f98673fa8c57245bdae3da4/pyreg/astview.py#L106-L114
|
import platform
import os
import ast
import json
import argparse
from pkg_resources import resource_filename
import jinja2
class AstField(object):
class TypeField(AstField):
def __init__(self, value, path, lines):
self.value = value
self.path = path
def to_text(self):
return repr(self.value)
def to_map(self):
return ["%s => %s" % (self.path, repr(self.value))]
def to_html(self):
if isinstance(self.value, str):
str_value = repr(self.value.replace('\n', '\n<br/>'))
else:
str_value = repr(self.value)
return '<span class="final">%s</span>' % str_value
class NodeField(AstField):
def __init__(self, value, path, lines, parent):
self.value = parent.__class__(value, path, lines, parent)
self.path = path
def to_text(self):
return self.value.to_text()
def to_map(self):
ll = ["%s (%s)" % (self.path, self.value.node.__class__.__name__)]
ll.extend(self.value.to_map())
return ll
def to_html(self):
return self.value.to_html()
class ListField(AstField):
def __init__(self, value, path, lines, parent):
self.value = []
for i,n in enumerate(value):
path = "%s[%d]" % (path,i)
if isinstance(n , ast.AST):
node = parent.__class__(n, path, lines, parent)
self.value.append(node)
else:
self.value.append(TypeField(n, path, lines))
self.path = path
def to_text(self):
return "[%s]" % ", ".join((n.to_text() for n in self.value))
def to_map(self):
ll = ["%s []" % self.path]
for n in self.value:
ll.append("%s (%s)" % (n.path, n.node.__class__.__name__))
ll.extend(n.to_map())
return ll
def to_html(self):
t_head = '<table class="field_list">'
row = "<tr><td>%s</td></tr>"
t_body = "".join(row % n.to_html() for n in self.value)
t_foot = '</table>'
return t_head + t_body + t_foot
class AstNode(object):
node_template = None
MAP = None
@classmethod
|
MIT License
|
claws/txpachube
|
txpachube/client.py
|
Client._handleRequestFailure
|
python
|
def _handleRequestFailure(self, failure, url=None):
if url:
logging.error('Error communicating with url %s\nError: %s\n' % (url, failure))
else:
logging.error('Error detected: %s' % (failure))
|
Callback to handle an error resulting from an attempt to communicate with pachube.
@param failure: A failure instance
@type failure: twisted.python.failure.Failure instance
@param url: The url used during the request
@type url: string
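A hedged sketch of how such a callback is typically wired as a Twisted errback (Twisted must be installed; the URL and error are illustrative, and errback() wraps a plain exception in a Failure automatically):
import logging
from twisted.internet import defer
def handle_failure(failure, url=None):
    # same shape as Client._handleRequestFailure: log and consume the Failure
    if url:
        logging.error('Error communicating with url %s\nError: %s\n' % (url, failure))
    else:
        logging.error('Error detected: %s' % (failure,))
d = defer.Deferred()
d.addErrback(handle_failure, url="http://api.pachube.com/v2/feeds/123")
d.errback(RuntimeError("connection refused"))  # the errback above logs and consumes it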
|
https://github.com/claws/txpachube/blob/d9ba7b7440408bc459de75270a60c8eb6e14cf5b/txpachube/client.py#L156-L168
|
import json
import logging
import txpachube
import urllib
import uuid
from twisted.internet import reactor, defer
from twisted.internet.protocol import Protocol, ReconnectingClientFactory
from twisted.web.client import Agent, ResponseDone
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer
from zope.interface import implements
class RequestBodyProducer(object):
implements(IBodyProducer)
def __init__(self, body=None):
if body is None:
self.body = ""
else:
self.body = body
self.length = len(body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class ResponseBodyProtocol(Protocol):
def __init__(self, finished, response):
self.finished = finished
self.response = response
self.buffer = []
def dataReceived(self, bytes):
self.buffer.append(bytes)
def connectionLost(self, reason):
r = reason.trap(ResponseDone)
if r == ResponseDone:
logging.debug(reason.getErrorMessage())
responseData = "".join(self.buffer)
self.buffer = []
result = (self.response, responseData)
self.finished.callback(result)
else:
logging.error("Problem reading response body: %s" % reason.getErrorMessage())
class Client(object):
api_url = "api.pachube.com/v2"
def __init__(self, api_key=None, feed_id=None, use_http=False, timezone=None):
self.feed_id = feed_id
self.api_key = api_key
prefix = "https"
if use_http:
prefix = "http"
self.api_url = "%s://api.pachube.com/v2" % (prefix)
self.timezone = None
if timezone:
self.timezone = "timezone=%s" % timezone
self.agent = Agent(reactor)
self.headers = {'User-Agent': 'txpachube Client',
'Content-Type' : 'application/x-www-form-urlencoded'}
def _handleResponseHeader(self, response, url):
logging.debug("Success communicating with url: %s" % (url))
finished = defer.Deferred()
response.deliverBody(ResponseBodyProtocol(finished, response))
return finished
|
MIT License
|
jmchilton/galaxy-central
|
modules/docutils/statemachine.py
|
StateMachine.get_text_block
|
python
|
def get_text_block(self, flush_left=0):
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, error:
block, source, lineno = error
self.next_line(len(block) - 1)
raise
|
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
|
https://github.com/jmchilton/galaxy-central/blob/31e2fd3a32b06ddfba06ae5b044efdce1d93f08c/modules/docutils/statemachine.py#L367-L383
|
__docformat__ = 'restructuredtext'
import sys
import re
from types import SliceType as _SliceType
class StateMachine:
def __init__(self, state_classes, initial_state, debug=0):
self.input_lines = None
self.input_offset = 0
self.line = None
self.line_offset = -1
self.debug = debug
self.initial_state = initial_state
self.current_state = initial_state
self.states = {}
self.add_states(state_classes)
self.observers = []
def unlink(self):
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None):
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = self.initial_state
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>sys.stderr, ('\nStateMachine.run: bof transition')
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>sys.stderr, (
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line()
transitions = (exception.args[0],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line()
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
if next_state:
if self.debug and next_state != self.current_state:
print >>sys.stderr, ('\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
return self.line_offset <= 0
def previous_line(self, n=1):
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
return self.line_offset + self.input_offset
def abs_line_number(self):
return self.line_offset + self.input_offset + 1
def insert_input(self, input_lines, source):
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
|
MIT License
|
osmr/imgclsmob
|
gluon/gluoncv2/models/resnext.py
|
resnext101_32x4d
|
python
|
def resnext101_32x4d(**kwargs):
return get_resnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="resnext101_32x4d", **kwargs)
|
ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
|
https://github.com/osmr/imgclsmob/blob/ea5f784eea865ce830f3f97c5c1d1f6491d9cbb2/gluon/gluoncv2/models/resnext.py#L415-L429
|
__all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d',
'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
'ResNeXtBottleneck', 'ResNeXtUnit']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResInitBlock
class ResNeXtBottleneck(HybridBlock):
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
bn_use_global_stats,
bottleneck_factor=4,
**kwargs):
super(ResNeXtBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // bottleneck_factor
D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
group_width = cardinality * D
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=group_width,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=group_width,
out_channels=group_width,
strides=strides,
groups=cardinality,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv1x1_block(
in_channels=group_width,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class ResNeXtUnit(HybridBlock):
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
bn_use_global_stats,
**kwargs):
super(ResNeXtUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = ResNeXtBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class ResNeXt(HybridBlock):
def __init__(self,
channels,
init_block_channels,
cardinality,
bottleneck_width,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ResNeXt, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ResNeXtUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_resnext(blocks,
cardinality,
bottleneck_width,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
if blocks == 14:
layers = [1, 1, 1, 1]
elif blocks == 26:
layers = [2, 2, 2, 2]
elif blocks == 38:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
else:
raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks))
assert (sum(layers) * 3 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = ResNeXt(
channels=channels,
init_block_channels=init_block_channels,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def resnext14_16x4d(**kwargs):
return get_resnext(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d", **kwargs)
def resnext14_32x2d(**kwargs):
return get_resnext(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d", **kwargs)
def resnext14_32x4d(**kwargs):
return get_resnext(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d", **kwargs)
def resnext26_16x4d(**kwargs):
return get_resnext(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d", **kwargs)
def resnext26_32x2d(**kwargs):
return get_resnext(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d", **kwargs)
def resnext26_32x4d(**kwargs):
return get_resnext(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d", **kwargs)
def resnext38_32x4d(**kwargs):
return get_resnext(blocks=38, cardinality=32, bottleneck_width=4, model_name="resnext38_32x4d", **kwargs)
def resnext50_32x4d(**kwargs):
return get_resnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="resnext50_32x4d", **kwargs)
|
MIT License
|
scanapi/scanapi
|
scanapi/tree/request_node.py
|
RequestNode._validate
|
python
|
def _validate(self):
validate_keys(
self.spec.keys(), self.ALLOWED_KEYS, self.REQUIRED_KEYS, self.SCOPE
)
|
Validate spec keys.
Returns:
None
|
https://github.com/scanapi/scanapi/blob/f974b4e4abff8d061e8f8c1cec70b1845034ef2a/scanapi/tree/request_node.py#L178-L187
|
import logging
import time
from scanapi.errors import HTTPMethodNotAllowedError
from scanapi.evaluators.spec_evaluator import SpecEvaluator
from scanapi.hide_utils import hide_sensitive_info
from scanapi.test_status import TestStatus
from scanapi.tree.testing_node import TestingNode
from scanapi.tree.tree_keys import (
BODY_KEY,
DELAY_KEY,
HEADERS_KEY,
METHOD_KEY,
NAME_KEY,
PARAMS_KEY,
PATH_KEY,
RETRY_KEY,
TESTS_KEY,
VARS_KEY,
)
from scanapi.utils import join_urls, session_with_retry, validate_keys
logger = logging.getLogger(__name__)
class RequestNode:
SCOPE = "request"
ALLOWED_KEYS = (
BODY_KEY,
HEADERS_KEY,
METHOD_KEY,
NAME_KEY,
PARAMS_KEY,
PATH_KEY,
TESTS_KEY,
VARS_KEY,
DELAY_KEY,
RETRY_KEY,
)
ALLOWED_HTTP_METHODS = (
"GET",
"POST",
"PUT",
"PATCH",
"DELETE",
"HEAD",
"OPTIONS",
)
REQUIRED_KEYS = (NAME_KEY,)
def __init__(self, spec, endpoint):
self.spec = spec
self.endpoint = endpoint
self._validate()
def __repr__(self):
return f"<{self.__class__.__name__} {self.full_url_path}>"
def __getitem__(self, item):
return self.spec[item]
@property
def http_method(self):
method = self.spec.get(METHOD_KEY, "get").upper()
if method not in self.ALLOWED_HTTP_METHODS:
raise HTTPMethodNotAllowedError(method, self.ALLOWED_HTTP_METHODS)
return method
@property
def name(self):
return self[NAME_KEY]
@property
def full_url_path(self):
base_path = self.endpoint.path
path = str(self.spec.get(PATH_KEY, ""))
full_url = join_urls(base_path, path)
return self.endpoint.spec_vars.evaluate(full_url)
@property
def headers(self):
endpoint_headers = self.endpoint.headers
headers = self.spec.get(HEADERS_KEY, {})
return self.endpoint.spec_vars.evaluate({**endpoint_headers, **headers})
@property
def params(self):
endpoint_params = self.endpoint.params
params = self.spec.get(PARAMS_KEY, {})
return self.endpoint.spec_vars.evaluate({**endpoint_params, **params})
@property
def delay(self):
delay = self.spec.get(DELAY_KEY, 0)
return delay or self.endpoint.delay
@property
def body(self):
body = self.spec.get(BODY_KEY)
return self.endpoint.spec_vars.evaluate(body)
@property
def tests(self):
return (
TestingNode(spec, self) for spec in self.spec.get(TESTS_KEY, [])
)
@property
def retry(self):
return self.spec.get(RETRY_KEY)
def run(self):
time.sleep(self.delay / 1000)
method = self.http_method
url = self.full_url_path
logger.info("Making request %s %s", method, url)
self.endpoint.spec_vars.update(
self.spec.get(VARS_KEY, {}),
extras=dict(self.endpoint.spec_vars),
filter_responses=True,
)
session = session_with_retry(self.retry)
response = session.request(
method,
url,
headers=self.headers,
params=self.params,
json=self.body,
allow_redirects=False,
)
extras = dict(self.endpoint.spec_vars)
extras["response"] = response
self.endpoint.spec_vars.update(
self.spec.get(VARS_KEY, {}), extras=extras,
)
tests_results = self._run_tests()
hide_sensitive_info(response)
del self.endpoint.spec_vars["response"]
return {
"response": response,
"tests_results": tests_results,
"no_failure": all(
test_result["status"] == TestStatus.PASSED
for test_result in tests_results
),
"request_node_name": self.name,
}
def _run_tests(self):
return [test.run() for test in self.tests]
|
MIT License
|
regionmask/regionmask
|
regionmask/core/regions.py
|
Regions.numbers
|
python
|
def numbers(self):
return self.combiner("number")
|
list of the numbers of the regions
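A small sketch of how the numbers line up with the constructor defaults; the square outlines and labels below are invented for illustration:
# Hedged sketch: two square regions; numbers default to range(len(outlines)).
from shapely.geometry import Polygon
from regionmask import Regions

outl_1 = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
outl_2 = Polygon([(1, 0), (1, 1), (2, 1), (2, 0)])
regions = Regions([outl_1, outl_2], names=["left", "right"], abbrevs=["L", "R"])

print(regions.numbers)   # [0, 1]
print(regions.names)     # ['left', 'right']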
|
https://github.com/regionmask/regionmask/blob/43e478288700ff6da6b957b8342ae4e5ed4ceb85/regionmask/core/regions.py#L199-L201
|
import copy
from collections import OrderedDict
import numpy as np
from shapely.geometry import MultiPolygon, Polygon
from .formatting import _display
from .mask import _inject_mask_docstring, _mask_2D, _mask_3D
from .plot import _plot, _plot_regions
from .utils import _is_180, _is_numeric, _maybe_to_dict, _sanitize_names_abbrevs
class Regions:
def __init__(
self,
outlines,
numbers=None,
names=None,
abbrevs=None,
name="unnamed",
source=None,
):
if numbers is None:
numbers = range(len(outlines))
if not _is_numeric(numbers):
raise ValueError("'numbers' must be numeric")
outlines = _maybe_to_dict(numbers, outlines)
names = _sanitize_names_abbrevs(numbers, names, "Region")
abbrevs = _sanitize_names_abbrevs(numbers, abbrevs, "r")
regions = OrderedDict()
for n in sorted(numbers):
regions[n] = _OneRegion(n, names[n], abbrevs[n], outlines[n])
self.regions = regions
self.name = name
self.source = source
def __getitem__(self, key):
key = self.map_keys(key)
if isinstance(key, (int, np.integer)):
return self.regions[key]
else:
regions = OrderedDict()
for k in key:
regions[k] = self.regions[k]
new_self = copy.copy(self)
new_self.regions = regions
return new_self
def __len__(self):
return len(self.numbers)
def map_keys(self, key):
if isinstance(key, (int, np.integer, str)):
key = self.region_ids[key]
else:
key = [self.region_ids[k] for k in key]
key = np.unique(key).tolist()
return key
def __repr__(self):
return self._display()
def __iter__(self):
for i in self.numbers:
yield self[i]
def combiner(self, prop):
return [getattr(r, prop) for r in self.regions.values()]
@property
def region_ids(self):
abbrevs = self.abbrevs
names = self.names
numbers = self.numbers
all_comb = zip(numbers + abbrevs + names, (numbers * 3))
region_ids = {key: value for key, value in all_comb}
return region_ids
@property
def abbrevs(self):
return self.combiner("abbrev")
@property
def names(self):
return self.combiner("name")
@property
|
MIT License
|
adriangb/scikeras
|
scikeras/wrappers.py
|
BaseWrapper.current_epoch
|
python
|
def current_epoch(self) -> int:
if not hasattr(self, "history_"):
return 0
return len(self.history_["loss"])
|
Returns the current training epoch.
Returns
-------
int
Current training epoch.
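A minimal sketch of the behaviour, assuming current_epoch is exposed as a read-only property and using KerasClassifier as an arbitrary concrete wrapper; history_ is set by hand here instead of by an actual fit:
# Hedged sketch: current_epoch is 0 before any fit and tracks len(history_["loss"]).
from scikeras.wrappers import KerasClassifier

clf = KerasClassifier()
print(clf.current_epoch)                    # 0 -- no history_ attribute yet
clf.history_ = {"loss": [0.9, 0.7, 0.5]}    # simulate three finished epochs
print(clf.current_epoch)                    # 3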
|
https://github.com/adriangb/scikeras/blob/b6f5979eb99e19114b0685743e06a96f02724bbb/scikeras/wrappers.py#L250-L260
|
import inspect
import warnings
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Mapping, Set, Tuple, Type, Union
import numpy as np
import tensorflow as tf
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.metrics import accuracy_score as sklearn_accuracy_score
from sklearn.metrics import r2_score as sklearn_r2_score
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _check_sample_weight, check_array, check_X_y
from tensorflow.keras import losses as losses_module
from tensorflow.keras.models import Model
from tensorflow.keras.utils import register_keras_serializable
from scikeras._utils import (
TFRandomState,
accepts_kwargs,
get_loss_class_function_or_string,
get_metric_class,
get_optimizer_class,
has_param,
route_params,
try_to_convert_strings_to_classes,
unflatten_params,
)
from scikeras.utils import loss_name, metric_name
from scikeras.utils.transformers import ClassifierLabelEncoder, RegressorTargetEncoder
class BaseWrapper(BaseEstimator):
_tags = {
"poor_score": True,
"multioutput": True,
}
_fit_kwargs = {
"batch_size",
"epochs",
"verbose",
"validation_split",
"shuffle",
"class_weight",
"sample_weight",
"initial_epoch",
"validation_steps",
"validation_batch_size",
"validation_freq",
}
_predict_kwargs = {
"batch_size",
"verbose",
"steps",
}
_compile_kwargs = {
"optimizer",
"loss",
"metrics",
"loss_weights",
"weighted_metrics",
"run_eagerly",
}
_wrapper_params = {
"warm_start",
"random_state",
}
_routing_prefixes = {
"model",
"fit",
"compile",
"predict",
"optimizer",
"loss",
"metrics",
}
def __init__(
self,
model: Union[None, Callable[..., tf.keras.Model], tf.keras.Model] = None,
*,
build_fn: Union[
None, Callable[..., tf.keras.Model], tf.keras.Model
] = None,
warm_start: bool = False,
random_state: Union[int, np.random.RandomState, None] = None,
optimizer: Union[
str, tf.keras.optimizers.Optimizer, Type[tf.keras.optimizers.Optimizer]
] = "rmsprop",
loss: Union[
Union[str, tf.keras.losses.Loss, Type[tf.keras.losses.Loss], Callable], None
] = None,
metrics: Union[
List[
Union[
str,
tf.keras.metrics.Metric,
Type[tf.keras.metrics.Metric],
Callable,
]
],
None,
] = None,
batch_size: Union[int, None] = None,
validation_batch_size: Union[int, None] = None,
verbose: int = 1,
callbacks: Union[
List[Union[tf.keras.callbacks.Callback, Type[tf.keras.callbacks.Callback]]],
None,
] = None,
validation_split: float = 0.0,
shuffle: bool = True,
run_eagerly: bool = False,
epochs: int = 1,
**kwargs,
):
self.model = model
self.build_fn = build_fn
self.warm_start = warm_start
self.random_state = random_state
self.optimizer = optimizer
self.loss = loss
self.metrics = metrics
self.batch_size = batch_size
self.validation_batch_size = validation_batch_size
self.verbose = verbose
self.callbacks = callbacks
self.validation_split = validation_split
self.shuffle = shuffle
self.run_eagerly = run_eagerly
self.epochs = epochs
vars(self).update(**kwargs)
if kwargs:
self._user_params = set(kwargs)
@property
def __name__(self):
return self.__class__.__name__
@property
|
MIT License
|
randsleadershipslack/destalinator
|
utils/__init__.py
|
get_local_file_content
|
python
|
def get_local_file_content(file_name):
f = codecs.open(file_name, encoding='utf-8')
ret = f.read().strip()
f.close()
return ret
|
Read the contents of `file_name` into a unicode string, return the unicode string.
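A quick round-trip sketch; the temporary file and the import path from within the destalinator repo are illustrative assumptions:
# Hedged sketch: write a UTF-8 file, then read it back with get_local_file_content.
import os
import tempfile
from utils import get_local_file_content   # assumed import path inside the repo

tmp = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", suffix=".txt", delete=False)
tmp.write("sekrit-token\n")
tmp.close()

print(get_local_file_content(tmp.name))    # "sekrit-token" (whitespace stripped)
os.remove(tmp.name)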
|
https://github.com/randsleadershipslack/destalinator/blob/d9c2cf8126a1cddace9cba2d8288807549337e23/utils/__init__.py#L4-L9
|
import codecs
|
Apache License 2.0
|
pelioniot/mbed-cloud-sdk-python
|
src/mbed_cloud/update/update.py
|
CampaignDeviceState.id
|
python
|
def id(self):
return self._id
|
The id of the metadata record (readonly).
:rtype: str
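In context, the id is read off records returned by UpdateAPI.list_campaign_device_states. A hedged sketch; credentials are assumed to come from the SDK's usual environment/config-file lookup, and the campaign id is a placeholder:
# Hedged sketch: iterate the device states of one campaign and print their record ids.
from mbed_cloud.update import UpdateAPI

api = UpdateAPI()   # assumes API key is configured via environment or config file
for state in api.list_campaign_device_states("015f3d...campaign-id"):
    print(state.id, state.device_id)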
|
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/update/update.py#L715-L720
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from mbed_cloud.core import BaseAPI
from mbed_cloud.core import BaseObject
from mbed_cloud.pagination import PaginatedResponse
from mbed_cloud.utils import force_utc
from mbed_cloud.decorators import catch_exceptions
from mbed_cloud.device_directory import Device
from six import iteritems
from mbed_cloud import filters
import mbed_cloud._backends.update_service as update_service
from mbed_cloud._backends.update_service.models import UpdateCampaignPostRequest
from mbed_cloud._backends.update_service.rest import ApiException as UpdateServiceApiException
LOG = logging.getLogger(__name__)
class UpdateAPI(BaseAPI):
api_structure = {update_service: [update_service.DefaultApi]}
@catch_exceptions(UpdateServiceApiException)
def list_campaigns(self, **kwargs):
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, Campaign, True)
api = self._get_api(update_service.DefaultApi)
return PaginatedResponse(api.update_campaign_list, lwrap_type=Campaign, **kwargs)
@catch_exceptions(UpdateServiceApiException)
def get_campaign(self, campaign_id):
api = self._get_api(update_service.DefaultApi)
return Campaign(api.update_campaign_retrieve(campaign_id))
@catch_exceptions(UpdateServiceApiException)
def add_campaign(self, name, device_filter, **kwargs):
device_filter = filters.legacy_filter_formatter(
dict(filter=device_filter),
Device._get_attributes_map()
)
campaign = Campaign._create_request_map(kwargs)
if 'when' in campaign:
campaign['when'] = force_utc(campaign['when'])
body = UpdateCampaignPostRequest(
name=name,
device_filter=device_filter['filter'],
**campaign)
api = self._get_api(update_service.DefaultApi)
return Campaign(api.update_campaign_create(body))
@catch_exceptions(UpdateServiceApiException)
def start_campaign(self, campaign_object):
campaign_object._state = "scheduled"
return self.update_campaign(campaign_object)
@catch_exceptions(UpdateServiceApiException)
def update_campaign(self, campaign_object=None, campaign_id=None, **kwargs):
api = self._get_api(update_service.DefaultApi)
if campaign_object:
campaign_id = campaign_object.id
campaign_object = campaign_object._create_patch_request()
else:
campaign_object = Campaign._create_request_map(kwargs)
if 'device_filter' in campaign_object:
campaign_object["device_filter"] = filters.legacy_filter_formatter(
dict(filter=campaign_object["device_filter"]),
Device._get_attributes_map()
)['filter']
if 'when' in campaign_object:
campaign_object['when'] = force_utc(campaign_object['when'])
return Campaign(api.update_campaign_update(campaign_id=campaign_id,
campaign=campaign_object))
@catch_exceptions(UpdateServiceApiException)
def delete_campaign(self, campaign_id):
api = self._get_api(update_service.DefaultApi)
api.update_campaign_destroy(campaign_id)
return
@catch_exceptions(UpdateServiceApiException)
def list_campaign_device_states(self, campaign_id, **kwargs):
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, CampaignDeviceState, True)
kwargs["campaign_id"] = campaign_id
api = self._get_api(update_service.DefaultApi)
return PaginatedResponse(api.update_campaign_metadata_list,
lwrap_type=CampaignDeviceState, **kwargs)
@catch_exceptions(UpdateServiceApiException)
def get_firmware_image(self, image_id):
api = self._get_api(update_service.DefaultApi)
return FirmwareImage(api.firmware_image_retrieve(image_id))
@catch_exceptions(UpdateServiceApiException)
def list_firmware_images(self, **kwargs):
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, FirmwareImage, True)
api = self._get_api(update_service.DefaultApi)
return PaginatedResponse(api.firmware_image_list, lwrap_type=FirmwareImage, **kwargs)
@catch_exceptions(UpdateServiceApiException)
def add_firmware_image(self, name, datafile, **kwargs):
kwargs.update({'name': name})
firmware_image = FirmwareImage._create_request_map(kwargs)
firmware_image.update({'datafile': datafile})
api = self._get_api(update_service.DefaultApi)
return FirmwareImage(
api.firmware_image_create(**firmware_image)
)
@catch_exceptions(UpdateServiceApiException)
def delete_firmware_image(self, image_id):
api = self._get_api(update_service.DefaultApi)
api.firmware_image_destroy(image_id=image_id)
return
@catch_exceptions(UpdateServiceApiException)
def get_firmware_manifest(self, manifest_id):
api = self._get_api(update_service.DefaultApi)
return FirmwareManifest(api.firmware_manifest_retrieve(manifest_id=manifest_id))
@catch_exceptions(UpdateServiceApiException)
def list_firmware_manifests(self, **kwargs):
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, FirmwareManifest, True)
api = self._get_api(update_service.DefaultApi)
return PaginatedResponse(api.firmware_manifest_list, lwrap_type=FirmwareManifest, **kwargs)
@catch_exceptions(UpdateServiceApiException)
def add_firmware_manifest(self, name, datafile, key_table_file=None, **kwargs):
kwargs.update({
'name': name,
'url': datafile,
})
if key_table_file is not None:
kwargs.update({'key_table_url': key_table_file})
firmware_manifest = FirmwareManifest._create_request_map(kwargs)
api = self._get_api(update_service.DefaultApi)
return FirmwareManifest(
api.firmware_manifest_create(**firmware_manifest)
)
@catch_exceptions(UpdateServiceApiException)
def delete_firmware_manifest(self, manifest_id):
api = self._get_api(update_service.DefaultApi)
return api.firmware_manifest_destroy(manifest_id)
class FirmwareImage(BaseObject):
@staticmethod
def _get_attributes_map():
return {
"created_at": "created_at",
"datafile_checksum": "datafile_checksum",
"datafile_size": "datafile_size",
"description": "description",
"id": "id",
"name": "name",
"updated_at": "updated_at",
"url": "datafile",
}
@property
def created_at(self):
return self._created_at
@property
def url(self):
return self._url
@property
def datafile_checksum(self):
return self._datafile_checksum
@property
def datafile_size(self):
return self._datafile_size
@property
def description(self):
return self._description
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def updated_at(self):
return self._updated_at
class FirmwareManifest(BaseObject):
@staticmethod
def _get_attributes_map():
return {
"created_at": "created_at",
"url": "datafile",
"key_table_url": "key_table",
"description": "description",
"device_class": "device_class",
"datafile_size": "datafile_size",
"id": "id",
"name": "name",
"timestamp": "timestamp",
"updated_at": "updated_at",
"version": "version"
}
@property
def created_at(self):
return self._created_at
@property
def url(self):
return self._url
@property
def key_table_url(self):
return self._key_table_url
@property
def description(self):
return self._description
@property
def device_class(self):
return self._device_class
@property
def datafile_size(self):
return self._datafile_size
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def timestamp(self):
return self._timestamp
@property
def updated_at(self):
return self._updated_at
@property
def version(self):
return self._version
class Campaign(BaseObject):
@staticmethod
def _get_attributes_map():
return {
"created_at": "created_at",
"description": "description",
"device_filter": "device_filter",
"finished_at": "finished",
"id": "id",
"manifest_id": "root_manifest_id",
"manifest_url": "root_manifest_url",
"name": "name",
"phase": "phase",
"scheduled_at": "when",
"started_at": "started_at",
"state": "state",
"updated_at": "updated_at",
}
def _create_patch_request(self):
patch_map = {
"description": "description",
"device_filter": "device_filter",
"manifest_id": "root_manifest_id",
"name": "name",
"scheduled_at": "when",
"state": "state",
}
map_patch = {}
for key, value in iteritems(patch_map):
val = getattr(self, key, None)
if val is not None:
map_patch[value] = val
return map_patch
@property
def phase(self):
return self._phase
@property
def device_filter(self):
if isinstance(self._device_filter, str):
return self._decode_query(self._device_filter)
return self._device_filter
@device_filter.setter
def device_filter(self, device_filter):
self._device_filter = device_filter
@property
def created_at(self):
return self._created_at
@property
def updated_at(self):
return self._updated_at
@property
def description(self):
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def finished_at(self):
return self._finished_at
@property
def id(self):
return self._id
@property
def manifest_id(self):
return self._manifest_id
@manifest_id.setter
def manifest_id(self, manifest_id):
self._manifest_id = manifest_id
@property
def manifest_url(self):
return self._manifest_url
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if name is not None and len(name) > 128:
raise ValueError("Invalid value for `name`, length must be less than or equal to `128`")
self._name = name
@property
def started_at(self):
return self._started_at
@property
def state(self):
return self._state
@property
def scheduled_at(self):
return self._scheduled_at
@scheduled_at.setter
def scheduled_at(self, scheduled_at):
self._scheduled_at = scheduled_at
class CampaignDeviceState(BaseObject):
@staticmethod
def _get_attributes_map():
return {
"id": "id",
"device_id": "device_id",
"campaign_id": "campaign",
"state": "deployment_state",
"name": "name",
"description": "description",
"created_at": "created_at",
"updated_at": "updated_at",
"mechanism": "mechanism",
"mechanism_url": "mechanism_url"
}
@property
|
Apache License 2.0
|
cirada-tools/rm-tools
|
RMutils/nestle.py
|
Ellipsoid.sample
|
python
|
def sample(self, rstate=np.random):
return self.ctr + self.randoffset(rstate=rstate)
|
Choose a sample randomly distributed within the ellipsoid.
Returns
-------
x : 1-d array
A single point within the ellipsoid.
|
https://github.com/cirada-tools/rm-tools/blob/083354a22292f4a6ae27a7336fbbfe22e8868c1e/RMutils/nestle.py#L288-L296
|
import sys
import warnings
import math
import numpy as np
try:
from scipy.cluster.vq import kmeans2
HAVE_KMEANS = True
except ImportError:
HAVE_KMEANS = False
__all__ = ["sample", "print_progress", "mean_and_cov", "resample_equal",
"Result"]
__version__ = "0.2.0"
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
def vol_prefactor(n):
if n % 2 == 0:
f = 1.
i = 2
while i <= n:
f *= (2. / i * math.pi)
i += 2
else:
f = 2.
i = 3
while i <= n:
f *= (2. / i * math.pi)
i += 2
return f
def randsphere(n, rstate=np.random):
z = rstate.randn(n)
return z * rstate.rand()**(1./n) / np.sqrt(np.sum(z**2))
def random_choice(a, p, rstate=np.random):
if abs(np.sum(p) - 1.) > SQRTEPS:
raise ValueError("probabilities do not sum to 1")
r = rstate.rand()
i = 0
t = p[i]
while t < r:
i += 1
t += p[i]
return i
def resample_equal(samples, weights, rstate=None):
if abs(np.sum(weights) - 1.) > SQRTEPS:
raise ValueError("weights do not sum to 1")
if rstate is None:
rstate = np.random
N = len(weights)
positions = (rstate.random() + np.arange(N)) / N
    idx = np.zeros(N, dtype=int)
cumulative_sum = np.cumsum(weights)
i, j = 0, 0
while i < N:
if positions[i] < cumulative_sum[j]:
idx[i] = j
i += 1
else:
j += 1
return samples[idx]
class Result(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if list(self.keys()):
m = max(list(map(len, list(self.keys())))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in list(self.items())])
else:
return self.__class__.__name__ + "()"
def summary(self):
return ("niter: {:d}\n"
"ncall: {:d}\n"
"nsamples: {:d}\n"
"logz: {:6.3f} +/- {:6.3f}\n"
"h: {:6.3f}"
.format(self.niter, self.ncall, len(self.samples),
self.logz, self.logzerr, self.h))
def mean_and_cov(x, weights):
mean = np.average(x, weights=weights, axis=0)
dx = x - mean
wsum = np.sum(weights)
w2sum = np.sum(weights**2)
cov = wsum / (wsum**2 - w2sum) * np.einsum('i,ij,ik', weights, dx, dx)
return mean, cov
def print_progress(info):
print("\r\033[Kit={:6d} logz={:8f}".format(info['it'], info['logz']),
end='')
sys.stdout.flush()
class Ellipsoid(object):
def __init__(self, ctr, a):
self.n = len(ctr)
self.ctr = ctr
self.a = a
self.vol = vol_prefactor(self.n) / np.sqrt(np.linalg.det(a))
l, v = np.linalg.eigh(a)
self.axlens = 1. / np.sqrt(l)
self.axes = np.dot(v, np.diag(self.axlens))
def scale_to_vol(self, vol):
f = (vol / self.vol) ** (1.0 / self.n)
self.a *= f**-2
self.axlens *= f
self.axes *= f
self.vol = vol
def major_axis_endpoints(self):
i = np.argmax(self.axlens)
v = self.axes[:, i]
return self.ctr - v, self.ctr + v
def contains(self, x):
d = x - self.ctr
return np.dot(np.dot(d, self.a), d) <= 1.0
def randoffset(self, rstate=np.random):
return np.dot(self.axes, randsphere(self.n, rstate=rstate))
|
MIT License
|
borchero/pycave
|
pycave/core/estimator.py
|
Estimator.load
|
python
|
def load(cls: Type[E], path: Path) -> E:
estimator = cls()
with (path / "estimator.pickle").open("rb") as f:
estimator.set_params(pickle.load(f))
if (path / "config.json").exists():
model_cls = cls._get_model_class()
model = model_cls.load(path)
estimator.load_model(model)
return estimator
|
Loads the estimator and (if available) the fitted model. See :meth:`save` for more
information about the required filenames for loading.
Args:
path: The directory from which to load the estimator.
Returns:
The loaded estimator, either fitted or not, depending on the availability of the
``config.json`` file.
|
https://github.com/borchero/pycave/blob/8c19dbd7b0b123d9d2bcdd28913918cd6555b8a0/pycave/core/estimator.py#L137-L158
|
from __future__ import annotations
import inspect
import logging
import pickle
from abc import ABC
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Generic,
get_args,
get_origin,
Optional,
Sized,
Type,
TypeVar,
Union,
)
import numpy as np
import numpy.typing as npt
import pytorch_lightning as pl
import torch
from pytorch_lightning.plugins import DataParallelPlugin, DDP2Plugin, DDPSpawnPlugin
from torch.utils.data import DataLoader, Dataset
from pycave.data import (
DistributedTensorBatchSampler,
TensorBatchSampler,
TensorDataLoader,
UnrepeatedDistributedTensorBatchSampler,
)
from .exception import NotFittedError
from .module import ConfigModule
M = TypeVar("M", bound=ConfigModule)
E = TypeVar("E", bound="Estimator")
logger = logging.getLogger(__name__)
class Estimator(Generic[M], ABC):
_model: M
def __init__(
self,
*,
batch_size: Optional[int] = None,
num_workers: int = 0,
verbose: bool = False,
default_params: Optional[Dict[str, Any]] = None,
user_params: Optional[Dict[str, Any]] = None,
overwrite_params: Optional[Dict[str, Any]] = None,
):
self.batch_size = batch_size
self.num_workers = num_workers
self.verbose = verbose
self.trainer_params_user = user_params
self.trainer_params = {
**dict(
checkpoint_callback=False,
logger=False,
log_every_n_steps=1,
progress_bar_refresh_rate=(
None if logger.getEffectiveLevel() <= logging.INFO else 0
),
weights_summary=(
"top" if logger.getEffectiveLevel() <= logging.DEBUG else None
),
),
**(default_params or {}),
**(user_params or {}),
**(overwrite_params or {}),
}
@property
def model_(self) -> M:
return self._model
@property
def _is_fitted(self) -> bool:
try:
getattr(self, "model_")
return True
except NotFittedError:
return False
def load_model(self, model: M) -> None:
self._model = model
def save(self, path: Path) -> None:
assert path.is_dir(), "Estimators can only be saved to a directory."
with (path / "estimator.pickle").open("wb+") as f:
pickle.dump(self.get_params(), f)
if self._is_fitted:
self.model_.save(path)
@classmethod
|
MIT License
|
restran/hacker-scripts
|
misc/win_file_monitor/watchdog/events.py
|
PatternMatchingEventHandler.ignore_patterns
|
python
|
def ignore_patterns(self):
return self._ignore_patterns
|
(Read-only)
Patterns to ignore matching event paths.
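A small sketch of how the property simply reflects the constructor arguments; the import assumes the upstream watchdog package rather than the bundled copy in misc/win_file_monitor:
# Hedged sketch: the pattern properties echo what was passed to __init__.
from watchdog.events import PatternMatchingEventHandler

handler = PatternMatchingEventHandler(patterns=["*.py"],
                                      ignore_patterns=["*.pyc", "*~"],
                                      ignore_directories=True)
print(handler.patterns)          # ['*.py']
print(handler.ignore_patterns)   # ['*.pyc', '*~']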
|
https://github.com/restran/hacker-scripts/blob/30bbfd8bb97cda2b4762156aaf2973296f0e7cde/misc/win_file_monitor/watchdog/events.py#L401-L406
|
import os.path
import logging
import re
from pathtools.patterns import match_any_paths
from watchdog.utils import has_attribute
from watchdog.utils import unicode_paths
EVENT_TYPE_MOVED = 'moved'
EVENT_TYPE_DELETED = 'deleted'
EVENT_TYPE_CREATED = 'created'
EVENT_TYPE_MODIFIED = 'modified'
class FileSystemEvent(object):
event_type = None
is_directory = False
def __init__(self, src_path):
self._src_path = src_path
@property
def src_path(self):
return self._src_path
def __str__(self):
return self.__repr__()
def __repr__(self):
return ("<%(class_name)s: event_type=%(event_type)s, "
"src_path=%(src_path)r, "
"is_directory=%(is_directory)s>"
) % (dict(
class_name=self.__class__.__name__,
event_type=self.event_type,
src_path=self.src_path,
is_directory=self.is_directory))
@property
def key(self):
return (self.event_type, self.src_path, self.is_directory)
def __eq__(self, event):
return self.key == event.key
def __ne__(self, event):
return self.key != event.key
def __hash__(self):
return hash(self.key)
class FileSystemMovedEvent(FileSystemEvent):
event_type = EVENT_TYPE_MOVED
def __init__(self, src_path, dest_path):
super(FileSystemMovedEvent, self).__init__(src_path)
self._dest_path = dest_path
@property
def dest_path(self):
return self._dest_path
@property
def key(self):
return (self.event_type, self.src_path, self.dest_path, self.is_directory)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r, "
"is_directory=%(is_directory)s>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path,
is_directory=self.is_directory))
class FileDeletedEvent(FileSystemEvent):
event_type = EVENT_TYPE_DELETED
def __init__(self, src_path):
super(FileDeletedEvent, self).__init__(src_path)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)r>" % dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class FileModifiedEvent(FileSystemEvent):
event_type = EVENT_TYPE_MODIFIED
def __init__(self, src_path):
super(FileModifiedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class FileCreatedEvent(FileSystemEvent):
event_type = EVENT_TYPE_CREATED
def __init__(self, src_path):
super(FileCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class FileMovedEvent(FileSystemMovedEvent):
def __init__(self, src_path, dest_path):
super(FileMovedEvent, self).__init__(src_path, dest_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path))
class DirDeletedEvent(FileSystemEvent):
event_type = EVENT_TYPE_DELETED
is_directory = True
def __init__(self, src_path):
super(DirDeletedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirModifiedEvent(FileSystemEvent):
event_type = EVENT_TYPE_MODIFIED
is_directory = True
def __init__(self, src_path):
super(DirModifiedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirCreatedEvent(FileSystemEvent):
event_type = EVENT_TYPE_CREATED
is_directory = True
def __init__(self, src_path):
super(DirCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirMovedEvent(FileSystemMovedEvent):
is_directory = True
def __init__(self, src_path, dest_path):
super(DirMovedEvent, self).__init__(src_path, dest_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path))
class FileSystemEventHandler(object):
def dispatch(self, event):
self.on_any_event(event)
_method_map = {
EVENT_TYPE_MODIFIED: self.on_modified,
EVENT_TYPE_MOVED: self.on_moved,
EVENT_TYPE_CREATED: self.on_created,
EVENT_TYPE_DELETED: self.on_deleted,
}
event_type = event.event_type
_method_map[event_type](event)
    def on_any_event(self, event):
        pass
    def on_moved(self, event):
        pass
    def on_created(self, event):
        pass
    def on_deleted(self, event):
        pass
    def on_modified(self, event):
        pass
class PatternMatchingEventHandler(FileSystemEventHandler):
def __init__(self, patterns=None, ignore_patterns=None,
ignore_directories=False, case_sensitive=False):
super(PatternMatchingEventHandler, self).__init__()
self._patterns = patterns
self._ignore_patterns = ignore_patterns
self._ignore_directories = ignore_directories
self._case_sensitive = case_sensitive
@property
def patterns(self):
return self._patterns
@property
|
MIT License
|
openmotics/sdk
|
python/sdk.py
|
OpenMoticsApi.get_output_configurations
|
python
|
def get_output_configurations(self):
return self.exec_action("get_output_configurations")
|
Get all output_configurations.
:returns: list of output_configuration dict: contains 'id' (Id), 'floor' (Byte), 'module_type' (String[1]), 'name' (String[16]), 'timer' (Word), 'type' (Byte)
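Typical use, with hostname and credentials as placeholders; the call logs in lazily on first use (see exec_action), and the shape of the decoded result is taken from the docstring above rather than verified:
# Hedged sketch: list every configured output and print its id and name.
api = OpenMoticsApi("user", "s3cret", "192.168.0.10", verify_https=False)
configs = api.get_output_configurations()   # performs login automatically on first call
for cfg in configs:                         # per the docstring: dicts with 'id', 'name', 'type', ...
    print(cfg["id"], cfg["name"])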
|
https://github.com/openmotics/sdk/blob/7bdbacb60e26e1a5a645e6546fe8ae256e3268d4/python/sdk.py#L497-L503
|
import json
import requests
import random
import traceback
import time
class AuthenticationException(Exception):
def __init__(self):
self.msg = "The provided credentials are not valid."
def __str__(self):
return self.msg
class MaintenanceModeException(Exception):
def __init__(self):
self.msg = "The gateway is currently in maintenance mode."
def __str__(self):
return self.msg
class ApiException(Exception):
def __init__(self, msg):
self.msg = "Non successful api call: %s" % msg
def __str__(self):
return self.msg
class OpenMoticsApi:
def __init__(self, username, password, hostname, verify_https=False, port=443):
self.auth = { "username" : username, "password" : password }
self.hostname = hostname
self.verify_https = verify_https
self.port = port
self.token = None
def get_url(self, action):
return "https://%s:%s/%s" % (self.hostname, self.port, action)
def get_post_data(self, post_data):
d = post_data.copy()
if self.token != None:
d["token"] = self.token
return d
def fetch_url(self, action, post_data={}, get_params={}, json_decode=True):
url = self.get_url(action)
post_data = self.get_post_data(post_data)
print "Fetching url: %s" % url
r = requests.post(url, params=get_params, data=post_data, verify=self.verify_https)
if r.status_code == 401:
self.token = None
raise AuthenticationException()
elif r.status_code == 503:
raise MaintenanceModeException()
elif r.status_code == 200:
if json_decode:
msg = r.json()
if 'success' in msg and msg['success'] is False:
raise ApiException(msg)
else:
return msg
else:
return r.text
else:
raise Exception("Unknown status code: %s. Text: %s" % (r.status_code, r.text))
def login(self):
self.token = self.fetch_url("login", self.auth)["token"]
def exec_action(self, action, post_data={}, get_params={}, json_decode=True):
if self.token == None:
self.login()
try:
return self.fetch_url(action, post_data, get_params, json_decode)
except AuthenticationException:
self.login()
return self.fetch_url(action, post_data, get_params, json_decode)
def get_version(self):
return self.exec_action('get_version')
def get_status(self):
return self.exec_action('get_status')
def get_output_status(self):
return self.exec_action('get_output_status')
def get_thermostat_status(self):
return self.exec_action('get_thermostat_status')
def get_sensor_brightness_status(self):
return self.exec_action('get_sensor_brightness_status')
def get_sensor_humidity_status(self):
return self.exec_action('get_sensor_humidity_status')
def get_sensor_temperature_status(self):
return self.exec_action('get_sensor_temperature_status')
def set_output(self, id, on, dimmer=None, timer=None):
post_data = {'id' : id, 'is_on' : on}
if dimmer is not None:
post_data['dimmer'] = dimmer
if timer is not None:
post_data['timer'] = timer
return self.exec_action('set_output', post_data=post_data)
def set_all_lights_off(self):
return self.exec_action('set_all_lights_off')
def set_all_lights_floor_off(self, floor):
return self.exec_action('set_all_lights_floor_off', post_data={'floor': floor})
def set_all_lights_floor_on(self, floor):
return self.exec_action('set_all_lights_floor_on', post_data={'floor': floor})
def set_current_setpoint(self, thermostat, temperature):
return self.exec_action('set_current_setpoint', post_data={'thermostat': thermostat, 'temperature': temperature})
def set_thermostat_mode(self, on, automatic, setpoint):
return self.exec_action('set_thermostat_mode', post_data={'thermostat_on': on, 'automatic': automatic, 'setpoint': setpoint})
def do_group_action(self, group_action_id):
return self.exec_action('do_group_action', post_data={'group_action_id': group_action_id})
def module_discover_start(self):
return self.exec_action('module_discover_start')
def module_discover_stop(self):
return self.exec_action('module_discover_stop')
def get_modules(self):
return self.exec_action('get_modules')
def flash_leds(self, type, id):
return self.exec_action('flash_leds', post_data={'type': type, 'id': id})
def get_last_inputs(self):
return self.exec_action('get_last_inputs')
def get_pulse_counter_status(self):
return self.exec_action('get_pulse_counter_status')
def get_errors(self):
return self.exec_action('get_errors')
def master_clear_error_list(self):
return self.exec_action('master_clear_error_list')
def reset_master(self):
return self.exec_action('reset_master')
def get_power_modules(self):
return self.exec_action('get_power_modules')
def set_power_modules(self, modules):
return self.exec_action('set_power_modules', post_data={'modules': json.dumps(modules)})
def get_realtime_power(self):
return self.exec_action('get_realtime_power')
def get_total_energy(self):
return self.exec_action('get_total_energy')
def set_power_voltage(self, module_id, voltage):
return self.exec_action('set_power_voltage', post_data={'module_id': module_id, 'voltage': voltage})
def start_power_address_mode(self):
return self.exec_action('start_power_address_mode')
def stop_power_address_mode(self):
return self.exec_action('stop_power_address_mode')
def in_power_address_mode(self):
return self.exec_action('in_power_address_mode')
def set_timezone(self, timezone):
return self.exec_action('set_timezone', post_data={'timezone': timezone})
def get_timezone(self):
return self.exec_action('get_timezone')
def do_url_action(self, url, method='GET', headers=None, data=None, auth=None, timeout=10):
return self.exec_action('do_url_action',
post_data={ 'url':url, 'method':method, 'headers':headers, 'data':data,
'auth':auth, 'timeout':timeout })
def schedule_action(self, timestamp, action):
return self.exec_action('schedule_action', post_data = { 'timestamp' : timestamp,
'action' : json.dumps(action) })
def list_scheduled_actions(self):
return self.exec_action('list_scheduled_actions')
def remove_scheduled_action(self, id):
return self.exec_action('remove_scheduled_action', post_data = { 'id' : id })
def set_output_delayed(self, timestamp, description, output_nr, on, dimmer=None, timer=None):
action = { 'type' : 'basic', 'action' : 'set_output', 'description' : description,
'params' : { 'output_nr' : output_nr, 'is_on' : on,
'dimmer': dimmer, 'timer' : timer } }
return self.schedule_action(timestamp, action)
def set_all_lights_off_delayed(self, timestamp, description):
return self.schedule_action(timestamp, { 'type' : 'basic', 'action' : 'set_all_lights_off',
'description' : description })
def set_all_lights_floor_off_delayed(self, timestamp, description, floor):
action = { 'type' : 'basic', 'action' : 'set_all_lights_floor_off',
'description' : description, 'params' : { 'floor' : floor } }
return self.schedule_action(timestamp, action)
def set_all_lights_floor_on_delayed(self, timestamp, description, floor):
action = { 'type' : 'basic', 'action' : 'set_all_lights_floor_on',
'description' : description, 'params' : { 'floor' : floor } }
return self.schedule_action(timestamp, action)
def set_current_setpoint_delayed(self, timestamp, description, thermostat, temperature):
action = { 'type' : 'basic', 'action' : 'set_current_setpoint',
'description' : description,
'params' : { 'thermostat': thermostat, 'temperature': temperature } }
return self.schedule_action(timestamp, action)
def set_mode_delayed(self, timestamp, description, on, automatic, setpoint):
action = { 'type' : 'basic', 'action' : 'set_thermostat_mode',
'description' : description,
'params' : { 'thermostat_on': on, 'automatic': automatic, 'setpoint': setpoint } }
return self.schedule_action(timestamp, action)
def do_group_action_delayed(self, timestamp, description, group_action_id):
action = { 'type' : 'basic', 'action' : 'do_group_action',
'description' : description,
'params' : { 'group_action_id': group_action_id } }
return self.schedule_action(timestamp, action)
def get_output_configuration(self, id):
return self.exec_action("get_output_configuration", post_data = { "id" : id })
|
MIT License
|
docusign/docusign-python-client
|
docusign_esign/models/signer.py
|
Signer.client_user_id
|
python
|
def client_user_id(self, client_user_id):
self._client_user_id = client_user_id
|
Sets the client_user_id of this Signer.
Specifies whether the recipient is embedded or remote. If the `clientUserId` property is not null then the recipient is embedded. Note that if the `ClientUserId` property is set and either `SignerMustHaveAccount` or `SignerMustLoginToSign` property of the account settings is set to **true**, an error is generated on sending. Maximum length: 100 characters. # noqa: E501
:param client_user_id: The client_user_id of this Signer. # noqa: E501
:type: str
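A hedged sketch of marking a signer as embedded; the recipient details are invented for illustration:
# Hedged sketch: a non-null clientUserId makes the recipient an embedded signer.
from docusign_esign import Signer

signer = Signer(email="jane@example.com", name="Jane Doe", recipient_id="1")
signer.client_user_id = "1001"      # uses the setter shown above
print(signer.client_user_id)        # "1001"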
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/signer.py#L681-L690
|
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class Signer(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'access_code': 'str',
'access_code_metadata': 'PropertyMetadata',
'add_access_code_to_email': 'str',
'additional_notifications': 'list[RecipientAdditionalNotification]',
'agent_can_edit_email': 'str',
'agent_can_edit_name': 'str',
'allow_system_override_for_locked_recipient': 'str',
'auto_navigation': 'str',
'auto_responded_reason': 'str',
'bulk_recipients_uri': 'str',
'can_sign_offline': 'str',
'client_user_id': 'str',
'completed_count': 'str',
'creation_reason': 'str',
'custom_fields': 'list[str]',
'declined_date_time': 'str',
'declined_reason': 'str',
'default_recipient': 'str',
'delivered_date_time': 'str',
'delivery_method': 'str',
'delivery_method_metadata': 'PropertyMetadata',
'designator_id': 'str',
'designator_id_guid': 'str',
'document_visibility': 'list[DocumentVisibility]',
'email': 'str',
'email_metadata': 'PropertyMetadata',
'email_notification': 'RecipientEmailNotification',
'embedded_recipient_start_url': 'str',
'error_details': 'ErrorDetails',
'excluded_documents': 'list[str]',
'fax_number': 'str',
'fax_number_metadata': 'PropertyMetadata',
'first_name': 'str',
'first_name_metadata': 'PropertyMetadata',
'full_name': 'str',
'full_name_metadata': 'PropertyMetadata',
'id_check_configuration_name': 'str',
'id_check_configuration_name_metadata': 'PropertyMetadata',
'id_check_information_input': 'IdCheckInformationInput',
'identity_verification': 'RecipientIdentityVerification',
'inherit_email_notification_configuration': 'str',
'is_bulk_recipient': 'str',
'is_bulk_recipient_metadata': 'PropertyMetadata',
'last_name': 'str',
'last_name_metadata': 'PropertyMetadata',
'locked_recipient_phone_auth_editable': 'str',
'locked_recipient_sms_editable': 'str',
'name': 'str',
'name_metadata': 'PropertyMetadata',
'notary_id': 'str',
'note': 'str',
'note_metadata': 'PropertyMetadata',
'offline_attributes': 'OfflineAttributes',
'phone_authentication': 'RecipientPhoneAuthentication',
'phone_number': 'RecipientPhoneNumber',
'proof_file': 'RecipientProofFile',
'recipient_attachments': 'list[RecipientAttachment]',
'recipient_authentication_status': 'AuthenticationStatus',
'recipient_feature_metadata': 'list[FeatureAvailableMetadata]',
'recipient_id': 'str',
'recipient_id_guid': 'str',
'recipient_signature_providers': 'list[RecipientSignatureProvider]',
'recipient_supplies_tabs': 'str',
'recipient_type': 'str',
'recipient_type_metadata': 'PropertyMetadata',
'require_id_lookup': 'str',
'require_id_lookup_metadata': 'PropertyMetadata',
'require_signer_certificate': 'str',
'require_sign_on_paper': 'str',
'require_upload_signature': 'str',
'role_name': 'str',
'routing_order': 'str',
'routing_order_metadata': 'PropertyMetadata',
'sent_date_time': 'str',
'signature_info': 'RecipientSignatureInformation',
'signed_date_time': 'str',
'sign_in_each_location': 'str',
'sign_in_each_location_metadata': 'PropertyMetadata',
'signing_group_id': 'str',
'signing_group_id_metadata': 'PropertyMetadata',
'signing_group_name': 'str',
'signing_group_users': 'list[UserInfo]',
'sms_authentication': 'RecipientSMSAuthentication',
'social_authentications': 'list[SocialAuthentication]',
'status': 'str',
'status_code': 'str',
'suppress_emails': 'str',
'tabs': 'Tabs',
'template_locked': 'str',
'template_required': 'str',
'total_tab_count': 'str',
'user_id': 'str'
}
attribute_map = {
'access_code': 'accessCode',
'access_code_metadata': 'accessCodeMetadata',
'add_access_code_to_email': 'addAccessCodeToEmail',
'additional_notifications': 'additionalNotifications',
'agent_can_edit_email': 'agentCanEditEmail',
'agent_can_edit_name': 'agentCanEditName',
'allow_system_override_for_locked_recipient': 'allowSystemOverrideForLockedRecipient',
'auto_navigation': 'autoNavigation',
'auto_responded_reason': 'autoRespondedReason',
'bulk_recipients_uri': 'bulkRecipientsUri',
'can_sign_offline': 'canSignOffline',
'client_user_id': 'clientUserId',
'completed_count': 'completedCount',
'creation_reason': 'creationReason',
'custom_fields': 'customFields',
'declined_date_time': 'declinedDateTime',
'declined_reason': 'declinedReason',
'default_recipient': 'defaultRecipient',
'delivered_date_time': 'deliveredDateTime',
'delivery_method': 'deliveryMethod',
'delivery_method_metadata': 'deliveryMethodMetadata',
'designator_id': 'designatorId',
'designator_id_guid': 'designatorIdGuid',
'document_visibility': 'documentVisibility',
'email': 'email',
'email_metadata': 'emailMetadata',
'email_notification': 'emailNotification',
'embedded_recipient_start_url': 'embeddedRecipientStartURL',
'error_details': 'errorDetails',
'excluded_documents': 'excludedDocuments',
'fax_number': 'faxNumber',
'fax_number_metadata': 'faxNumberMetadata',
'first_name': 'firstName',
'first_name_metadata': 'firstNameMetadata',
'full_name': 'fullName',
'full_name_metadata': 'fullNameMetadata',
'id_check_configuration_name': 'idCheckConfigurationName',
'id_check_configuration_name_metadata': 'idCheckConfigurationNameMetadata',
'id_check_information_input': 'idCheckInformationInput',
'identity_verification': 'identityVerification',
'inherit_email_notification_configuration': 'inheritEmailNotificationConfiguration',
'is_bulk_recipient': 'isBulkRecipient',
'is_bulk_recipient_metadata': 'isBulkRecipientMetadata',
'last_name': 'lastName',
'last_name_metadata': 'lastNameMetadata',
'locked_recipient_phone_auth_editable': 'lockedRecipientPhoneAuthEditable',
'locked_recipient_sms_editable': 'lockedRecipientSmsEditable',
'name': 'name',
'name_metadata': 'nameMetadata',
'notary_id': 'notaryId',
'note': 'note',
'note_metadata': 'noteMetadata',
'offline_attributes': 'offlineAttributes',
'phone_authentication': 'phoneAuthentication',
'phone_number': 'phoneNumber',
'proof_file': 'proofFile',
'recipient_attachments': 'recipientAttachments',
'recipient_authentication_status': 'recipientAuthenticationStatus',
'recipient_feature_metadata': 'recipientFeatureMetadata',
'recipient_id': 'recipientId',
'recipient_id_guid': 'recipientIdGuid',
'recipient_signature_providers': 'recipientSignatureProviders',
'recipient_supplies_tabs': 'recipientSuppliesTabs',
'recipient_type': 'recipientType',
'recipient_type_metadata': 'recipientTypeMetadata',
'require_id_lookup': 'requireIdLookup',
'require_id_lookup_metadata': 'requireIdLookupMetadata',
'require_signer_certificate': 'requireSignerCertificate',
'require_sign_on_paper': 'requireSignOnPaper',
'require_upload_signature': 'requireUploadSignature',
'role_name': 'roleName',
'routing_order': 'routingOrder',
'routing_order_metadata': 'routingOrderMetadata',
'sent_date_time': 'sentDateTime',
'signature_info': 'signatureInfo',
'signed_date_time': 'signedDateTime',
'sign_in_each_location': 'signInEachLocation',
'sign_in_each_location_metadata': 'signInEachLocationMetadata',
'signing_group_id': 'signingGroupId',
'signing_group_id_metadata': 'signingGroupIdMetadata',
'signing_group_name': 'signingGroupName',
'signing_group_users': 'signingGroupUsers',
'sms_authentication': 'smsAuthentication',
'social_authentications': 'socialAuthentications',
'status': 'status',
'status_code': 'statusCode',
'suppress_emails': 'suppressEmails',
'tabs': 'tabs',
'template_locked': 'templateLocked',
'template_required': 'templateRequired',
'total_tab_count': 'totalTabCount',
'user_id': 'userId'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._access_code = None
self._access_code_metadata = None
self._add_access_code_to_email = None
self._additional_notifications = None
self._agent_can_edit_email = None
self._agent_can_edit_name = None
self._allow_system_override_for_locked_recipient = None
self._auto_navigation = None
self._auto_responded_reason = None
self._bulk_recipients_uri = None
self._can_sign_offline = None
self._client_user_id = None
self._completed_count = None
self._creation_reason = None
self._custom_fields = None
self._declined_date_time = None
self._declined_reason = None
self._default_recipient = None
self._delivered_date_time = None
self._delivery_method = None
self._delivery_method_metadata = None
self._designator_id = None
self._designator_id_guid = None
self._document_visibility = None
self._email = None
self._email_metadata = None
self._email_notification = None
self._embedded_recipient_start_url = None
self._error_details = None
self._excluded_documents = None
self._fax_number = None
self._fax_number_metadata = None
self._first_name = None
self._first_name_metadata = None
self._full_name = None
self._full_name_metadata = None
self._id_check_configuration_name = None
self._id_check_configuration_name_metadata = None
self._id_check_information_input = None
self._identity_verification = None
self._inherit_email_notification_configuration = None
self._is_bulk_recipient = None
self._is_bulk_recipient_metadata = None
self._last_name = None
self._last_name_metadata = None
self._locked_recipient_phone_auth_editable = None
self._locked_recipient_sms_editable = None
self._name = None
self._name_metadata = None
self._notary_id = None
self._note = None
self._note_metadata = None
self._offline_attributes = None
self._phone_authentication = None
self._phone_number = None
self._proof_file = None
self._recipient_attachments = None
self._recipient_authentication_status = None
self._recipient_feature_metadata = None
self._recipient_id = None
self._recipient_id_guid = None
self._recipient_signature_providers = None
self._recipient_supplies_tabs = None
self._recipient_type = None
self._recipient_type_metadata = None
self._require_id_lookup = None
self._require_id_lookup_metadata = None
self._require_signer_certificate = None
self._require_sign_on_paper = None
self._require_upload_signature = None
self._role_name = None
self._routing_order = None
self._routing_order_metadata = None
self._sent_date_time = None
self._signature_info = None
self._signed_date_time = None
self._sign_in_each_location = None
self._sign_in_each_location_metadata = None
self._signing_group_id = None
self._signing_group_id_metadata = None
self._signing_group_name = None
self._signing_group_users = None
self._sms_authentication = None
self._social_authentications = None
self._status = None
self._status_code = None
self._suppress_emails = None
self._tabs = None
self._template_locked = None
self._template_required = None
self._total_tab_count = None
self._user_id = None
self.discriminator = None
setattr(self, "_{}".format('access_code'), kwargs.get('access_code', None))
setattr(self, "_{}".format('access_code_metadata'), kwargs.get('access_code_metadata', None))
setattr(self, "_{}".format('add_access_code_to_email'), kwargs.get('add_access_code_to_email', None))
setattr(self, "_{}".format('additional_notifications'), kwargs.get('additional_notifications', None))
setattr(self, "_{}".format('agent_can_edit_email'), kwargs.get('agent_can_edit_email', None))
setattr(self, "_{}".format('agent_can_edit_name'), kwargs.get('agent_can_edit_name', None))
setattr(self, "_{}".format('allow_system_override_for_locked_recipient'), kwargs.get('allow_system_override_for_locked_recipient', None))
setattr(self, "_{}".format('auto_navigation'), kwargs.get('auto_navigation', None))
setattr(self, "_{}".format('auto_responded_reason'), kwargs.get('auto_responded_reason', None))
setattr(self, "_{}".format('bulk_recipients_uri'), kwargs.get('bulk_recipients_uri', None))
setattr(self, "_{}".format('can_sign_offline'), kwargs.get('can_sign_offline', None))
setattr(self, "_{}".format('client_user_id'), kwargs.get('client_user_id', None))
setattr(self, "_{}".format('completed_count'), kwargs.get('completed_count', None))
setattr(self, "_{}".format('creation_reason'), kwargs.get('creation_reason', None))
setattr(self, "_{}".format('custom_fields'), kwargs.get('custom_fields', None))
setattr(self, "_{}".format('declined_date_time'), kwargs.get('declined_date_time', None))
setattr(self, "_{}".format('declined_reason'), kwargs.get('declined_reason', None))
setattr(self, "_{}".format('default_recipient'), kwargs.get('default_recipient', None))
setattr(self, "_{}".format('delivered_date_time'), kwargs.get('delivered_date_time', None))
setattr(self, "_{}".format('delivery_method'), kwargs.get('delivery_method', None))
setattr(self, "_{}".format('delivery_method_metadata'), kwargs.get('delivery_method_metadata', None))
setattr(self, "_{}".format('designator_id'), kwargs.get('designator_id', None))
setattr(self, "_{}".format('designator_id_guid'), kwargs.get('designator_id_guid', None))
setattr(self, "_{}".format('document_visibility'), kwargs.get('document_visibility', None))
setattr(self, "_{}".format('email'), kwargs.get('email', None))
setattr(self, "_{}".format('email_metadata'), kwargs.get('email_metadata', None))
setattr(self, "_{}".format('email_notification'), kwargs.get('email_notification', None))
setattr(self, "_{}".format('embedded_recipient_start_url'), kwargs.get('embedded_recipient_start_url', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('excluded_documents'), kwargs.get('excluded_documents', None))
setattr(self, "_{}".format('fax_number'), kwargs.get('fax_number', None))
setattr(self, "_{}".format('fax_number_metadata'), kwargs.get('fax_number_metadata', None))
setattr(self, "_{}".format('first_name'), kwargs.get('first_name', None))
setattr(self, "_{}".format('first_name_metadata'), kwargs.get('first_name_metadata', None))
setattr(self, "_{}".format('full_name'), kwargs.get('full_name', None))
setattr(self, "_{}".format('full_name_metadata'), kwargs.get('full_name_metadata', None))
setattr(self, "_{}".format('id_check_configuration_name'), kwargs.get('id_check_configuration_name', None))
setattr(self, "_{}".format('id_check_configuration_name_metadata'), kwargs.get('id_check_configuration_name_metadata', None))
setattr(self, "_{}".format('id_check_information_input'), kwargs.get('id_check_information_input', None))
setattr(self, "_{}".format('identity_verification'), kwargs.get('identity_verification', None))
setattr(self, "_{}".format('inherit_email_notification_configuration'), kwargs.get('inherit_email_notification_configuration', None))
setattr(self, "_{}".format('is_bulk_recipient'), kwargs.get('is_bulk_recipient', None))
setattr(self, "_{}".format('is_bulk_recipient_metadata'), kwargs.get('is_bulk_recipient_metadata', None))
setattr(self, "_{}".format('last_name'), kwargs.get('last_name', None))
setattr(self, "_{}".format('last_name_metadata'), kwargs.get('last_name_metadata', None))
setattr(self, "_{}".format('locked_recipient_phone_auth_editable'), kwargs.get('locked_recipient_phone_auth_editable', None))
setattr(self, "_{}".format('locked_recipient_sms_editable'), kwargs.get('locked_recipient_sms_editable', None))
setattr(self, "_{}".format('name'), kwargs.get('name', None))
setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None))
setattr(self, "_{}".format('notary_id'), kwargs.get('notary_id', None))
setattr(self, "_{}".format('note'), kwargs.get('note', None))
setattr(self, "_{}".format('note_metadata'), kwargs.get('note_metadata', None))
setattr(self, "_{}".format('offline_attributes'), kwargs.get('offline_attributes', None))
setattr(self, "_{}".format('phone_authentication'), kwargs.get('phone_authentication', None))
setattr(self, "_{}".format('phone_number'), kwargs.get('phone_number', None))
setattr(self, "_{}".format('proof_file'), kwargs.get('proof_file', None))
setattr(self, "_{}".format('recipient_attachments'), kwargs.get('recipient_attachments', None))
setattr(self, "_{}".format('recipient_authentication_status'), kwargs.get('recipient_authentication_status', None))
setattr(self, "_{}".format('recipient_feature_metadata'), kwargs.get('recipient_feature_metadata', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None))
setattr(self, "_{}".format('recipient_signature_providers'), kwargs.get('recipient_signature_providers', None))
setattr(self, "_{}".format('recipient_supplies_tabs'), kwargs.get('recipient_supplies_tabs', None))
setattr(self, "_{}".format('recipient_type'), kwargs.get('recipient_type', None))
setattr(self, "_{}".format('recipient_type_metadata'), kwargs.get('recipient_type_metadata', None))
setattr(self, "_{}".format('require_id_lookup'), kwargs.get('require_id_lookup', None))
setattr(self, "_{}".format('require_id_lookup_metadata'), kwargs.get('require_id_lookup_metadata', None))
setattr(self, "_{}".format('require_signer_certificate'), kwargs.get('require_signer_certificate', None))
setattr(self, "_{}".format('require_sign_on_paper'), kwargs.get('require_sign_on_paper', None))
setattr(self, "_{}".format('require_upload_signature'), kwargs.get('require_upload_signature', None))
setattr(self, "_{}".format('role_name'), kwargs.get('role_name', None))
setattr(self, "_{}".format('routing_order'), kwargs.get('routing_order', None))
setattr(self, "_{}".format('routing_order_metadata'), kwargs.get('routing_order_metadata', None))
setattr(self, "_{}".format('sent_date_time'), kwargs.get('sent_date_time', None))
setattr(self, "_{}".format('signature_info'), kwargs.get('signature_info', None))
setattr(self, "_{}".format('signed_date_time'), kwargs.get('signed_date_time', None))
setattr(self, "_{}".format('sign_in_each_location'), kwargs.get('sign_in_each_location', None))
setattr(self, "_{}".format('sign_in_each_location_metadata'), kwargs.get('sign_in_each_location_metadata', None))
setattr(self, "_{}".format('signing_group_id'), kwargs.get('signing_group_id', None))
setattr(self, "_{}".format('signing_group_id_metadata'), kwargs.get('signing_group_id_metadata', None))
setattr(self, "_{}".format('signing_group_name'), kwargs.get('signing_group_name', None))
setattr(self, "_{}".format('signing_group_users'), kwargs.get('signing_group_users', None))
setattr(self, "_{}".format('sms_authentication'), kwargs.get('sms_authentication', None))
setattr(self, "_{}".format('social_authentications'), kwargs.get('social_authentications', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('status_code'), kwargs.get('status_code', None))
setattr(self, "_{}".format('suppress_emails'), kwargs.get('suppress_emails', None))
setattr(self, "_{}".format('tabs'), kwargs.get('tabs', None))
setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None))
setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None))
setattr(self, "_{}".format('total_tab_count'), kwargs.get('total_tab_count', None))
setattr(self, "_{}".format('user_id'), kwargs.get('user_id', None))
@property
def access_code(self):
return self._access_code
@access_code.setter
def access_code(self, access_code):
self._access_code = access_code
@property
def access_code_metadata(self):
return self._access_code_metadata
@access_code_metadata.setter
def access_code_metadata(self, access_code_metadata):
self._access_code_metadata = access_code_metadata
@property
def add_access_code_to_email(self):
return self._add_access_code_to_email
@add_access_code_to_email.setter
def add_access_code_to_email(self, add_access_code_to_email):
self._add_access_code_to_email = add_access_code_to_email
@property
def additional_notifications(self):
return self._additional_notifications
@additional_notifications.setter
def additional_notifications(self, additional_notifications):
self._additional_notifications = additional_notifications
@property
def agent_can_edit_email(self):
return self._agent_can_edit_email
@agent_can_edit_email.setter
def agent_can_edit_email(self, agent_can_edit_email):
self._agent_can_edit_email = agent_can_edit_email
@property
def agent_can_edit_name(self):
return self._agent_can_edit_name
@agent_can_edit_name.setter
def agent_can_edit_name(self, agent_can_edit_name):
self._agent_can_edit_name = agent_can_edit_name
@property
def allow_system_override_for_locked_recipient(self):
return self._allow_system_override_for_locked_recipient
@allow_system_override_for_locked_recipient.setter
def allow_system_override_for_locked_recipient(self, allow_system_override_for_locked_recipient):
self._allow_system_override_for_locked_recipient = allow_system_override_for_locked_recipient
@property
def auto_navigation(self):
return self._auto_navigation
@auto_navigation.setter
def auto_navigation(self, auto_navigation):
self._auto_navigation = auto_navigation
@property
def auto_responded_reason(self):
return self._auto_responded_reason
@auto_responded_reason.setter
def auto_responded_reason(self, auto_responded_reason):
self._auto_responded_reason = auto_responded_reason
@property
def bulk_recipients_uri(self):
return self._bulk_recipients_uri
@bulk_recipients_uri.setter
def bulk_recipients_uri(self, bulk_recipients_uri):
self._bulk_recipients_uri = bulk_recipients_uri
@property
def can_sign_offline(self):
return self._can_sign_offline
@can_sign_offline.setter
def can_sign_offline(self, can_sign_offline):
self._can_sign_offline = can_sign_offline
@property
def client_user_id(self):
return self._client_user_id
@client_user_id.setter
|
MIT License
|
opendilab/di-star
|
ctools/torch_utils/network/activation.py
|
GLU.__init__
|
python
|
def __init__(self, input_dim, output_dim, context_dim, input_type='fc'):
super(GLU, self).__init__()
assert (input_type in ['fc', 'conv2d'])
if input_type == 'fc':
self.layer1 = nn.Linear(context_dim, input_dim)
self.layer2 = nn.Linear(input_dim, output_dim)
elif input_type == 'conv2d':
self.layer1 = nn.Conv2d(context_dim, input_dim, 1, 1, 0)
self.layer2 = nn.Conv2d(input_dim, output_dim, 1, 1, 0)
|
r"""
Overview:
Init glu
Arguments:
- input_dim (:obj:`int`): the input dimension
- output_dim (:obj:`int`): the output dimension
- context_dim (:obj:`int`): the context dimension
- input_type (:obj:`str`): the type of input, now support ['fc', 'conv2d']
|
https://github.com/opendilab/di-star/blob/f12d79403488e7df0498d7b116fc23a67506112b/ctools/torch_utils/network/activation.py#L32-L49
|
import torch
import torch.nn as nn
class GLU(nn.Module):
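A minimal construction sketch (not from the repository): assuming the __init__ above is attached to the GLU class from the context, the 'fc' variant simply wires two linear layers, so their shapes can be inspected right after construction. forward() is not part of this excerpt, so only the constructor is exercised.
# Hypothetical dimensions, chosen only for illustration.
glu = GLU(input_dim=64, output_dim=32, context_dim=16, input_type='fc')
print(glu.layer1)  # Linear(in_features=16, out_features=64, bias=True)
print(glu.layer2)  # Linear(in_features=64, out_features=32, bias=True)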
|
Apache License 2.0
|
rpotter12/whatsapp-play
|
tests/test_func.py
|
safe_rmpath
|
python
|
def safe_rmpath(path):
def retry_fun(fun):
stop_at = time.time() + 1
while time.time() < stop_at:
try:
return fun()
except FileNotFoundError:
pass
try:
st = os.stat(path)
if stat.S_ISDIR(st.st_mode):
fun = functools.partial(shutil.rmtree, path)
else:
fun = functools.partial(os.remove, path)
if POSIX:
fun()
else:
retry_fun(fun)
except FileNotFoundError:
pass
|
Convenience function for removing temporary test files or dirs
|
https://github.com/rpotter12/whatsapp-play/blob/f925c7ac84f8cf4a962788cf30712073b955255b/tests/test_func.py#L130-L155
|
import functools
import os
import shutil
import stat
import subprocess
import sys
import time
import psutil
from psutil import MACOS
from psutil import POSIX
from psutil import WINDOWS
from psutil._compat import which
__all__ = [
'DEVNULL' , 'PYTHON_EXE', 'TESTFILE_PREFIX' , 'TESTFN',
'get_test_subprocess',
'safe_rmpath' ,
'wait_for_pid', 'wait_for_file',
]
TESTFILE_PREFIX = '$testfn'
if os.name == 'java':
TESTFILE_PREFIX = '$psutil-test-'
else:
TESTFILE_PREFIX = '@psutil-test-'
TESTFN = os.path.join(os.path.realpath(os.getcwd()), TESTFILE_PREFIX)
TESTFN = TESTFN + str(os.getpid())
_TESTFN = TESTFN + '-internal'
def _get_py_exe():
def attempt(exe):
try:
subprocess.check_call(
[exe, "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception:
return None
else:
return exe
if MACOS:
exe = attempt(sys.executable) or attempt(os.path.realpath(sys.executable)) or attempt(which("python%s.%s" % sys.version_info[:2])) or attempt(psutil.Process().exe())
if not exe:
raise ValueError("can't find python exe real abspath")
return exe
else:
exe = os.path.realpath(sys.executable)
assert os.path.exists(exe), exe
return exe
PYTHON_EXE = _get_py_exe()
DEVNULL = open(os.devnull, 'r+')
_subprocesses_started = set()
def get_test_subprocess(cmd=None, **kwds):
kwds.setdefault("stdin", DEVNULL)
kwds.setdefault("stdout", DEVNULL)
kwds.setdefault("cwd", os.getcwd())
kwds.setdefault("env", os.environ)
if WINDOWS:
CREATE_NO_WINDOW = 0x8000000
kwds.setdefault("creationflags", CREATE_NO_WINDOW)
if cmd is None:
safe_rmpath(_TESTFN)
pyline = "from time import sleep;" "open(r'%s', 'w').close();" "sleep(30);" % _TESTFN
cmd = [PYTHON_EXE, "-c", pyline]
sproc = subprocess.Popen(cmd, **kwds)
_subprocesses_started.add(sproc)
else:
sproc = subprocess.Popen(cmd, **kwds)
_subprocesses_started.add(sproc)
wait_for_pid(sproc.pid)
return sproc
def wait_for_pid(pid):
psutil.Process(pid)
if WINDOWS:
time.sleep(0.01)
def wait_for_file(fname, delete=True, empty=False):
with open(fname, "rb") as f:
data = f.read()
if not empty:
assert data
if delete:
safe_rmpath(fname)
return data
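A short usage sketch (the path is just the module's TESTFN scratch path): safe_rmpath() deletes a file or directory tree and silently ignores a missing path, so it can be called unconditionally in test teardown.
# Create a throwaway file, then remove it twice; the second call is a no-op.
with open(TESTFN, "w") as f:
    f.write("scratch")
safe_rmpath(TESTFN)   # removes the file (retried briefly on non-POSIX systems)
safe_rmpath(TESTFN)   # already gone: FileNotFoundError is swallowed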
|
MIT License
|
happyleavesaoc/python-snapcast
|
snapcast/control/server.py
|
Snapserver.client
|
python
|
def client(self, client_identifier):
return self._clients[client_identifier]
|
Get a client.
|
https://github.com/happyleavesaoc/python-snapcast/blob/891ebb6001ad98f668668a77716fdeccb9a8cca1/snapcast/control/server.py#L203-L205
|
import asyncio
import logging
from packaging import version
from snapcast.control.client import Snapclient
from snapcast.control.group import Snapgroup
from snapcast.control.protocol import SERVER_ONDISCONNECT, SnapcastProtocol
from snapcast.control.stream import Snapstream
_LOGGER = logging.getLogger(__name__)
CONTROL_PORT = 1705
SERVER_GETSTATUS = 'Server.GetStatus'
SERVER_GETRPCVERSION = 'Server.GetRPCVersion'
SERVER_DELETECLIENT = 'Server.DeleteClient'
SERVER_ONUPDATE = 'Server.OnUpdate'
CLIENT_GETSTATUS = 'Client.GetStatus'
CLIENT_SETNAME = 'Client.SetName'
CLIENT_SETLATENCY = 'Client.SetLatency'
CLIENT_SETSTREAM = 'Client.SetStream'
CLIENT_SETVOLUME = 'Client.SetVolume'
CLIENT_ONCONNECT = 'Client.OnConnect'
CLIENT_ONDISCONNECT = 'Client.OnDisconnect'
CLIENT_ONVOLUMECHANGED = 'Client.OnVolumeChanged'
CLIENT_ONLATENCYCHANGED = 'Client.OnLatencyChanged'
CLIENT_ONNAMECHANGED = 'Client.OnNameChanged'
GROUP_GETSTATUS = 'Group.GetStatus'
GROUP_SETMUTE = 'Group.SetMute'
GROUP_SETSTREAM = 'Group.SetStream'
GROUP_SETCLIENTS = 'Group.SetClients'
GROUP_SETNAME = 'Group.SetName'
GROUP_ONMUTE = 'Group.OnMute'
GROUP_ONSTREAMCHANGED = 'Group.OnStreamChanged'
STREAM_SETMETA = 'Stream.SetMeta'
STREAM_ONUPDATE = 'Stream.OnUpdate'
STREAM_ONMETA = 'Stream.OnMetadata'
SERVER_RECONNECT_DELAY = 5
_EVENTS = [SERVER_ONUPDATE, CLIENT_ONVOLUMECHANGED, CLIENT_ONLATENCYCHANGED,
CLIENT_ONNAMECHANGED, CLIENT_ONCONNECT, CLIENT_ONDISCONNECT,
GROUP_ONMUTE, GROUP_ONSTREAMCHANGED, STREAM_ONUPDATE, STREAM_ONMETA]
_METHODS = [SERVER_GETSTATUS, SERVER_GETRPCVERSION, SERVER_DELETECLIENT,
SERVER_DELETECLIENT, CLIENT_GETSTATUS, CLIENT_SETNAME,
CLIENT_SETLATENCY, CLIENT_SETSTREAM, CLIENT_SETVOLUME,
GROUP_GETSTATUS, GROUP_SETMUTE, GROUP_SETSTREAM, GROUP_SETCLIENTS,
GROUP_SETNAME, STREAM_SETMETA]
_VERSIONS = {
GROUP_SETNAME: '0.16.0',
}
class ServerVersionError(NotImplementedError):
pass
class Snapserver(object):
def __init__(self, loop, host, port=CONTROL_PORT, reconnect=False):
self._loop = loop
self._port = port
self._reconnect = reconnect
self._clients = {}
self._streams = {}
self._groups = {}
self._host = host
self._version = None
self._protocol = None
self._callbacks = {
CLIENT_ONCONNECT: self._on_client_connect,
CLIENT_ONDISCONNECT: self._on_client_disconnect,
CLIENT_ONVOLUMECHANGED: self._on_client_volume_changed,
CLIENT_ONNAMECHANGED: self._on_client_name_changed,
CLIENT_ONLATENCYCHANGED: self._on_client_latency_changed,
GROUP_ONMUTE: self._on_group_mute,
GROUP_ONSTREAMCHANGED: self._on_group_stream_changed,
STREAM_ONMETA: self._on_stream_meta,
STREAM_ONUPDATE: self._on_stream_update,
SERVER_ONDISCONNECT: self._on_server_disconnect,
SERVER_ONUPDATE: self._on_server_update
}
self._on_update_callback_func = None
self._on_connect_callback_func = None
self._on_disconnect_callback_func = None
self._new_client_callback_func = None
@asyncio.coroutine
def start(self):
yield from self._do_connect()
_LOGGER.info('connected to snapserver on %s:%s', self._host, self._port)
status = yield from self.status()
self.synchronize(status)
self._on_server_connect()
@asyncio.coroutine
def _do_connect(self):
_, self._protocol = yield from self._loop.create_connection(
lambda: SnapcastProtocol(self._callbacks), self._host, self._port)
def _reconnect_cb(self):
@asyncio.coroutine
def try_reconnect():
try:
yield from self._do_connect()
except IOError:
self._loop.call_later(SERVER_RECONNECT_DELAY,
self._reconnect_cb)
asyncio.ensure_future(try_reconnect())
@asyncio.coroutine
def _transact(self, method, params=None):
result = yield from self._protocol.request(method, params)
return result
@property
def version(self):
return self._version
@asyncio.coroutine
def status(self):
result = yield from self._transact(SERVER_GETSTATUS)
return result
def rpc_version(self):
return self._transact(SERVER_GETRPCVERSION)
@asyncio.coroutine
def delete_client(self, identifier):
params = {'id': identifier}
response = yield from self._transact(SERVER_DELETECLIENT, params)
self.synchronize(response)
def client_name(self, identifier, name):
return self._request(CLIENT_SETNAME, identifier, 'name', name)
def client_latency(self, identifier, latency):
return self._request(CLIENT_SETLATENCY, identifier, 'latency', latency)
def client_volume(self, identifier, volume):
return self._request(CLIENT_SETVOLUME, identifier, 'volume', volume)
def client_status(self, identifier):
return self._request(CLIENT_GETSTATUS, identifier, 'client')
def group_status(self, identifier):
return self._request(GROUP_GETSTATUS, identifier, 'group')
def group_mute(self, identifier, status):
return self._request(GROUP_SETMUTE, identifier, 'mute', status)
def group_stream(self, identifier, stream_id):
return self._request(GROUP_SETSTREAM, identifier, 'stream_id', stream_id)
def group_clients(self, identifier, clients):
return self._request(GROUP_SETCLIENTS, identifier, 'clients', clients)
def group_name(self, identifier, name):
self._version_check(GROUP_SETNAME)
return self._request(GROUP_SETNAME, identifier, 'name', name)
def stream_setmeta(self, identifier, meta):
return self._request(STREAM_SETMETA, identifier, 'meta', meta)
def group(self, group_identifier):
return self._groups[group_identifier]
def stream(self, stream_identifier):
return self._streams[stream_identifier]
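An illustrative sketch only (the host address and client identifier are placeholders): after start() synchronizes the server status, client() is a plain dictionary lookup keyed by the identifier the snapserver reports for each client.
loop = asyncio.get_event_loop()
server = Snapserver(loop, '192.168.1.10')
loop.run_until_complete(server.start())
snapclient = server.client('00:11:22:33:44:55')  # hypothetical client id
print(snapclient)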
|
MIT License
|
tobkra96/music_led_strip_control
|
server/libs/dsp.py
|
Melbank.compute_melmat
|
python
|
def compute_melmat(self, num_mel_bands=12, freq_min=64, freq_max=8000,
num_fft_bands=513, sample_rate=16000):
center_frequencies_mel, lower_edges_mel, upper_edges_mel = self.melfrequencies_mel_filterbank(
num_mel_bands,
freq_min,
freq_max,
num_fft_bands
)
center_frequencies_hz = self.mel_to_hertz(center_frequencies_mel)
lower_edges_hz = self.mel_to_hertz(lower_edges_mel)
upper_edges_hz = self.mel_to_hertz(upper_edges_mel)
freqs = linspace(0.0, sample_rate / 2.0, num_fft_bands)
melmat = zeros((num_mel_bands, num_fft_bands))
for imelband, (center, lower, upper) in enumerate(zip(
center_frequencies_hz, lower_edges_hz, upper_edges_hz)):
left_slope = (freqs >= lower) == (freqs <= center)
melmat[imelband, left_slope] = (
(freqs[left_slope] - lower) / (center - lower)
)
right_slope = (freqs >= center) == (freqs <= upper)
melmat[imelband, right_slope] = (
(upper - freqs[right_slope]) / (upper - center)
)
return melmat, (center_frequencies_mel, freqs)
|
Returns the transformation matrix for the mel spectrum.
Parameters
----------
num_mel_bands : int
Number of mel bands. Number of rows in melmat.
    Default: 12
freq_min : scalar
Minimum frequency for the first band.
Default: 64
freq_max : scalar
Maximum frequency for the last band.
Default: 8000
num_fft_bands : int
    Number of FFT frequency bands. This is NFFT/2+1!
number of columns in melmat.
Default: 513 (this means NFFT=1024)
sample_rate : scalar
Sample rate for the signals that will be used.
    Default: 16000
Returns
-------
melmat : ndarray
Transformation matrix for the mel spectrum.
    Use this with FFT spectra of num_fft_bands length
    and multiply the spectrum with the melmat;
    this will transform your FFT spectrum
    to a mel spectrum.
frequencies : tuple (ndarray <num_mel_bands>, ndarray <num_fft_bands>)
Center frequencies of the mel bands, center frequencies of fft spectrum.
|
https://github.com/tobkra96/music_led_strip_control/blob/33d6e9ca3c5cc699f1f700e6bf00727fa2f65f3c/server/libs/dsp.py#L239-L297
|
from scipy.ndimage.filters import gaussian_filter1d
from numpy import abs, arange, linspace, zeros
from math import log
import numpy as np
class DSP():
def __init__(self, config, device_config=None):
self._config = config
self._device_config = device_config
n_fft_bins = self._config["general_settings"]["n_fft_bins"]
min_volume_threshold = self._config["general_settings"]["min_volume_threshold"]
frames_per_buffer = self._config["general_settings"]["frames_per_buffer"]
n_rolling_history = self._config["general_settings"]["n_rolling_history"]
if device_config is None:
led_count = 200
else:
led_count = self._device_config["led_count"]
self.fft_plot_filter = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.5, alpha_rise=0.99)
self.mel_gain = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.01, alpha_rise=0.99)
self.mel_smoothing = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.5, alpha_rise=0.99)
self.gain = ExpFilter(np.tile(0.01, n_fft_bins), alpha_decay=0.001, alpha_rise=0.99)
self.r_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.2, alpha_rise=0.99)
self.g_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.05, alpha_rise=0.3)
self.b_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.1, alpha_rise=0.5)
self.common_mode = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.99, alpha_rise=0.01)
self.p_filt = ExpFilter(np.tile(1, (3, led_count // 2)), alpha_decay=0.1, alpha_rise=0.99)
self.volume = ExpFilter(min_volume_threshold, alpha_decay=0.02, alpha_rise=0.02)
self.p = np.tile(1.0, (3, led_count // 2))
self.samples_per_frame = int(frames_per_buffer)
self.y_roll = np.random.rand(n_rolling_history, self.samples_per_frame) / 1e16
self.fft_window = np.hamming(int(frames_per_buffer) * n_rolling_history)
self.samples = None
self.mel_y = None
self.mel_x = None
self.melbank = Melbank()
self.create_mel_bank()
def update(self, audio_samples):
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
audio_data = {}
y = audio_samples / 2.0**15
self.y_roll[:-1] = self.y_roll[1:]
self.y_roll[-1, :] = np.copy(y)
y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)
vol = np.max(np.abs(y_data))
N = len(y_data)
N_zeros = 2**int(np.ceil(np.log2(N))) - N
y_data *= self.fft_window
y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
mel = np.atleast_2d(YS).T * self.mel_y.T
mel = np.sum(mel, axis=0)
mel = mel**2.0
self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
mel /= self.mel_gain.value
mel = self.mel_smoothing.update(mel)
x = np.linspace(min_frequency, max_frequency, len(mel))
y = self.fft_plot_filter.update(mel)
audio_data["mel"] = mel
audio_data["vol"] = vol
audio_data["x"] = x
audio_data["y"] = y
return audio_data
def rfft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.abs(np.fft.rfft(data * window))
xs = np.fft.rfftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def fft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.fft.fft(data * window)
xs = np.fft.fftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def create_mel_bank(self):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
frames_per_buffer = self._config["general_settings"]["frames_per_buffer"]
n_rolling_history = self._config["general_settings"]["n_rolling_history"]
n_fft_bins = self._config["general_settings"]["n_fft_bins"]
samples = int(frames_per_buffer * (n_rolling_history / 2))
self.mel_y, (_, self.mel_x) = self.melbank.compute_melmat(
num_mel_bands=n_fft_bins,
freq_min=min_frequency,
freq_max=max_frequency,
num_fft_bands=samples,
sample_rate=default_sample_rate
)
class ExpFilter():
def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
assert 0.0 < alpha_decay < 1.0, 'Invalid decay smoothing factor.'
assert 0.0 < alpha_rise < 1.0, 'Invalid rise smoothing factor.'
self.alpha_decay = alpha_decay
self.alpha_rise = alpha_rise
self.value = val
def update(self, value):
if isinstance(self.value, (list, np.ndarray, tuple)):
alpha = value - self.value
alpha[alpha > 0.0] = self.alpha_rise
alpha[alpha <= 0.0] = self.alpha_decay
else:
alpha = self.alpha_rise if value > self.value else self.alpha_decay
self.value = alpha * value + (1.0 - alpha) * self.value
return self.value
class Melbank():
def hertz_to_mel(self, freq):
return 3340.0 * log(1 + (freq / 250.0), 9)
def mel_to_hertz(self, mel):
return 250.0 * (9**(mel / 3340.0)) - 250.0
def melfrequencies_mel_filterbank(self, num_bands, freq_min, freq_max, num_fft_bands):
mel_max = self.hertz_to_mel(freq_max)
mel_min = self.hertz_to_mel(freq_min)
delta_mel = abs(mel_max - mel_min) / (num_bands + 1.0)
frequencies_mel = mel_min + delta_mel * arange(0, num_bands + 2)
lower_edges_mel = frequencies_mel[:-2]
upper_edges_mel = frequencies_mel[2:]
center_frequencies_mel = frequencies_mel[1:-1]
return center_frequencies_mel, lower_edges_mel, upper_edges_mel
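A standalone sketch of compute_melmat (the values are illustrative): build a 12-band filterbank for a 513-bin rFFT at 16 kHz, then apply it to a magnitude spectrum with a matrix product to obtain a mel spectrum.
melbank = Melbank()
melmat, (centers_mel, fft_freqs) = melbank.compute_melmat(
    num_mel_bands=12, freq_min=64, freq_max=8000,
    num_fft_bands=513, sample_rate=16000)
spectrum = np.abs(np.fft.rfft(np.random.randn(1024)))  # 513 FFT bins
mel_spectrum = melmat.dot(spectrum)                     # shape (12,)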
|
MIT License
|
adrobinoga/pyzatt
|
pyzatt/zkmodules/terminal.py
|
TerminalMixin.get_firmware_version
|
python
|
def get_firmware_version(self):
self.send_command(DEFS.CMD_GET_VERSION)
self.recv_reply()
return self.last_payload_data.decode('ascii')
|
Returns the firmware version.
:return: String.
|
https://github.com/adrobinoga/pyzatt/blob/dc30714ed641388f53537319f6c0e7bd8dba544a/pyzatt/zkmodules/terminal.py#L240-L248
|
import socket
import struct
import pyzatt.zkmodules.defs as DEFS
import pyzatt.misc as misc
class TerminalMixin:
def connect_net(self, ip_addr, dev_port):
self.soc_zk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.soc_zk.connect((ip_addr, dev_port))
self.send_command(DEFS.CMD_CONNECT)
self.recv_reply()
self.session_id = self.last_session_code
self.set_device_info('SDKBuild', '1')
self.connected_flg = self.recvd_ack()
return self.connected_flg
def disconnect(self):
self.send_command(DEFS.CMD_EXIT)
self.recv_reply()
self.soc_zk.close()
self.connected_flg = False
return self.recvd_ack()
def get_device_time(self):
self.send_command(DEFS.CMD_GET_TIME)
self.recv_reply()
return misc.decode_time(self.last_payload_data)
def set_device_time(self, t=misc.datetime.datetime.now()):
self.send_command(DEFS.CMD_SET_TIME, data=misc.encode_time(t))
self.recv_reply()
return self.recvd_ack()
def get_device_status(self, stat_keys):
self.send_command(DEFS.CMD_GET_FREE_SIZES)
self.recv_reply()
self.dev_status = self.last_payload_data
for k in stat_keys:
try:
stat_keys[k] = self.read_status(DEFS.STATUS[k])
except struct.error:
print("Failed to read field: {0}".format(k))
stat_keys[k] = -1
return stat_keys
def read_status(self, p):
return struct.unpack('<I', self.dev_status[p: p + 4])[0]
def read_attlog_count(self):
return self.read_status(DEFS.STATUS['attlog_count'])
def read_user_count(self):
return self.read_status(DEFS.STATUS['user_count'])
def get_device_info(self, param_name):
self.send_command(DEFS.CMD_OPTIONS_RRQ,
bytearray("{0}\x00".format(param_name), 'ascii'))
self.recv_reply()
return self.last_payload_data.decode('ascii').split('=')[-1]
def set_device_info(self, param_name, new_value):
self.send_command(DEFS.CMD_OPTIONS_WRQ, bytearray(
"{0}={1}\x00".format(param_name, new_value), 'ascii'))
self.recv_reply()
ack1 = self.recvd_ack()
self.send_command(DEFS.CMD_REFRESHOPTION)
self.recv_reply()
ack2 = self.recvd_ack()
return ack1 and ack2
def get_serial_number(self):
return self.get_device_info("~SerialNumber")
def get_product_code(self):
return self.get_device_info("~DeviceName")
def get_cardfun(self):
self.get_device_info("~IsOnlyRFMachine")
return self.get_device_info("~RFCardOn")
def get_vendor(self):
return self.get_device_info("~OEMVendor")
def get_product_time(self):
return self.get_device_info("~ProductTime")
def get_platform(self):
return self.get_device_info("~Platform")
def get_pinwidth(self):
return int(self.get_device_info('~PIN2Width').replace('\x00', ''))
|
MIT License
|
attzonko/mmpy_bot
|
mmpy_bot/threadpool.py
|
ThreadPool.__init__
|
python
|
def __init__(self, num_workers: int):
self.num_workers = num_workers
self.alive = False
self._queue = Queue()
self._busy_workers = Queue()
self._threads = []
|
Threadpool class to easily specify a number of worker threads and assign work
to any of them.
Arguments:
- num_workers: int, how many threads to run simultaneously.
|
https://github.com/attzonko/mmpy_bot/blob/2c8d8c3f08c9a006186a34ce69239509400301aa/mmpy_bot/threadpool.py#L14-L25
|
import asyncio
import logging
import threading
import time
from queue import Queue
from mmpy_bot.scheduler import default_scheduler
from mmpy_bot.webhook_server import WebHookServer
log = logging.getLogger("mmpy.threadpool")
class ThreadPool(object):
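A minimal construction sketch: the work queue and busy-worker queue start empty and no threads exist yet; workers are only created by the start/worker machinery, which is not part of this excerpt.
pool = ThreadPool(num_workers=4)
print(pool.num_workers, pool.alive)   # 4 False
print(pool._queue.empty())            # True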
|
MIT License
|
tensorpack/tensorpack
|
tensorpack/callbacks/monitor.py
|
Monitors.put_scalar
|
python
|
def put_scalar(self, name, val):
if isinstance(val, np.floating):
val = float(val)
if isinstance(val, np.integer):
val = int(val)
self._dispatch(lambda m: m.process_scalar(name, val))
s = create_scalar_summary(name, val)
self._dispatch(lambda m: m.process_summary(s))
|
Put a scalar.
|
https://github.com/tensorpack/tensorpack/blob/1a79d595f7eda9dc9dc8428f4461680ed2222ab6/tensorpack/callbacks/monitor.py#L163-L173
|
import json
import numpy as np
import operator
import os
import re
import shutil
import time
from collections import defaultdict
from datetime import datetime
import six
import threading
from ..compat import tfv1 as tf
from ..libinfo import __git_version__
from ..tfutils.summary import create_image_summary, create_scalar_summary
from ..utils import fs, logger
from ..utils.develop import HIDE_DOC
from .base import Callback
__all__ = ['MonitorBase', 'Monitors',
'TFEventWriter', 'JSONWriter',
'ScalarPrinter', 'SendMonitorData',
'CometMLMonitor']
def image_to_nhwc(arr):
if arr.ndim == 4:
pass
elif arr.ndim == 3:
if arr.shape[-1] in [1, 3, 4]:
arr = arr[np.newaxis, :]
else:
arr = arr[:, :, :, np.newaxis]
elif arr.ndim == 2:
arr = arr[np.newaxis, :, :, np.newaxis]
else:
raise ValueError("Array of shape {} is not an image!".format(arr.shape))
return arr
class MonitorBase(Callback):
_chief_only = False
def setup_graph(self, trainer):
self.trainer = trainer
self.graph = tf.get_default_graph()
self._setup_graph()
def _setup_graph(self):
pass
def process_summary(self, summary):
pass
def process(self, name, val):
pass
def process_scalar(self, name, val):
pass
def process_image(self, name, val):
pass
def process_event(self, evt):
pass
class NoOpMonitor(MonitorBase):
def __init__(self, name=None):
self._name = name
def __str__(self):
if self._name is None:
return "NoOpMonitor"
return "NoOpMonitor({})".format(self._name)
class Monitors(Callback):
_chief_only = False
def __init__(self, monitors):
self._scalar_history = ScalarHistory()
self._monitors = monitors + [self._scalar_history]
for m in self._monitors:
assert isinstance(m, MonitorBase), m
def _setup_graph(self):
self._scalar_history.setup_graph(self.trainer)
def _dispatch(self, func):
for m in self._monitors:
func(m)
def put_summary(self, summary):
if isinstance(summary, six.binary_type):
summary = tf.Summary.FromString(summary)
assert isinstance(summary, tf.Summary), type(summary)
for val in summary.value:
if val.WhichOneof('value') == 'simple_value':
val.tag = re.sub('tower[0-9]+/', '', val.tag)
suffix = '-summary'
if val.tag.endswith(suffix):
val.tag = val.tag[:-len(suffix)]
self._dispatch(lambda m: m.process_scalar(val.tag, val.simple_value))
self._dispatch(lambda m: m.process_summary(summary))
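A hedged call-site sketch (the callback and metric name are made up): inside a tensorpack Callback, put_scalar is normally reached through the trainer's Monitors instance; numpy scalars are coerced to plain Python numbers before being dispatched both as a raw scalar and as a TF summary.
class MyCallback(Callback):
    def _trigger_step(self):
        # np.float32 is accepted and converted to a plain float internally
        self.trainer.monitors.put_scalar("custom/score", np.float32(0.5))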
|
Apache License 2.0
|
bashfuscator/bashfuscator
|
bashfuscator/core/mutators/command_obfuscator.py
|
Stub.genStub
|
python
|
def genStub(self, userCmd):
if self.escapeQuotes:
userCmd = userCmd.replace("'", "'\"'\"'")
genStub = self.stub
for var in re.findall(r"VAR\d+", genStub):
genStub = genStub.replace(var, self.randGen.randGenVar())
genStub = self.mangler.getMangledLine(genStub)
if "CMD" not in genStub:
printError(f"Stub '{self.name}' is improperly formatted: no 'CMD' string found")
else:
genStub = genStub.replace("CMD", userCmd)
return genStub
|
Generate a valid deobfuscation stub and wrap an obfuscated
command in it.
:param userCmd: command that needs to be wrapped in a
deobfuscation stub
:type userCmd: str
|
https://github.com/bashfuscator/bashfuscator/blob/7487348da2d0112213f8540ae28bf12b652f924a/bashfuscator/core/mutators/command_obfuscator.py#L86-L110
|
import re
from bashfuscator.common.messages import printError
from bashfuscator.core.mutators.mutator import Mutator
class CommandObfuscator(Mutator):
def __init__(self, name, description, sizeRating, timeRating, notes=None, author=None, credits=None, evalWrap=True, unreadableOutput=False, reversible=False):
super().__init__(name, "command", description, sizeRating, timeRating, notes, author, credits, evalWrap, unreadableOutput)
self.reversible = reversible
self.stubs = []
self.deobStub = None
class Stub(object):
def __init__(self, name, sizeRating, timeRating, binariesUsed, fileWrite, escapeQuotes, stub):
self.name = name
self.longName = self.name.replace(" ", "_").lower()
self.sizeRating = sizeRating
self.timeRating = timeRating
self.binariesUsed = binariesUsed
self.fileWrite = fileWrite
self.escapeQuotes = escapeQuotes
self.stub = stub
self.mangler = None
self.randGen = None
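A small illustration of the quote-escaping step genStub applies before substituting CMD, done here with a plain string rather than a Stub instance, since the mangler and random generator are wired up elsewhere in the framework.
user_cmd = "echo 'hi there'"
escaped = user_cmd.replace("'", "'\"'\"'")
print(escaped)   # echo '"'"'hi there'"'"'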
|
MIT License
|
nowsecure/datagrid-gtk3
|
datagrid_gtk3/ui/grid.py
|
OptionsPopup.on_combo_view_changed
|
python
|
def on_combo_view_changed(self, widget):
model = widget.get_model()
value = model[widget.get_active()][1]
self.emit('view-changed', value)
self.popdown()
|
Handle changes on the view combo.
Emit 'view-changed' for the given view.
:param widget: the combobox that received the event
:type widget: :class:`Gtk.ComboBox`
|
https://github.com/nowsecure/datagrid-gtk3/blob/28083a4b9b4e2e0c7fbe2755d8464d2b02163086/datagrid_gtk3/ui/grid.py#L299-L310
|
import base64
import contextlib
import datetime
import itertools
import logging
import os
from gi.repository import (
GLib,
GObject,
Gdk,
GdkPixbuf,
Gtk,
Pango,
)
from pygtkcompat.generictreemodel import GenericTreeModel
from datagrid_gtk3.ui.popupcal import DateEntry
from datagrid_gtk3.ui.uifile import UIFile
from datagrid_gtk3.utils.dateutils import normalize_timestamp
from datagrid_gtk3.utils.imageutils import ImageCacheManager
from datagrid_gtk3.utils.transformations import get_transformer
_MEDIA_FILES = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
"data",
"media"
)
logger = logging.getLogger(__name__)
_no_image_loader = GdkPixbuf.PixbufLoader.new_with_type("png")
_no_image_loader.write(base64.b64decode("""
iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI
WXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3wEPEDYaIuf2wwAAABl0RVh0Q29tbWVudABDcmVhdGVk
IHdpdGggR0lNUFeBDhcAAAANSURBVAjXY2BgYGAAAAAFAAFe8yo6AAAAAElFTkSuQmCC
"""))
_no_image_loader.close()
NO_IMAGE_PIXBUF = _no_image_loader.get_pixbuf()
NO_FILTER_OPTION = object()
class OptionsPopup(Gtk.Window):
OPTIONS_PADDING = 5
MAX_HEIGHT = 500
(VIEW_TREE,
VIEW_FLAT,
VIEW_ICON) = range(3)
__gsignals__ = {
'column-visibility-changed': (GObject.SignalFlags.RUN_FIRST,
None, (str, bool)),
'view-changed': (GObject.SignalFlags.RUN_FIRST, None, (int, ))
}
def __init__(self, toggle_btn, controller, *args, **kwargs):
self._toggle_btn = toggle_btn
self._toggled_id = self._toggle_btn.connect(
'toggled', self.on_toggle_button_toggled)
self._controller = controller
super(OptionsPopup, self).__init__(
Gtk.WindowType.POPUP, *args, **kwargs)
self.connect('button-press-event', self.on_button_press_event)
self.connect('key-press-event', self.on_key_press_event)
self._scrolled_window = Gtk.ScrolledWindow(
vscrollbar_policy=Gtk.PolicyType.AUTOMATIC,
hscrollbar_policy=Gtk.PolicyType.NEVER)
alignment = Gtk.Alignment()
alignment.set_padding(5, 5, 5, 5)
alignment.add(self._scrolled_window)
self.add(alignment)
def popup(self):
if not self._toggle_btn.get_realized():
return
child = self._scrolled_window.get_child()
if child:
self._scrolled_window.remove(child)
vbox = Gtk.VBox()
combo = self._get_view_options()
if combo is not None:
vbox.pack_start(combo, expand=False, fill=False,
padding=self.OPTIONS_PADDING)
if not isinstance(self._controller.view, DataGridIconView):
if combo is not None:
vbox.pack_start(
Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL),
expand=True, fill=True, padding=self.OPTIONS_PADDING)
for switch in self._get_visibility_options():
vbox.pack_start(switch, expand=False, fill=False,
padding=self.OPTIONS_PADDING)
self._scrolled_window.add(vbox)
toplevel = self._toggle_btn.get_toplevel().get_toplevel()
if isinstance(toplevel, (Gtk.Window, Gtk.Dialog)):
group = toplevel.get_group()
if group:
group.add_window(self)
x, y = self._get_position()
self.move(x, y)
self.show_all()
allocation = vbox.get_allocation()
height = min(allocation.height + 2 * self.OPTIONS_PADDING,
self.MAX_HEIGHT)
self.set_size_request(-1, height)
if not self._popup_grab_window():
self.popdown()
def popdown(self):
if not self._toggle_btn.get_realized():
return
with self._toggle_btn.handler_block(self._toggled_id):
self._toggle_btn.set_active(False)
self.grab_remove()
self.hide()
def _popup_grab_window(self):
window = self.get_window()
grab_status = Gdk.pointer_grab(
window, True,
(Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.POINTER_MOTION_MASK),
            None, None, 0)
        if grab_status == Gdk.GrabStatus.SUCCESS:
            if Gdk.keyboard_grab(window, True, 0) != Gdk.GrabStatus.SUCCESS:
                display = window.get_display()
                display.pointer_ungrab(0)
return False
self.grab_add()
return True
def _get_position(self):
allocation = self._toggle_btn.get_allocation()
window = self._toggle_btn.get_window()
if self._toggle_btn.get_has_window():
x_coord = 0
y_coord = 0
else:
x_coord = allocation.x
y_coord = allocation.y
x, y = window.get_root_coords(x_coord, y_coord)
return x, y + allocation.height
def _get_view_options(self):
iters = {}
model = Gtk.ListStore(str, int)
iters[self.VIEW_TREE] = model.append(("Tree View", self.VIEW_TREE))
if self._controller.model.flat_column_idx is not None:
iters[self.VIEW_FLAT] = model.append(("Flat View", self.VIEW_FLAT))
if any(c['transform'] == 'image'
for c in self._controller.model.columns):
iters[self.VIEW_ICON] = model.append(("Icon View", self.VIEW_ICON))
if len(iters) == 1:
return None
combo = Gtk.ComboBox()
combo.set_model(model)
renderer = Gtk.CellRendererText()
combo.pack_start(renderer, True)
combo.add_attribute(renderer, 'text', 0)
if isinstance(self._controller.view, DataGridView):
if self._controller.model.active_params.get('flat', False):
combo.set_active_iter(iters[self.VIEW_FLAT])
else:
combo.set_active_iter(iters[self.VIEW_TREE])
elif isinstance(self._controller.view, DataGridIconView):
combo.set_active_iter(iters[self.VIEW_ICON])
else:
raise AssertionError("Unknown view type %r" % (
self._controller.view, ))
combo.connect('changed', self.on_combo_view_changed)
return combo
def _get_visibility_options(self):
model = self._controller.model
hidden_columns = model.hidden_columns
for column in model.columns:
if column['name'] in hidden_columns:
continue
switch = Gtk.Switch()
label = Gtk.Label(column['display'])
switch.set_active(column['name'] in model.display_columns)
hbox = Gtk.HBox(spacing=5)
hbox.pack_start(switch, expand=False, fill=True, padding=0)
hbox.pack_start(label, expand=True, fill=True, padding=0)
switch.connect(
'notify::active',
self.on_column_switch_notify_active, column['name'])
yield hbox
def on_key_press_event(self, window, event):
if event.get_keyval()[1] == Gdk.KEY_Escape:
self.popdown()
return True
return False
def on_button_press_event(self, window, event):
event_rect = Gdk.Rectangle()
event_rect.x, event_rect.y = event.get_root_coords()
event_rect.width = 1
event_rect.height = 1
allocation = self.get_allocation()
window_rect = Gdk.Rectangle()
window_rect.x, window_rect.y = self._get_position()
window_rect.width = allocation.width
window_rect.height = allocation.height
intersection = Gdk.rectangle_intersect(
event_rect, window_rect)
if not intersection[0]:
self.popdown()
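A hedged sketch of listening for the re-emitted selection: on_combo_view_changed forwards the integer view constant through the 'view-changed' signal, so callers only need a handler on that signal. The toggle button is a bare Gtk.ToggleButton and None stands in for the real grid controller, which the constructor only stores.
def on_view_changed(popup, view_id):
    print("switching to view", view_id)  # one of OptionsPopup.VIEW_TREE/FLAT/ICON

toggle_btn = Gtk.ToggleButton()
popup = OptionsPopup(toggle_btn, None)   # None is a placeholder for the controller
popup.connect('view-changed', on_view_changed)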
|
MIT License
|
lithops-cloud/lithops
|
lithops/monitor.py
|
RabbitmqMonitor._create_resources
|
python
|
def _create_resources(self):
logger.debug(f'ExecutorID {self.executor_id} - Creating RabbitMQ queue {self.queue}')
self.pikaparams = pika.URLParameters(self.rabbit_amqp_url)
self.connection = pika.BlockingConnection(self.pikaparams)
channel = self.connection.channel()
channel.queue_declare(queue=self.queue, auto_delete=True)
channel.close()
|
Creates the RabbitMQ queue used to monitor a given job
|
https://github.com/lithops-cloud/lithops/blob/a274a0bc423e22b9a68834cac5d63130666a4ee8/lithops/monitor.py#L136-L146
|
import json
import pika
import logging
import time
import lithops
import pickle
import sys
import queue
import threading
import concurrent.futures as cf
from tblib import pickling_support
pickling_support.install()
logger = logging.getLogger(__name__)
class Monitor(threading.Thread):
def __init__(self, executor_id,
internal_storage,
token_bucket_q,
generate_tokens,
config):
super().__init__()
self.executor_id = executor_id
self.futures = []
self.internal_storage = internal_storage
self.should_run = True
self.token_bucket_q = token_bucket_q
self.generate_tokens = generate_tokens
self.config = config
self.daemon = True
self.workers = {}
self.workers_done = []
self.callids_done_worker = {}
self.job_chunksize = {}
self.present_jobs = set()
def add_futures(self, fs, job_id=None, chunksize=None):
self.futures.extend(fs)
if job_id:
self.job_chunksize[job_id] = chunksize
present_jobs = {f.job_id for f in fs}
for job_id in present_jobs:
self.present_jobs.add(job_id)
def _all_ready(self):
return all([f.ready or f.success or f.done for f in self.futures])
def _check_new_futures(self, call_status, f):
if 'new_futures' not in call_status:
return False
f._set_futures(call_status)
self.futures.extend(f._new_futures)
logger.debug(f'ExecutorID {self.executor_id} - Got {len(f._new_futures)} new futures to track')
return True
def _future_timeout_checker(self, futures):
current_time = time.time()
futures_running = [f for f in futures if f.running]
for fut in futures_running:
try:
start_tstamp = fut._call_status['worker_start_tstamp']
fut_timeout = start_tstamp + fut.execution_timeout + 5
if current_time > fut_timeout:
msg = 'The function did not run as expected.'
raise TimeoutError('HANDLER', msg)
except TimeoutError:
pickled_exception = str(pickle.dumps(sys.exc_info()))
call_status = {'type': '__end__',
'exception': True,
'exc_info': pickled_exception,
'executor_id': fut.executor_id,
'job_id': fut.job_id,
'call_id': fut.call_id,
'activation_id': fut.activation_id}
fut._set_ready(call_status)
def _print_status_log(self):
callids_pending = len([f for f in self.futures if f.invoked])
callids_running = len([f for f in self.futures if f.running])
callids_done = len([f for f in self.futures if f.ready or f.success or f.done])
logger.debug(f'ExecutorID {self.executor_id} - Pending: {callids_pending} '
f'- Running: {callids_running} - Done: {callids_done}')
class RabbitmqMonitor(Monitor):
def __init__(self, executor_id, internal_storage, token_bucket_q, generate_tokens, config):
super().__init__(executor_id, internal_storage, token_bucket_q, generate_tokens, config)
self.rabbit_amqp_url = config.get('amqp_url')
self.queue = f'lithops-{self.executor_id}'
self._create_resources()
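A standalone sketch of the same resource creation using pika directly (the AMQP URL and queue name are placeholders): the monitor simply declares an auto-delete queue named after the executor id.
params = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_declare(queue='lithops-0000-example', auto_delete=True)
channel.close()
connection.close()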
|
Apache License 2.0
|
georgesung/deep_rl_acrobot
|
learning_agent.py
|
sample_action
|
python
|
def sample_action(probs):
num_actions = len(probs)
threshold = random.uniform(0,1)
cumulative_prob = 0.
for action in range(num_actions):
cumulative_prob += probs[action]
if cumulative_prob > threshold:
return action
return num_actions - 1
|
Sample action (0/1/2/etc.) from probability distribution probs
|
https://github.com/georgesung/deep_rl_acrobot/blob/ef4d266f2fb16bb6cccf393404047542b3eb9ea2/learning_agent.py#L65-L76
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import random
import gym
import math
import matplotlib.pyplot as plt
ENV = 'Acrobot-v1'
OBS_WIDTH = 6
NUM_ACTIONS = 3
RESUME = False
SAVE_MODEL = False
MODEL_LOC = 'models/model.ckpt'
SAVE_THRESHOLD = -200
RECORD_LOC = 'openai_data'
NUM_EPISODES = 1000
MAX_ITER = 3000
NUM_ENVS = 5
EPISODES_PER_UPDATE = 1
DS_FACTOR = 1
ACTOR_LR = 0.005
CRITIC_LR_SCALE = 0.5
CRITIC_LR = ACTOR_LR * CRITIC_LR_SCALE
REWARD_DISCOUNT = 0.97
A_REG_SCALE = 0.0005
C_REG_SCALE = 0.0005
def discount_rewards(r):
discounted_r = np.zeros_like(r)
running_add = 0.
for t in reversed(range(len(r))):
running_add = running_add * REWARD_DISCOUNT + r[t]
discounted_r[t] = running_add
return discounted_r
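A quick sanity-check sketch: sampling many actions from a fixed distribution should give empirical frequencies close to the probabilities.
probs = [0.2, 0.5, 0.3]
counts = [0, 0, 0]
for _ in range(10000):
    counts[sample_action(probs)] += 1
print([c / 10000.0 for c in counts])   # roughly [0.2, 0.5, 0.3]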
|
MIT License
|
ai4finance-llc/neofinrl
|
rllib/agents/mbmpo/model_ensemble.py
|
DynamicsEnsembleCustomModel.__init__
|
python
|
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
nn.Module.__init__(self)
if isinstance(action_space, Discrete):
input_space = gym.spaces.Box(
obs_space.low[0],
obs_space.high[0],
shape=(obs_space.shape[0] + action_space.n, ))
elif isinstance(action_space, Box):
input_space = gym.spaces.Box(
obs_space.low[0],
obs_space.high[0],
shape=(obs_space.shape[0] + action_space.shape[0], ))
else:
raise NotImplementedError
super(DynamicsEnsembleCustomModel, self).__init__(
input_space, action_space, num_outputs, model_config, name)
self.env_obs_space = obs_space
self.num_models = model_config["ensemble_size"]
self.max_epochs = model_config["train_epochs"]
self.lr = model_config["lr"]
self.valid_split = model_config["valid_split_ratio"]
self.batch_size = model_config["batch_size"]
self.normalize_data = model_config["normalize_data"]
self.normalizations = {}
self.dynamics_ensemble = [
TDModel(
input_size=input_space.shape[0],
output_size=obs_space.shape[0],
hidden_layers=model_config["fcnet_hiddens"],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
weight_normalization=True) for _ in range(self.num_models)
]
for i in range(self.num_models):
self.add_module("TD-model-" + str(i), self.dynamics_ensemble[i])
self.replay_buffer_max = 10000
self.replay_buffer = None
self.optimizers = [
torch.optim.Adam(
self.dynamics_ensemble[i].parameters(), lr=self.lr)
for i in range(self.num_models)
]
self.metrics = {}
self.metrics[STEPS_SAMPLED_COUNTER] = 0
worker_index = get_global_worker().worker_index
self.sample_index = int((worker_index - 1) / self.num_models)
self.global_itr = 0
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
|
Initializes a DynamicsEnsembleCustomModel object.
|
https://github.com/ai4finance-llc/neofinrl/blob/51338dbb0ec86f74e4fc6cce90bc385a4639de79/rllib/agents/mbmpo/model_ensemble.py#L124-L182
|
import gym
from gym.spaces import Discrete, Box
import numpy as np
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.evaluation.rollout_worker import get_global_worker
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER
from ray.rllib.utils.typing import SampleBatchType
from ray.rllib.utils.torch_ops import convert_to_torch_tensor
torch, nn = try_import_torch()
class TDModel(nn.Module):
def __init__(self,
input_size,
output_size,
hidden_layers=(512, 512),
hidden_nonlinearity=None,
output_nonlinearity=None,
weight_normalization=False,
use_bias=True):
super().__init__()
assert len(hidden_layers) >= 1
if not hidden_nonlinearity:
hidden_nonlinearity = nn.ReLU
if weight_normalization:
weight_norm = nn.utils.weight_norm
self.layers = []
cur_size = input_size
for h_size in hidden_layers:
layer = nn.Linear(cur_size, h_size, bias=use_bias)
if weight_normalization:
layer = weight_norm(layer)
self.layers.append(layer)
if hidden_nonlinearity:
self.layers.append(hidden_nonlinearity())
cur_size = h_size
layer = nn.Linear(cur_size, output_size, bias=use_bias)
if weight_normalization:
layer = weight_norm(layer)
self.layers.append(layer)
if output_nonlinearity:
self.layers.append(output_nonlinearity())
self.model = nn.Sequential(*self.layers)
def forward(self, x):
return self.model(x)
if torch:
class TDDataset(torch.utils.data.Dataset):
def __init__(self, dataset: SampleBatchType, norms):
self.count = dataset.count
obs = dataset[SampleBatch.CUR_OBS]
actions = dataset[SampleBatch.ACTIONS]
delta = dataset[SampleBatch.NEXT_OBS] - obs
if norms:
obs = normalize(obs, norms[SampleBatch.CUR_OBS])
actions = normalize(actions, norms[SampleBatch.ACTIONS])
delta = normalize(delta, norms["delta"])
self.x = np.concatenate([obs, actions], axis=1)
self.y = delta
def __len__(self):
return self.count
def __getitem__(self, index):
return self.x[index], self.y[index]
def normalize(data_array, stats):
mean, std = stats
return (data_array - mean) / (std + 1e-10)
def denormalize(data_array, stats):
mean, std = stats
return data_array * (std + 1e-10) + mean
def mean_std_stats(dataset: SampleBatchType):
norm_dict = {}
obs = dataset[SampleBatch.CUR_OBS]
act = dataset[SampleBatch.ACTIONS]
delta = dataset[SampleBatch.NEXT_OBS] - obs
norm_dict[SampleBatch.CUR_OBS] = (np.mean(obs, axis=0), np.std(
obs, axis=0))
norm_dict[SampleBatch.ACTIONS] = (np.mean(act, axis=0), np.std(
act, axis=0))
norm_dict["delta"] = (np.mean(delta, axis=0), np.std(delta, axis=0))
return norm_dict
def process_samples(samples: SampleBatchType):
filter_keys = [
SampleBatch.CUR_OBS, SampleBatch.ACTIONS, SampleBatch.NEXT_OBS
]
filtered = {}
for key in filter_keys:
filtered[key] = samples[key]
return SampleBatch(filtered)
class DynamicsEnsembleCustomModel(TorchModelV2, nn.Module):
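A sketch exercising the TDModel building block on its own (no Ray worker or gym space is needed for this part); the shapes are illustrative: a 6-dim observation concatenated with a 2-dim action gives an 8-dim input, and the network predicts a 6-dim state delta.
model = TDModel(input_size=8, output_size=6,
                hidden_layers=(64, 64),
                hidden_nonlinearity=nn.ReLU,
                weight_normalization=True)
delta = model(torch.randn(32, 8))
print(delta.shape)   # torch.Size([32, 6])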
|
MIT License
|
airbnb/streamalert
|
streamalert/threat_intel_downloader/main.py
|
ThreatStream.runner
|
python
|
def runner(self, event):
event = event or {}
self._load_api_creds()
query = '(status="{}")+AND+({})+AND+NOT+({})'.format(
self._IOC_STATUS,
"+OR+".join(['type="{}"'.format(ioc) for ioc in self.ioc_types]),
"+OR+".join(['itype="{}"'.format(itype) for itype in self.excluded_sub_types])
)
next_url = event.get(
'next_url',
'/api/v2/{}/?username={}&api_key={}&limit={}&q={}'.format(
self._API_RESOURCE,
self.api_user,
self.api_key,
self._API_MAX_LIMIT,
query
)
)
self._connect(next_url)
|
Process the URL before making the API call
Args:
    event (dict): Contains lambda function invocation information. Initially,
        the Threat Intel Downloader lambda function is invoked by a CloudWatch
        event. A 'next_url' key is inserted into the event when the lambda
        function invokes itself to retrieve more IOCs.
Returns:
    (tuple): (list, str, bool)
        - First object is a list of intelligence.
        - Second object is a string with the next token used to retrieve more IOCs.
        - Third object is a bool indicating whether more IOCs should be retrieved
            from the threat feed.
|
https://github.com/airbnb/streamalert/blob/ceb680b7b821ebf6b6800adf164977e57c886fef/streamalert/threat_intel_downloader/main.py#L279-L314
|
from datetime import datetime, timedelta
import json
import backoff
import boto3
from botocore.exceptions import ClientError
import requests
from streamalert.shared.backoff_handlers import (
backoff_handler,
success_handler,
giveup_handler
)
from streamalert.shared.config import load_config, parse_lambda_arn
from streamalert.shared.logger import get_logger
from streamalert.threat_intel_downloader.exceptions import (
ThreatStreamCredsError,
ThreatStreamLambdaInvokeError,
ThreatStreamRequestsError
)
LOGGER = get_logger(__name__)
class ThreatStream:
_API_URL = 'https://api.threatstream.com'
_API_RESOURCE = 'intelligence'
_IOC_STATUS = 'active'
_API_MAX_LIMIT = 1000
_API_MAX_INDEX = 500000
_END_TIME_BUFFER = 5
CRED_PARAMETER_NAME = 'threat_intel_downloader_api_creds'
EXCEPTIONS_TO_BACKOFF = (requests.exceptions.Timeout,
requests.exceptions.ConnectionError,
requests.exceptions.ChunkedEncodingError,
ThreatStreamRequestsError)
BACKOFF_MAX_RETRIES = 3
def __init__(self, function_arn, timing_func):
self._config = self._load_config(function_arn)
self.timing_func = timing_func
self.api_user = None
self.api_key = None
@staticmethod
def _load_config(function_arn):
base_config = parse_lambda_arn(function_arn)
config = load_config(include={'lambda.json'})['lambda']
base_config.update(config.get('threat_intel_downloader_config', {}))
return base_config
def _load_api_creds(self):
if self.api_user and self.api_key:
return
try:
ssm = boto3.client('ssm', self.region)
response = ssm.get_parameter(Name=self.CRED_PARAMETER_NAME, WithDecryption=True)
except ClientError:
LOGGER.exception('Failed to get SSM parameters')
raise
if not response:
raise ThreatStreamCredsError('Invalid response')
try:
decoded_creds = json.loads(response['Parameter']['Value'])
except ValueError:
raise ThreatStreamCredsError('Cannot load value for parameter with name '
'\'{}\'. The value is not valid json: '
'\'{}\''.format(response['Parameter']['Name'],
response['Parameter']['Value']))
self.api_user = decoded_creds['api_user']
self.api_key = decoded_creds['api_key']
if not (self.api_user and self.api_key):
raise ThreatStreamCredsError('API Creds Error')
@backoff.on_exception(backoff.constant,
EXCEPTIONS_TO_BACKOFF,
max_tries=BACKOFF_MAX_RETRIES,
on_backoff=backoff_handler(),
on_success=success_handler(),
on_giveup=giveup_handler())
def _connect(self, next_url):
intelligence = list()
https_req = requests.get('{}{}'.format(self._API_URL, next_url), timeout=10)
next_url = None
if https_req.status_code == 200:
data = https_req.json()
if data.get('objects'):
intelligence.extend(self._process_data(data['objects']))
LOGGER.info('IOC Offset: %d', data['meta']['offset'])
if not (data['meta']['next'] and data['meta']['offset'] < self.threshold):
LOGGER.debug('Either next token is empty or IOC offset reaches threshold '
'%d. Stop retrieve more IOCs.', self.threshold)
else:
next_url = data['meta']['next']
elif https_req.status_code == 401:
raise ThreatStreamRequestsError('Response status code 401, unauthorized.')
elif https_req.status_code == 500:
raise ThreatStreamRequestsError('Response status code 500, retry now.')
else:
raise ThreatStreamRequestsError(
'Unknown status code {}, do not retry.'.format(https_req.status_code))
self._finalize(intelligence, next_url)
def _finalize(self, intel, next_url):
if intel:
LOGGER.info('Write %d IOCs to DynamoDB table', len(intel))
self._write_to_dynamodb_table(intel)
if next_url and self.timing_func() > self._END_TIME_BUFFER * 1000:
self._invoke_lambda_function(next_url)
LOGGER.debug("Time remaining (MS): %s", self.timing_func())
def _invoke_lambda_function(self, next_url):
LOGGER.debug('This invocation is invoked by lambda function self.')
lambda_client = boto3.client('lambda', region_name=self.region)
try:
lambda_client.invoke(
FunctionName=self._config['function_name'],
InvocationType='Event',
Payload=json.dumps({'next_url': next_url}),
Qualifier=self._config['qualifier']
)
except ClientError as err:
raise ThreatStreamLambdaInvokeError('Error invoking function: {}'.format(err))
@staticmethod
def _epoch_time(time_str, days=90):
if not time_str:
return int((datetime.utcnow()
+ timedelta(days)
- datetime.utcfromtimestamp(0)).total_seconds())
try:
utc_time = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%S.%fZ")
return int((utc_time - datetime.utcfromtimestamp(0)).total_seconds())
except ValueError:
LOGGER.error('Cannot convert expiration date \'%s\' to epoch time', time_str)
raise
def _process_data(self, data):
results = list()
for obj in data:
for source in self.ioc_sources:
if source in obj['source'].lower():
filtered_obj = {key: value for key, value in obj.items()
if key in self.ioc_keys}
filtered_obj['expiration_ts'] = self._epoch_time(filtered_obj['expiration_ts'])
results.append(filtered_obj)
return results
def _write_to_dynamodb_table(self, intelligence):
try:
dynamodb = boto3.resource('dynamodb', region_name=self.region)
table = dynamodb.Table(self.table_name)
with table.batch_writer() as batch:
for ioc in intelligence:
batch.put_item(
Item={
'ioc_value': ioc['value'],
'ioc_type': ioc['type'],
'sub_type': ioc['itype'],
'source': ioc['source'],
'expiration_ts': ioc['expiration_ts']
}
)
except ClientError as err:
LOGGER.debug('DynamoDB client error: %s', err)
raise
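An illustration of the query string runner() assembles (the IOC types and excluded sub-types below are examples, not the deployed configuration):
ioc_types = ['domain', 'ip']
excluded_sub_types = ['example_subtype']
query = '(status="active")+AND+({})+AND+NOT+({})'.format(
    "+OR+".join('type="{}"'.format(t) for t in ioc_types),
    "+OR+".join('itype="{}"'.format(s) for s in excluded_sub_types))
print(query)
# (status="active")+AND+(type="domain"+OR+type="ip")+AND+NOT+(itype="example_subtype")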
|
Apache License 2.0
|
rstudio/mlflow-original
|
mlflow/tracking/service.py
|
MLflowService.create_run
|
python
|
def create_run(self, experiment_id, user_id=None, run_name=None, source_type=None,
source_name=None, entry_point_name=None, start_time=None,
source_version=None, tags=None):
tags = tags if tags else {}
return self.store.create_run(
experiment_id=experiment_id,
user_id=user_id if user_id is not None else _get_user_id(),
run_name=run_name,
source_type=source_type,
source_name=source_name,
entry_point_name=entry_point_name,
start_time=start_time or int(time.time() * 1000),
source_version=source_version,
tags=[RunTag(key, value) for (key, value) in iteritems(tags)],
)
|
Creates a new :py:class:`mlflow.entities.Run` object, which can be associated with
metrics, parameters, artifacts, etc.
Unlike :py:func:`mlflow.projects.run`, does not actually run code, just creates objects.
Unlike :py:func:`mlflow.start_run`, this does not change the "active run" used by
:py:func:`mlflow.log_param` and friends.
:param user_id: If not provided, we will use the current user as a default.
:param start_time: If not provided, we will use the current timestamp.
:param tags: A dictionary of key-value pairs which will be converted into
RunTag objects.
:return: :py:class:`mlflow.entities.Run` which was created
|
https://github.com/rstudio/mlflow-original/blob/4eb703bc9af5f5bb6c63b8772ccde590624cf73d/mlflow/tracking/service.py#L34-L60
|
import os
import time
from six import iteritems
from mlflow.utils.validation import _validate_metric_name, _validate_param_name, _validate_tag_name, _validate_run_id
from mlflow.entities import Param, Metric, RunStatus, RunTag
from mlflow.tracking.utils import _get_store
from mlflow.store.artifact_repo import ArtifactRepository
_DEFAULT_USER_ID = "unknown"
class MLflowService(object):
def __init__(self, store):
self.store = store
def get_run(self, run_id):
_validate_run_id(run_id)
return self.store.get_run(run_id)
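A hedged usage sketch (the experiment id and tags are placeholders): create_run only records a Run entity in the backing store; no project code is executed.
service = MLflowService(_get_store())   # default tracking store for this environment
run = service.create_run(
    experiment_id=0,
    run_name="example-run",
    tags={"team": "data", "stage": "dev"},
)
print(run.info)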
|
Apache License 2.0
|
docusign/docusign-python-client
|
docusign_esign/models/approve.py
|
Approve.anchor_ignore_if_not_present
|
python
|
def anchor_ignore_if_not_present(self):
return self._anchor_ignore_if_not_present
|
Gets the anchor_ignore_if_not_present of this Approve. # noqa: E501
When set to **true**, this tab is ignored if anchorString is not found in the document. # noqa: E501
:return: The anchor_ignore_if_not_present of this Approve. # noqa: E501
:rtype: str
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/approve.py#L527-L535
|
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class Approve(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'anchor_allow_white_space_in_characters': 'str',
'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata',
'anchor_case_sensitive': 'str',
'anchor_case_sensitive_metadata': 'PropertyMetadata',
'anchor_horizontal_alignment': 'str',
'anchor_horizontal_alignment_metadata': 'PropertyMetadata',
'anchor_ignore_if_not_present': 'str',
'anchor_ignore_if_not_present_metadata': 'PropertyMetadata',
'anchor_match_whole_word': 'str',
'anchor_match_whole_word_metadata': 'PropertyMetadata',
'anchor_string': 'str',
'anchor_string_metadata': 'PropertyMetadata',
'anchor_tab_processor_version': 'str',
'anchor_tab_processor_version_metadata': 'PropertyMetadata',
'anchor_units': 'str',
'anchor_units_metadata': 'PropertyMetadata',
'anchor_x_offset': 'str',
'anchor_x_offset_metadata': 'PropertyMetadata',
'anchor_y_offset': 'str',
'anchor_y_offset_metadata': 'PropertyMetadata',
'bold': 'str',
'bold_metadata': 'PropertyMetadata',
'button_text': 'str',
'button_text_metadata': 'PropertyMetadata',
'conditional_parent_label': 'str',
'conditional_parent_label_metadata': 'PropertyMetadata',
'conditional_parent_value': 'str',
'conditional_parent_value_metadata': 'PropertyMetadata',
'custom_tab_id': 'str',
'custom_tab_id_metadata': 'PropertyMetadata',
'document_id': 'str',
'document_id_metadata': 'PropertyMetadata',
'error_details': 'ErrorDetails',
'font': 'str',
'font_color': 'str',
'font_color_metadata': 'PropertyMetadata',
'font_metadata': 'PropertyMetadata',
'font_size': 'str',
'font_size_metadata': 'PropertyMetadata',
'form_order': 'str',
'form_order_metadata': 'PropertyMetadata',
'form_page_label': 'str',
'form_page_label_metadata': 'PropertyMetadata',
'form_page_number': 'str',
'form_page_number_metadata': 'PropertyMetadata',
'height': 'str',
'height_metadata': 'PropertyMetadata',
'italic': 'str',
'italic_metadata': 'PropertyMetadata',
'locale_policy': 'LocalePolicyTab',
'merge_field': 'MergeField',
'merge_field_xml': 'str',
'page_number': 'str',
'page_number_metadata': 'PropertyMetadata',
'recipient_id': 'str',
'recipient_id_guid': 'str',
'recipient_id_guid_metadata': 'PropertyMetadata',
'recipient_id_metadata': 'PropertyMetadata',
'smart_contract_information': 'SmartContractInformation',
'source': 'str',
'status': 'str',
'status_metadata': 'PropertyMetadata',
'tab_group_labels': 'list[str]',
'tab_group_labels_metadata': 'PropertyMetadata',
'tab_id': 'str',
'tab_id_metadata': 'PropertyMetadata',
'tab_label': 'str',
'tab_label_metadata': 'PropertyMetadata',
'tab_order': 'str',
'tab_order_metadata': 'PropertyMetadata',
'tab_type': 'str',
'tab_type_metadata': 'PropertyMetadata',
'template_locked': 'str',
'template_locked_metadata': 'PropertyMetadata',
'template_required': 'str',
'template_required_metadata': 'PropertyMetadata',
'tooltip': 'str',
'tool_tip_metadata': 'PropertyMetadata',
'underline': 'str',
'underline_metadata': 'PropertyMetadata',
'width': 'str',
'width_metadata': 'PropertyMetadata',
'x_position': 'str',
'x_position_metadata': 'PropertyMetadata',
'y_position': 'str',
'y_position_metadata': 'PropertyMetadata'
}
attribute_map = {
'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters',
'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata',
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata',
'anchor_string': 'anchorString',
'anchor_string_metadata': 'anchorStringMetadata',
'anchor_tab_processor_version': 'anchorTabProcessorVersion',
'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata',
'anchor_units': 'anchorUnits',
'anchor_units_metadata': 'anchorUnitsMetadata',
'anchor_x_offset': 'anchorXOffset',
'anchor_x_offset_metadata': 'anchorXOffsetMetadata',
'anchor_y_offset': 'anchorYOffset',
'anchor_y_offset_metadata': 'anchorYOffsetMetadata',
'bold': 'bold',
'bold_metadata': 'boldMetadata',
'button_text': 'buttonText',
'button_text_metadata': 'buttonTextMetadata',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_label_metadata': 'conditionalParentLabelMetadata',
'conditional_parent_value': 'conditionalParentValue',
'conditional_parent_value_metadata': 'conditionalParentValueMetadata',
'custom_tab_id': 'customTabId',
'custom_tab_id_metadata': 'customTabIdMetadata',
'document_id': 'documentId',
'document_id_metadata': 'documentIdMetadata',
'error_details': 'errorDetails',
'font': 'font',
'font_color': 'fontColor',
'font_color_metadata': 'fontColorMetadata',
'font_metadata': 'fontMetadata',
'font_size': 'fontSize',
'font_size_metadata': 'fontSizeMetadata',
'form_order': 'formOrder',
'form_order_metadata': 'formOrderMetadata',
'form_page_label': 'formPageLabel',
'form_page_label_metadata': 'formPageLabelMetadata',
'form_page_number': 'formPageNumber',
'form_page_number_metadata': 'formPageNumberMetadata',
'height': 'height',
'height_metadata': 'heightMetadata',
'italic': 'italic',
'italic_metadata': 'italicMetadata',
'locale_policy': 'localePolicy',
'merge_field': 'mergeField',
'merge_field_xml': 'mergeFieldXml',
'page_number': 'pageNumber',
'page_number_metadata': 'pageNumberMetadata',
'recipient_id': 'recipientId',
'recipient_id_guid': 'recipientIdGuid',
'recipient_id_guid_metadata': 'recipientIdGuidMetadata',
'recipient_id_metadata': 'recipientIdMetadata',
'smart_contract_information': 'smartContractInformation',
'source': 'source',
'status': 'status',
'status_metadata': 'statusMetadata',
'tab_group_labels': 'tabGroupLabels',
'tab_group_labels_metadata': 'tabGroupLabelsMetadata',
'tab_id': 'tabId',
'tab_id_metadata': 'tabIdMetadata',
'tab_label': 'tabLabel',
'tab_label_metadata': 'tabLabelMetadata',
'tab_order': 'tabOrder',
'tab_order_metadata': 'tabOrderMetadata',
'tab_type': 'tabType',
'tab_type_metadata': 'tabTypeMetadata',
'template_locked': 'templateLocked',
'template_locked_metadata': 'templateLockedMetadata',
'template_required': 'templateRequired',
'template_required_metadata': 'templateRequiredMetadata',
'tooltip': 'tooltip',
'tool_tip_metadata': 'toolTipMetadata',
'underline': 'underline',
'underline_metadata': 'underlineMetadata',
'width': 'width',
'width_metadata': 'widthMetadata',
'x_position': 'xPosition',
'x_position_metadata': 'xPositionMetadata',
'y_position': 'yPosition',
'y_position_metadata': 'yPositionMetadata'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._anchor_allow_white_space_in_characters = None
self._anchor_allow_white_space_in_characters_metadata = None
self._anchor_case_sensitive = None
self._anchor_case_sensitive_metadata = None
self._anchor_horizontal_alignment = None
self._anchor_horizontal_alignment_metadata = None
self._anchor_ignore_if_not_present = None
self._anchor_ignore_if_not_present_metadata = None
self._anchor_match_whole_word = None
self._anchor_match_whole_word_metadata = None
self._anchor_string = None
self._anchor_string_metadata = None
self._anchor_tab_processor_version = None
self._anchor_tab_processor_version_metadata = None
self._anchor_units = None
self._anchor_units_metadata = None
self._anchor_x_offset = None
self._anchor_x_offset_metadata = None
self._anchor_y_offset = None
self._anchor_y_offset_metadata = None
self._bold = None
self._bold_metadata = None
self._button_text = None
self._button_text_metadata = None
self._conditional_parent_label = None
self._conditional_parent_label_metadata = None
self._conditional_parent_value = None
self._conditional_parent_value_metadata = None
self._custom_tab_id = None
self._custom_tab_id_metadata = None
self._document_id = None
self._document_id_metadata = None
self._error_details = None
self._font = None
self._font_color = None
self._font_color_metadata = None
self._font_metadata = None
self._font_size = None
self._font_size_metadata = None
self._form_order = None
self._form_order_metadata = None
self._form_page_label = None
self._form_page_label_metadata = None
self._form_page_number = None
self._form_page_number_metadata = None
self._height = None
self._height_metadata = None
self._italic = None
self._italic_metadata = None
self._locale_policy = None
self._merge_field = None
self._merge_field_xml = None
self._page_number = None
self._page_number_metadata = None
self._recipient_id = None
self._recipient_id_guid = None
self._recipient_id_guid_metadata = None
self._recipient_id_metadata = None
self._smart_contract_information = None
self._source = None
self._status = None
self._status_metadata = None
self._tab_group_labels = None
self._tab_group_labels_metadata = None
self._tab_id = None
self._tab_id_metadata = None
self._tab_label = None
self._tab_label_metadata = None
self._tab_order = None
self._tab_order_metadata = None
self._tab_type = None
self._tab_type_metadata = None
self._template_locked = None
self._template_locked_metadata = None
self._template_required = None
self._template_required_metadata = None
self._tooltip = None
self._tool_tip_metadata = None
self._underline = None
self._underline_metadata = None
self._width = None
self._width_metadata = None
self._x_position = None
self._x_position_metadata = None
self._y_position = None
self._y_position_metadata = None
self.discriminator = None
setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None))
setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None))
setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None))
setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None))
setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None))
setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None))
setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None))
setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None))
setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None))
setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None))
setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None))
setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None))
setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None))
setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None))
setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None))
setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None))
setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None))
setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None))
setattr(self, "_{}".format('bold'), kwargs.get('bold', None))
setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None))
setattr(self, "_{}".format('button_text'), kwargs.get('button_text', None))
setattr(self, "_{}".format('button_text_metadata'), kwargs.get('button_text_metadata', None))
setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None))
setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None))
setattr(self, "_{}".format('conditional_parent_value'), kwargs.get('conditional_parent_value', None))
setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None))
setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None))
setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None))
setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None))
setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('font'), kwargs.get('font', None))
setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None))
setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None))
setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None))
setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None))
setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None))
setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None))
setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None))
setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None))
setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None))
setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None))
setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None))
setattr(self, "_{}".format('height'), kwargs.get('height', None))
setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None))
setattr(self, "_{}".format('italic'), kwargs.get('italic', None))
setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None))
setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None))
setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None))
setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None))
setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None))
setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None))
setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None))
setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None))
setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None))
setattr(self, "_{}".format('source'), kwargs.get('source', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None))
setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None))
setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None))
setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None))
setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None))
setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None))
setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None))
setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None))
setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None))
setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None))
setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None))
setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None))
setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None))
setattr(self, "_{}".format('underline'), kwargs.get('underline', None))
setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None))
setattr(self, "_{}".format('width'), kwargs.get('width', None))
setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None))
setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None))
setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None))
setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None))
setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None))
@property
def anchor_allow_white_space_in_characters(self):
return self._anchor_allow_white_space_in_characters
@anchor_allow_white_space_in_characters.setter
def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters):
self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters
@property
def anchor_allow_white_space_in_characters_metadata(self):
return self._anchor_allow_white_space_in_characters_metadata
@anchor_allow_white_space_in_characters_metadata.setter
def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata):
self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata
@property
def anchor_case_sensitive(self):
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_case_sensitive_metadata(self):
return self._anchor_case_sensitive_metadata
@anchor_case_sensitive_metadata.setter
def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata):
self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata
@property
def anchor_horizontal_alignment(self):
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_horizontal_alignment_metadata(self):
return self._anchor_horizontal_alignment_metadata
@anchor_horizontal_alignment_metadata.setter
def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata):
self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata
@property
|
MIT License
|
ethereum/py-evm
|
eth/db/chain.py
|
ChainDB._add_transaction_to_canonical_chain
|
python
|
def _add_transaction_to_canonical_chain(db: DatabaseAPI,
transaction_hash: Hash32,
block_header: BlockHeaderAPI,
index: int) -> None:
transaction_key = TransactionKey(block_header.block_number, index)
db.set(
SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash),
rlp.encode(transaction_key),
)
|
:param bytes transaction_hash: the hash of the transaction to add the lookup for
:param block_header: The header of the block with the txn that is in the canonical chain
:param int index: the position of the transaction in the block
- add a lookup from the transaction hash to the block number and index at which the body is stored
- remove the transaction-hash-to-body lookup from the pending pool
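A small round-trip sketch of the stored value (made-up numbers; TransactionKey comes from the module below): the key is derived from the transaction hash via SchemaV1.make_transaction_hash_to_block_lookup_key, and the value is the rlp-encoded (block_number, index) pair, so decoding it recovers the transaction's position.
import rlp

key_value = rlp.encode(TransactionKey(1234, 2))
decoded = rlp.decode(key_value, sedes=TransactionKey)
assert (decoded.block_number, decoded.index) == (1234, 2)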
|
https://github.com/ethereum/py-evm/blob/21759ee681315f7099b14893b6ac6d1a5e659bc0/eth/db/chain.py#L436-L451
|
import functools
import itertools
from typing import (
Dict,
Iterable,
Sequence,
Tuple,
Type,
cast,
)
from eth_typing import (
BlockNumber,
Hash32
)
from eth_utils import (
encode_hex,
)
from eth_hash.auto import keccak
from trie.exceptions import (
MissingTrieNode,
)
from eth.abc import (
BlockAPI,
BlockHeaderAPI,
ChainDatabaseAPI,
DatabaseAPI,
AtomicDatabaseAPI,
ReceiptAPI,
ReceiptDecoderAPI,
SignedTransactionAPI,
TransactionDecoderAPI,
)
from eth.constants import (
EMPTY_UNCLE_HASH,
GENESIS_PARENT_HASH,
)
from eth.db.chain_gaps import (
fill_gap,
GapChange,
GapInfo,
GENESIS_CHAIN_GAPS,
is_block_number_in_gap,
reopen_gap,
)
from eth.db.trie import make_trie_root_and_nodes
from eth.exceptions import (
HeaderNotFound,
ReceiptNotFound,
TransactionNotFound,
)
from eth.db.header import HeaderDB
from eth.db.schema import SchemaV1
from eth.rlp.sedes import chain_gaps
from eth.typing import ChainGaps
from eth.validation import (
validate_word,
)
from eth.vm.header import HeaderSedes
from eth._warnings import catch_and_ignore_import_warning
with catch_and_ignore_import_warning():
import rlp
from trie import (
HexaryTrie,
)
from eth_utils import (
to_tuple,
ValidationError,
)
class TransactionKey(rlp.Serializable):
fields = [
('block_number', rlp.sedes.big_endian_int),
('index', rlp.sedes.big_endian_int),
]
class ChainDB(HeaderDB, ChainDatabaseAPI):
def __init__(self, db: AtomicDatabaseAPI) -> None:
self.db = db
def get_chain_gaps(self) -> ChainGaps:
return self._get_chain_gaps(self.db)
@classmethod
def _get_chain_gaps(cls, db: DatabaseAPI) -> ChainGaps:
try:
encoded_gaps = db[SchemaV1.make_chain_gaps_lookup_key()]
except KeyError:
return GENESIS_CHAIN_GAPS
else:
return rlp.decode(encoded_gaps, sedes=chain_gaps)
@classmethod
def _update_chain_gaps(
cls,
db: DatabaseAPI,
persisted_block: BlockAPI,
base_gaps: ChainGaps = None
) -> GapInfo:
if base_gaps is None:
base_gaps = cls._get_chain_gaps(db)
gap_change, gaps = fill_gap(persisted_block.number, base_gaps)
if gap_change is not GapChange.NoChange:
db.set(
SchemaV1.make_chain_gaps_lookup_key(),
rlp.encode(gaps, sedes=chain_gaps)
)
return gap_change, gaps
@classmethod
def _update_header_chain_gaps(
cls,
db: DatabaseAPI,
persisting_header: BlockHeaderAPI,
base_gaps: ChainGaps = None
) -> GapInfo:
gap_change, gaps = super()._update_header_chain_gaps(db, persisting_header, base_gaps)
if gap_change is not GapChange.NoChange or persisting_header.block_number == 0:
return gap_change, gaps
current_gaps = cls._get_chain_gaps(db)
if not is_block_number_in_gap(persisting_header.block_number, current_gaps):
old_canonical_header = cls._get_canonical_block_header_by_number(
db,
persisting_header.block_number
)
if old_canonical_header != persisting_header:
updated_gaps = reopen_gap(persisting_header.block_number, current_gaps)
db.set(
SchemaV1.make_chain_gaps_lookup_key(),
rlp.encode(updated_gaps, sedes=chain_gaps)
)
return gap_change, gaps
def get_block_uncles(self, uncles_hash: Hash32) -> Tuple[BlockHeaderAPI, ...]:
validate_word(uncles_hash, title="Uncles Hash")
if uncles_hash == EMPTY_UNCLE_HASH:
return ()
try:
encoded_uncles = self.db[uncles_hash]
except KeyError as exc:
raise HeaderNotFound(
f"No uncles found for hash {uncles_hash!r}"
) from exc
else:
return tuple(rlp.decode(encoded_uncles, sedes=rlp.sedes.CountableList(HeaderSedes)))
@classmethod
def _decanonicalize_old_headers(
cls,
db: DatabaseAPI,
numbers_to_decanonicalize: Sequence[BlockNumber],
) -> Tuple[BlockHeaderAPI, ...]:
old_canonical_headers = []
for block_number in numbers_to_decanonicalize:
try:
old_hash = cls._get_canonical_block_hash(db, block_number)
except HeaderNotFound:
break
else:
old_header = cls._get_block_header_by_hash(db, old_hash)
old_canonical_headers.append(old_header)
try:
transaction_hashes = cls._get_block_transaction_hashes(db, old_header)
for transaction_hash in transaction_hashes:
cls._remove_transaction_from_canonical_chain(db, transaction_hash)
except MissingTrieNode:
pass
return tuple(old_canonical_headers)
def persist_block(self,
block: BlockAPI,
genesis_parent_hash: Hash32 = GENESIS_PARENT_HASH
) -> Tuple[Tuple[Hash32, ...], Tuple[Hash32, ...]]:
with self.db.atomic_batch() as db:
return self._persist_block(db, block, genesis_parent_hash)
def persist_unexecuted_block(self,
block: BlockAPI,
receipts: Tuple[ReceiptAPI, ...],
genesis_parent_hash: Hash32 = GENESIS_PARENT_HASH
) -> Tuple[Tuple[Hash32, ...], Tuple[Hash32, ...]]:
tx_root_hash, tx_kv_nodes = make_trie_root_and_nodes(block.transactions)
if tx_root_hash != block.header.transaction_root:
raise ValidationError(
f"Block's transaction_root ({block.header.transaction_root!r}) "
f"does not match expected value: {tx_root_hash!r}"
)
receipt_root_hash, receipt_kv_nodes = make_trie_root_and_nodes(receipts)
if receipt_root_hash != block.header.receipt_root:
raise ValidationError(
f"Block's receipt_root ({block.header.receipt_root!r}) "
f"does not match expected value: {receipt_root_hash!r}"
)
with self.db.atomic_batch() as db:
self._persist_trie_data_dict(db, receipt_kv_nodes)
self._persist_trie_data_dict(db, tx_kv_nodes)
return self._persist_block(db, block, genesis_parent_hash)
@classmethod
def _persist_block(
cls,
db: DatabaseAPI,
block: BlockAPI,
genesis_parent_hash: Hash32) -> Tuple[Tuple[Hash32, ...], Tuple[Hash32, ...]]:
header_chain = (block.header, )
new_canonical_headers, old_canonical_headers = cls._persist_header_chain(
db,
header_chain,
genesis_parent_hash
)
for header in new_canonical_headers:
if header.hash == block.hash:
tx_hashes = tuple(tx.hash for tx in block.transactions)
else:
tx_hashes = cls._get_block_transaction_hashes(db, header)
for index, transaction_hash in enumerate(tx_hashes):
cls._add_transaction_to_canonical_chain(db, transaction_hash, header, index)
if block.uncles:
uncles_hash = cls._persist_uncles(db, block.uncles)
else:
uncles_hash = EMPTY_UNCLE_HASH
if uncles_hash != block.header.uncles_hash:
raise ValidationError(
"Block's uncles_hash (%s) does not match actual uncles' hash (%s)",
block.header.uncles_hash, uncles_hash)
new_canonical_hashes = tuple(header.hash for header in new_canonical_headers)
old_canonical_hashes = tuple(
header.hash for header in old_canonical_headers)
cls._update_chain_gaps(db, block)
return new_canonical_hashes, old_canonical_hashes
def persist_uncles(
self,
uncles: Tuple[BlockHeaderAPI]) -> Hash32:
return self._persist_uncles(self.db, uncles)
@staticmethod
def _persist_uncles(
db: DatabaseAPI,
uncles: Tuple[BlockHeaderAPI, ...]) -> Hash32:
uncles_hash = keccak(rlp.encode(uncles))
db.set(
uncles_hash,
rlp.encode(uncles, sedes=rlp.sedes.CountableList(HeaderSedes)))
return cast(Hash32, uncles_hash)
def add_receipt(self,
block_header: BlockHeaderAPI,
index_key: int, receipt: ReceiptAPI) -> Hash32:
receipt_db = HexaryTrie(db=self.db, root_hash=block_header.receipt_root)
receipt_db[index_key] = receipt.encode()
return receipt_db.root_hash
def add_transaction(self,
block_header: BlockHeaderAPI,
index_key: int,
transaction: SignedTransactionAPI) -> Hash32:
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
transaction_db[index_key] = transaction.encode()
return transaction_db.root_hash
def get_block_transactions(
self,
header: BlockHeaderAPI,
transaction_decoder: Type[TransactionDecoderAPI]) -> Tuple[SignedTransactionAPI, ...]:
return self._get_block_transactions(header.transaction_root, transaction_decoder)
def get_block_transaction_hashes(self, block_header: BlockHeaderAPI) -> Tuple[Hash32, ...]:
return self._get_block_transaction_hashes(self.db, block_header)
@classmethod
@to_tuple
def _get_block_transaction_hashes(
cls,
db: DatabaseAPI,
block_header: BlockHeaderAPI) -> Iterable[Hash32]:
all_encoded_transactions = cls._get_block_transaction_data(
db,
block_header.transaction_root,
)
for encoded_transaction in all_encoded_transactions:
yield cast(Hash32, keccak(encoded_transaction))
@to_tuple
def get_receipts(self,
header: BlockHeaderAPI,
receipt_decoder: Type[ReceiptDecoderAPI]) -> Iterable[ReceiptAPI]:
receipt_db = HexaryTrie(db=self.db, root_hash=header.receipt_root)
for receipt_idx in itertools.count():
receipt_key = rlp.encode(receipt_idx)
receipt_data = receipt_db[receipt_key]
if receipt_data != b'':
yield receipt_decoder.decode(receipt_data)
else:
break
def get_transaction_by_index(
self,
block_number: BlockNumber,
transaction_index: int,
transaction_decoder: Type[TransactionDecoderAPI]) -> SignedTransactionAPI:
try:
block_header = self.get_canonical_block_header_by_number(block_number)
except HeaderNotFound:
raise TransactionNotFound(f"Block {block_number} is not in the canonical chain")
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
encoded_index = rlp.encode(transaction_index)
encoded_transaction = transaction_db[encoded_index]
if encoded_transaction != b'':
return transaction_decoder.decode(encoded_transaction)
else:
raise TransactionNotFound(
f"No transaction is at index {transaction_index} of block {block_number}"
)
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[BlockNumber, int]:
key = SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash)
try:
encoded_key = self.db[key]
except KeyError:
raise TransactionNotFound(
f"Transaction {encode_hex(transaction_hash)} not found in canonical chain"
)
transaction_key = rlp.decode(encoded_key, sedes=TransactionKey)
return (transaction_key.block_number, transaction_key.index)
def get_receipt_by_index(self,
block_number: BlockNumber,
receipt_index: int,
receipt_decoder: Type[ReceiptDecoderAPI],
) -> ReceiptAPI:
try:
block_header = self.get_canonical_block_header_by_number(block_number)
except HeaderNotFound:
raise ReceiptNotFound(f"Block {block_number} is not in the canonical chain")
receipt_db = HexaryTrie(db=self.db, root_hash=block_header.receipt_root)
receipt_key = rlp.encode(receipt_index)
receipt_data = receipt_db[receipt_key]
if receipt_data != b'':
return receipt_decoder.decode(receipt_data)
else:
raise ReceiptNotFound(
f"Receipt with index {receipt_index} not found in block"
)
@staticmethod
def _get_block_transaction_data(db: DatabaseAPI, transaction_root: Hash32) -> Iterable[Hash32]:
transaction_db = HexaryTrie(db, root_hash=transaction_root)
for transaction_idx in itertools.count():
transaction_key = rlp.encode(transaction_idx)
encoded = transaction_db[transaction_key]
if encoded != b'':
yield encoded
else:
break
@functools.lru_cache(maxsize=32)
@to_tuple
def _get_block_transactions(
self,
transaction_root: Hash32,
transaction_decoder: Type[TransactionDecoderAPI]) -> Iterable[SignedTransactionAPI]:
for encoded_transaction in self._get_block_transaction_data(self.db, transaction_root):
yield transaction_decoder.decode(encoded_transaction)
@staticmethod
def _remove_transaction_from_canonical_chain(db: DatabaseAPI, transaction_hash: Hash32) -> None:
db.delete(SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash))
@staticmethod
|
MIT License
|
tudelft-cda-lab/groot
|
groot/model.py
|
GrootTree.__fit_recursive
|
python
|
def __fit_recursive(self, X, y, constraints, depth=0):
if (
(self.max_depth is not None and depth == self.max_depth)
or len(y) < self.min_samples_split
or np.sum(y == 0) == 0
or np.sum(y == 1) == 0
):
return self.__create_leaf(y)
current_gini = gini_impurity(np.sum(y == 0), np.sum(y == 1))
numeric, rule, feature, split_gini = self.__best_adversarial_decision(
X, y, constraints
)
gini_gain = current_gini - split_gini
if rule is None or gini_gain <= 0.00:
return self.__create_leaf(y)
if numeric:
assert rule >= constraints[feature][0]
assert rule < constraints[feature][1]
else:
assert rule[0].isdisjoint(constraints[feature])
assert rule[1].isdisjoint(constraints[feature])
X_left, y_left, X_right, y_right = self.__split_left_right(
X, y, rule, feature, numeric, self.attack_model_[feature]
)
if len(y_left) < self.min_samples_leaf or len(y_right) < self.min_samples_leaf:
return self.__create_leaf(y)
if numeric:
old_right_bound = constraints[feature][1]
constraints[feature][1] = rule
else:
constraints[feature].update(rule[1])
left_node = self.__fit_recursive(X_left, y_left, constraints, depth + 1)
if numeric:
constraints[feature][1] = old_right_bound
old_left_bound = constraints[feature][0]
constraints[feature][0] = rule
else:
constraints[feature].difference_update(rule[1])
constraints[feature].update(rule[0])
right_node = self.__fit_recursive(X_right, y_right, constraints, depth + 1)
if numeric:
constraints[feature][0] = old_left_bound
node = NumericalNode(feature, rule, left_node, right_node, _TREE_UNDEFINED)
else:
constraints[feature].difference_update(rule[0])
node = CategoricalNode(
feature, rule, left_node, right_node, _TREE_UNDEFINED
)
return node
|
Recursively fit the decision tree on the training dataset (X, y).
The constraints make sure that leaves are well formed, e.g. don't
cross an earlier split. Stop when the depth has reached self.max_depth,
when a leaf is pure or when the leaf contains too few samples.
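A minimal usage sketch with toy data (it assumes the full groot package is installed, since only part of the module is reproduced below; the 0.1 perturbation radius per feature is an arbitrary choice for illustration):
import numpy as np
from groot.model import GrootTree

X = np.array([[0.0, 0.2], [0.1, 0.9], [0.8, 0.1], [0.9, 0.8]])
y = np.array([0, 0, 1, 1])

tree = GrootTree(max_depth=2, attack_model=[0.1, 0.1], random_state=0)
tree.fit(X, y)
print(tree.root_.pretty_print())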
|
https://github.com/tudelft-cda-lab/groot/blob/3c06ab8521cf22b76fdc3a38a73e8e9b5a8fa912/groot/model.py#L948-L1026
|
import json
import numbers
import time
import numpy as np
from collections import defaultdict
from itertools import product
from numba import jit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils import check_random_state
from joblib import Parallel, delayed
from sklearn.base import clone
from .util import convert_numpy
_TREE_LEAF = -1
_TREE_UNDEFINED = -2
LEFT = 0
LEFT_INTERSECT = 1
RIGHT_INTERSECT = 2
RIGHT = 3
NOGIL = True
class Node:
def __init__(self, feature, left_child, right_child, value):
self.feature = feature
self.left_child = left_child
self.right_child = right_child
self.value = value
def predict(self, _):
assert self.left_child == _TREE_LEAF
assert self.right_child == _TREE_LEAF
n_samples_in_leaf = np.sum(self.value)
if n_samples_in_leaf == 0:
return [0.0, 1.0]
else:
return self.value / n_samples_in_leaf
def pretty_print(self, depth=0):
indentation = depth * " "
return f"{indentation}return [{self.value[0]:.3f}, {self.value[1]:.3f}]"
def to_json(self):
return {
"value": [self.value[0], self.value[1]],
}
def to_xgboost_json(self, node_id, depth):
return {"nodeid": node_id, "leaf": self.value[1] * 2 - 1}, node_id
def is_leaf(self):
return self.left_child == _TREE_LEAF and self.right_child == _TREE_LEAF
def prune(self, _):
return self
class CategoricalNode(Node):
def __init__(self, feature, category_split, left_child, right_child, value):
super().__init__(feature, left_child, right_child, value)
self.categories_left = category_split[0]
self.categories_right = category_split[1]
def predict(self, sample):
sample_feature = int(sample[self.feature])
if sample_feature in self.categories_left:
return self.left_child.predict(sample)
else:
return self.right_child.predict(sample)
def pretty_print(self, depth=0):
indentation = depth * " "
return f"""{indentation}if x{self.feature} in {self.categories_left}:
{self.left_child.pretty_print(depth + 1)}
{indentation}elif x{self.feature} in {self.categories_right}:
{self.right_child.pretty_print(depth + 1)}"""
def to_json(self):
return {
"feature": self.feature,
"categories_left": list(self.categories_left),
"categories_right": list(self.categories_right),
"left_child": self.left_child.to_json(),
"right_child": self.right_child.to_json(),
}
def to_xgboost_json(self, node_id, depth):
raise NotImplementedError(
"XGBoost JSON is not yet supported for categorical features"
)
class NumericalNode(Node):
def __init__(self, feature, threshold, left_child, right_child, value):
super().__init__(feature, left_child, right_child, value)
self.threshold = threshold
def predict(self, sample):
comparison = sample[self.feature] <= self.threshold
if comparison:
return self.left_child.predict(sample)
else:
return self.right_child.predict(sample)
def pretty_print(self, depth=0):
indentation = depth * " "
return f"""{indentation}if x{self.feature} <= {self.threshold}:
{self.left_child.pretty_print(depth + 1)}
{indentation}else:
{self.right_child.pretty_print(depth + 1)}"""
def to_json(self):
return {
"feature": self.feature,
"threshold": self.threshold,
"left_child": self.left_child.to_json(),
"right_child": self.right_child.to_json(),
}
def to_xgboost_json(self, node_id, depth):
left_id = node_id + 1
left_dict, new_node_id = self.left_child.to_xgboost_json(left_id, depth + 1)
right_id = new_node_id + 1
right_dict, new_node_id = self.right_child.to_xgboost_json(right_id, depth + 1)
return (
{
"nodeid": node_id,
"depth": depth,
"split": self.feature,
"split_condition": self.threshold,
"yes": left_id,
"no": right_id,
"missing": left_id,
"children": [left_dict, right_dict],
},
new_node_id,
)
def prune(self, bounds=defaultdict(lambda: [-np.inf, np.inf])):
old_high = bounds[self.feature][1]
bounds[self.feature][1] = self.threshold
self.left_child = self.left_child.prune(bounds)
bounds[self.feature][1] = old_high
old_low = bounds[self.feature][0]
bounds[self.feature][0] = self.threshold
self.right_child = self.right_child.prune(bounds)
bounds[self.feature][0] = old_low
if self.threshold >= bounds[self.feature][1] or self.threshold == np.inf:
return self.left_child
elif self.threshold <= bounds[self.feature][0] or self.threshold == -np.inf:
return self.right_child
elif (
self.left_child.is_leaf()
and self.right_child.is_leaf()
and self.left_child.value[1] == self.right_child.value[1]
):
return self.left_child
else:
return self
def _attack_model_to_tuples(attack_model):
new_attack_model = []
for attack_mode in attack_model:
if attack_mode == "":
new_attack_model.append((0, 0))
elif attack_mode == ">":
new_attack_model.append((0, 10e9))
elif attack_mode == "<":
new_attack_model.append((10e9, 0))
elif attack_mode == "<>":
new_attack_model.append((10e9, 10e9))
elif isinstance(attack_mode, numbers.Number):
new_attack_model.append((attack_mode, attack_mode))
elif isinstance(attack_mode, tuple) and len(attack_mode) == 2:
new_attack_model.append(attack_mode)
elif isinstance(attack_mode, dict):
new_attack_model.append(attack_mode)
else:
raise Exception("Unknown attack model spec:", attack_mode)
return new_attack_model
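# Added illustration (not in the original source): the shorthand above maps each
# per-feature specification to a (decrease, increase) perturbation budget, e.g.
#   _attack_model_to_tuples(["", 0.1, "<>", (0.0, 0.3)])
#   == [(0, 0), (0.1, 0.1), (10e9, 10e9), (0.0, 0.3)]
# where "" means the feature cannot be perturbed and "<>" means it can be moved
# arbitrarily far in either direction.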
@jit(nopython=True, nogil=NOGIL)
def _split_left_right_fast(self, X, y, rule, feature, inc, dec, chen_heuristic):
b_L = X[:, feature] <= rule - dec
b_LI = (X[:, feature] <= rule) & (X[:, feature] > rule - dec)
b_RI = (X[:, feature] <= rule + inc) & (X[:, feature] > rule)
b_R = X[:, feature] > rule + inc
i_L_0 = np.where(b_L & (y == 0))[0]
i_L_1 = np.where(b_L & (y == 1))[0]
i_LI_0 = np.where(b_LI & (y == 0))[0]
i_LI_1 = np.where(b_LI & (y == 1))[0]
i_RI_0 = np.where(b_RI & (y == 0))[0]
i_RI_1 = np.where(b_RI & (y == 1))[0]
i_R_0 = np.where(b_R & (y == 0))[0]
i_R_1 = np.where(b_R & (y == 1))[0]
if chen_heuristic:
_, x, y = chen_adversarial_gini_gain_two_class(
len(i_L_0),
len(i_L_1),
len(i_LI_0),
len(i_LI_1),
len(i_RI_0),
len(i_RI_1),
len(i_R_0),
len(i_R_1),
)
else:
_, x, y = adversarial_gini_gain_two_class(
len(i_L_0),
len(i_L_1),
len(i_LI_0),
len(i_LI_1),
len(i_RI_0),
len(i_RI_1),
len(i_R_0),
len(i_R_1),
)
if len(i_LI_0) < y:
i_LI_0 = np.append(i_LI_0, i_RI_0[: y - len(i_LI_0)])
i_RI_0 = i_RI_0[y - len(i_LI_0) :]
elif len(i_LI_0) > y:
i_RI_0 = np.append(i_RI_0, i_LI_0[: len(i_LI_0) - y])
i_LI_0 = i_LI_0[: len(i_LI_0) - y]
if len(i_LI_1) < x:
i_LI_1 = np.append(i_LI_1, i_RI_1[: x - len(i_LI_1)])
i_RI_1 = i_RI_1[x - len(i_LI_1) :]
elif len(i_LI_1) > x:
i_RI_1 = np.append(i_RI_1, i_LI_1[: len(i_LI_1) - x])
i_LI_1 = i_LI_1[: len(i_LI_1) - x]
    i_left = np.concatenate((i_L_0, i_L_1, i_LI_0, i_LI_1))  # left region plus its reassigned share of the intersection
i_right = np.concatenate((i_RI_0, i_RI_1, i_R_0, i_R_1))
return X[i_left], y[i_left], X[i_right], y[i_right]
@jit(nopython=True, nogil=NOGIL)
def _scan_numerical_feature_fast(
samples,
y,
dec,
inc,
left_bound,
right_bound,
chen_heuristic,
):
sort_order = samples.argsort()
sorted_labels = y[sort_order]
sample_queue = samples[sort_order]
dec_queue = sample_queue - dec
inc_queue = sample_queue + inc
l_0 = l_1 = li_0 = li_1 = ri_0 = ri_1 = 0
label_counts = np.bincount(y)
r_0 = label_counts[0]
r_1 = label_counts[1]
sample_i = dec_i = inc_i = 0
sample_val = sample_queue[0]
dec_val = dec_queue[0]
inc_val = inc_queue[0]
best_score = 10e9
best_split = None
adv_gini = None
while True:
smallest_val = min(sample_val, dec_val, inc_val)
if sample_val == smallest_val:
point = sample_val
label = sorted_labels[sample_i]
if label == 0:
ri_0 -= 1
li_0 += 1
else:
ri_1 -= 1
li_1 += 1
if sample_i < sample_queue.shape[0] - 1:
sample_i += 1
sample_val = sample_queue[sample_i]
else:
sample_val = 10e9
elif dec_val == smallest_val:
point = dec_val
label = sorted_labels[dec_i]
if label == 0:
r_0 -= 1
ri_0 += 1
else:
r_1 -= 1
ri_1 += 1
if dec_i < dec_queue.shape[0] - 1:
dec_i += 1
dec_val = dec_queue[dec_i]
else:
dec_val = 10e9
else:
point = inc_val
label = sorted_labels[inc_i]
if label == 0:
li_0 -= 1
l_0 += 1
else:
li_1 -= 1
l_1 += 1
if inc_i < inc_queue.shape[0] - 1:
inc_i += 1
inc_val = inc_queue[inc_i]
else:
inc_val = 10e9
if point >= right_bound:
break
next_point = min(sample_val, dec_val, inc_val)
if next_point != point:
if chen_heuristic:
adv_gini, _, __ = chen_adversarial_gini_gain_two_class(
l_0, l_1, li_0, li_1, ri_0, ri_1, r_0, r_1
)
else:
adv_gini, _, __ = adversarial_gini_gain_two_class(
l_0, l_1, li_0, li_1, ri_0, ri_1, r_0, r_1
)
split = (point + next_point) * 0.5
if (
adv_gini is not None
and adv_gini < best_score
and split > left_bound
and split < right_bound
):
best_score = adv_gini
best_split = split
return True, best_score, best_split
def chen_adversarial_gini_gain_one_class(l_0, l_1, r_0, r_1, li_1, ri_1):
i_1 = li_1 + ri_1
s1 = weighted_gini(l_0, l_1 + li_1, r_0, r_1 + ri_1)
s2 = weighted_gini(l_0, l_1, r_0, r_1 + i_1)
s3 = weighted_gini(l_0, l_1 + i_1, r_0, r_1)
s4 = weighted_gini(l_0, l_1 + ri_1, r_0, r_1 + li_1)
worst_case = max(s1, s2, s3, s4)
if s1 == worst_case:
return s1, li_1
if s2 == worst_case:
return s2, 0
if s3 == worst_case:
return s3, i_1
if s4 == worst_case:
return s4, ri_1
@jit(nopython=True, nogil=NOGIL)
def chen_adversarial_gini_gain_two_class(l_0, l_1, li_0, li_1, ri_0, ri_1, r_0, r_1):
i_0 = li_0 + ri_0
i_1 = li_1 + ri_1
s1 = weighted_gini(l_0 + li_0, l_1 + li_1, r_0 + ri_0, r_1 + ri_1)
s2 = weighted_gini(l_0, l_1, r_0 + i_0, r_1 + i_1)
s3 = weighted_gini(l_0 + i_0, l_1 + i_1, r_0, r_1)
s4 = weighted_gini(l_0 + ri_0, l_1 + ri_1, r_0 + li_0, r_1 + li_1)
worst_case = max(s1, s2, s3, s4)
if s1 == worst_case:
return s1, li_1, li_0
if s2 == worst_case:
return s2, 0, 0
if s3 == worst_case:
return s3, i_1, i_0
if s4 == worst_case:
return s4, ri_1, ri_0
@jit(nopython=True, nogil=NOGIL)
def adversarial_gini_gain_one_class(l_0, l_1, r_0, r_1, i_1):
x = max(min((l_0 * r_1 + l_0 * i_1 - l_1 * r_0) / (l_0 + r_0), i_1), 0)
x_floor = np.floor(x)
x_ceil = np.ceil(x)
adv_gini_floor = weighted_gini(l_0, l_1 + x_floor, r_0, r_1 + i_1 - x_floor)
adv_gini_ceil = weighted_gini(l_0, l_1 + x_ceil, r_0, r_1 + i_1 - x_ceil)
if adv_gini_floor > adv_gini_ceil:
return adv_gini_floor, x_floor
else:
return adv_gini_ceil, x_ceil
@jit(nopython=True, nogil=NOGIL)
def adversarial_gini_gain_two_class(l_0, l_1, li_0, li_1, ri_0, ri_1, r_0, r_1):
i_0 = li_0 + ri_0
i_1 = li_1 + ri_1
if i_0 == 0 and i_1 == 0:
return weighted_gini(l_0, l_1, r_0, r_1), 0, 0
if l_1 + r_1 + i_1 == 0:
return (
weighted_gini(l_0 + li_0, l_1 + li_1, r_0 + i_0 - li_0, r_1 + i_1 - li_1),
li_1,
li_0,
)
x_coef = (l_0 + r_0 + i_0) / (l_1 + r_1 + i_1)
intercept = (l_1 * r_0 - l_0 * r_1 - l_0 * i_1 + l_1 * i_0) / (l_1 + r_1 + i_1)
denominator = x_coef ** 2 + 1
x_prime = round((li_1 + x_coef * (li_0 - intercept)) / denominator)
y_prime = round((x_coef * (li_1 + x_coef * li_0) + intercept) / denominator)
if x_prime < 0 and y_prime > i_0:
x_prime = 0
y_prime = i_0
elif x_prime < 0:
x_prime = 0
y_prime = (
l_1 * r_0 - l_0 * r_1 - l_0 * i_1 + l_1 * i_0 + (l_0 + r_0 + i_0) * x_prime
) / (l_1 + r_1 + i_1)
if y_prime > i_0:
x_prime = 0
y_prime = i_0
elif x_prime > i_1 and y_prime < 0:
x_prime = i_1
y_prime = 0
elif x_prime > i_1:
x_prime = i_1
y_prime = (
l_1 * r_0 - l_0 * r_1 - l_0 * i_1 + l_1 * i_0 + (l_0 + r_0 + i_0) * x_prime
) / (l_1 + r_1 + i_1)
if y_prime < 0:
x_prime = i_1
y_prime = 0
elif y_prime < 0:
y_prime = 0
x_prime = (
l_0 * r_1 + l_0 * i_1 - l_1 * r_0 - l_1 * i_0 + (l_1 + r_1 + i_1) * y_prime
) / (l_0 + r_0 + i_0)
if x_prime > i_1:
x_prime = i_1
y_prime = 0
elif y_prime > i_0:
y_prime = i_0
x_prime = (
l_0 * r_1 + l_0 * i_1 - l_1 * r_0 - l_1 * i_0 + (l_1 + r_1 + i_1) * y_prime
) / (l_0 + r_0 + i_0)
if x_prime < 0:
x_prime = 0
y_prime = i_0
x_prime = int(round(x_prime))
y_prime = int(round(y_prime))
assert x_prime >= 0 and x_prime <= i_1
assert y_prime >= 0 and y_prime <= i_0
return (
weighted_gini(
l_0 + y_prime, l_1 + x_prime, r_0 + i_0 - y_prime, r_1 + i_1 - x_prime
),
x_prime,
y_prime,
)
@jit(nopython=True, nogil=NOGIL)
def gini_impurity(i_0, i_1):
if i_0 + i_1 == 0:
return 1.0
ratio = i_0 / (i_0 + i_1)
return 1.0 - (ratio ** 2) - ((1 - ratio) ** 2)
@jit(nopython=True, nogil=NOGIL)
def weighted_gini(l_0, l_1, r_0, r_1):
l_t = l_0 + l_1
r_t = r_0 + r_1
if l_t == 0:
l_p = 1.0
else:
l_p = l_0 / (l_0 + l_1)
if r_t == 0:
r_p = 1.0
else:
r_p = r_0 / (r_0 + r_1)
gini = l_t * (1 - (l_p ** 2) - ((1 - l_p) ** 2)) + r_t * (
1 - (r_p ** 2) - ((1 - r_p) ** 2)
)
total = l_t + r_t
if total != 0:
gini /= total
return gini
else:
return 1.0
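# Worked example (added commentary, not in the original source): for a split with
# 8 class-0 and 2 class-1 samples on the left and 1 class-0 and 9 class-1 on the right,
#   left impurity  = 1 - 0.8**2 - 0.2**2 = 0.32
#   right impurity = 1 - 0.1**2 - 0.9**2 = 0.18
#   weighted_gini(8, 2, 1, 9) = (10 * 0.32 + 10 * 0.18) / 20 = 0.25
# This is the quantity the adversarial variants above maximise over the worst-case
# placement of the intersection samples before the best split is chosen.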
def _counts_to_one_class_adv_gini(counts, rho, chen_heuristic):
rho_inv = 1.0 - rho
left_mal = counts[LEFT][1] + int(round(rho_inv * counts[LEFT_INTERSECT][1]))
right_mal = counts[RIGHT][1] + int(round(rho_inv * counts[RIGHT_INTERSECT][1]))
left_i_mal = int(round(rho * counts[LEFT_INTERSECT][1]))
right_i_mal = int(round(rho * counts[RIGHT_INTERSECT][1]))
if chen_heuristic:
adv_gini, _ = chen_adversarial_gini_gain_one_class(
counts[LEFT][0],
left_mal,
counts[RIGHT][0],
right_mal,
left_i_mal,
right_i_mal,
)
else:
adv_gini, _ = adversarial_gini_gain_one_class(
counts[LEFT][0],
left_mal,
counts[RIGHT][0],
right_mal,
left_i_mal + right_i_mal,
)
return adv_gini
@jit(nopython=True, nogil=NOGIL)
def _counts_to_two_class_adv_gini(counts, rho, chen_heuristic):
rho_inv = 1.0 - rho
left = counts[LEFT] + np.rint(rho_inv * counts[LEFT_INTERSECT]).astype(np.int64)
right = counts[RIGHT] + np.rint(rho_inv * counts[RIGHT_INTERSECT]).astype(np.int64)
left_i = np.rint(rho * counts[LEFT_INTERSECT]).astype(np.int64)
right_i = np.rint(rho * counts[RIGHT_INTERSECT]).astype(np.int64)
if chen_heuristic:
adv_gini, _, _ = chen_adversarial_gini_gain_two_class(
left[0],
left[1],
left_i[0],
left_i[1],
right_i[0],
right_i[1],
right[0],
right[1],
)
else:
adv_gini, _, _ = adversarial_gini_gain_two_class(
left[0],
left[1],
left_i[0],
left_i[1],
right_i[0],
right_i[1],
right[0],
right[1],
)
return adv_gini
@jit(nopython=True, nogil=NOGIL)
def _categorical_counts_to_one_class_adv_gini(
left_counts,
left_intersection_counts,
right_intersection_counts,
right_counts,
rho,
chen_heuristic,
):
inv_rho = 1.0 - rho
left_counts += np.rint(inv_rho * left_intersection_counts).astype(np.int64)
right_counts += np.rint(inv_rho * right_intersection_counts).astype(np.int64)
left_intersection_counts = np.rint(rho * left_intersection_counts).astype(np.int64)
right_intersection_counts = np.rint(rho * right_intersection_counts).astype(
np.int64
)
if chen_heuristic:
adv_gini, _ = chen_adversarial_gini_gain_one_class(
left_counts[0],
left_counts[1],
right_counts[0],
right_counts[1],
left_intersection_counts[1],
right_intersection_counts[1],
)
else:
adv_gini, _ = adversarial_gini_gain_one_class(
left_counts[0],
left_counts[1],
right_counts[0],
right_counts[1],
left_intersection_counts[1] + right_intersection_counts[1],
)
return adv_gini
@jit(nopython=True, nogil=NOGIL)
def _categorical_counts_to_two_class_adv_gini(
left_counts,
left_intersection_counts,
right_intersection_counts,
right_counts,
rho,
chen_heuristic,
):
inv_rho = 1.0 - rho
left_counts += np.rint(inv_rho * left_intersection_counts).astype(np.int64)
right_counts += np.rint(inv_rho * right_intersection_counts).astype(np.int64)
left_intersection_counts = np.rint(rho * left_intersection_counts).astype(np.int64)
right_intersection_counts = np.rint(rho * right_intersection_counts).astype(
np.int64
)
if chen_heuristic:
adv_gini, _, _ = chen_adversarial_gini_gain_two_class(
left_counts[0],
left_counts[1],
left_intersection_counts[0],
left_intersection_counts[1],
right_intersection_counts[0],
right_intersection_counts[1],
right_counts[0],
right_counts[1],
)
else:
adv_gini, _, _ = adversarial_gini_gain_two_class(
left_counts[0],
left_counts[1],
left_intersection_counts[0],
left_intersection_counts[1],
right_intersection_counts[0],
right_intersection_counts[1],
right_counts[0],
right_counts[1],
)
return adv_gini
def _identify_intersection_categories(
left_categories,
right_categories,
categories_counts,
attack_mode_array,
one_adversarial_class,
):
left_intersection_mask = np.any(attack_mode_array[:, right_categories], axis=1)
left_intersection_mask[right_categories] = 0
left_intersection_counts = np.sum(categories_counts[left_intersection_mask], axis=0)
right_intersection_mask = np.any(attack_mode_array[:, left_categories], axis=1)
right_intersection_mask[left_categories] = 0
right_intersection_counts = np.sum(
categories_counts[right_intersection_mask], axis=0
)
if one_adversarial_class:
left_intersection_counts[0] = 0
right_intersection_counts[0] = 0
return left_intersection_counts, right_intersection_counts
class GrootTree(BaseEstimator, ClassifierMixin):
def __init__(
self,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
robust_weight=1.0,
attack_model=None,
is_numerical=None,
one_adversarial_class=False,
chen_heuristic=False,
random_state=None,
):
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_features = max_features
self.robust_weight = robust_weight
self.attack_model = attack_model
self.is_numerical = is_numerical
self.one_adversarial_class = one_adversarial_class
self.chen_heuristic = chen_heuristic
self.random_state = random_state
def fit(self, X, y):
X, y = check_X_y(X, y)
target_type = type_of_target(y)
if target_type != "binary":
raise ValueError(
f"Unknown label type: classifier only supports binary labels but found {target_type}"
)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
self.n_samples_, self.n_features_in_ = X.shape
if self.attack_model is None:
self.attack_model = [""] * X.shape[1]
if self.is_numerical is None:
self.is_numerical = [True] * X.shape[1]
self.attack_model_ = _attack_model_to_tuples(self.attack_model)
self.n_categories_ = []
for feature, numeric in enumerate(self.is_numerical):
if numeric:
self.n_categories_.append(None)
else:
self.n_categories_.append(int(np.max(X[:, feature])) + 1)
self.random_state_ = check_random_state(self.random_state)
if self.max_features == "sqrt":
self.max_features_ = int(np.sqrt(self.n_features_in_))
elif self.max_features == "log2":
self.max_features_ = int(np.log2(self.n_features_in_))
elif self.max_features is None:
self.max_features_ = self.n_features_in_
else:
self.max_features_ = self.max_features
if self.max_features_ == 0:
self.max_features_ = 1
constraints = []
for feature_i, numeric in enumerate(self.is_numerical):
if numeric:
constraints.append([np.min(X[:, feature_i]), np.max(X[:, feature_i])])
else:
constraints.append(set())
self.root_ = self.__fit_recursive(X, y, constraints)
return self
|
MIT License
|
peerchemist/cryptotik
|
cryptotik/bittrex.py
|
Bittrex.private_api
|
python
|
def private_api(self, url, params):
params.update({"apikey": self.apikey, "nonce": self.get_nonce()})
url += "?" + requests.compat.urlencode(params)
self.headers.update({"apisign": self._generate_signature(url)
})
try:
response = self.api_session.get(url, headers=self.headers,
timeout=self.timeout,
proxies=self.proxy)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
self._verify_response(response)
return response.json()
|
Handles private (authenticated) API methods.
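Sketched in isolation (the secret and URL below are placeholders): the apikey and nonce are appended as query parameters and the full URL is HMAC-SHA512 signed with the API secret, matching _generate_signature in the module below; the digest is then sent as the apisign header.
import hmac
import hashlib

secret = b"my-api-secret"    # placeholder
signed_url = "https://bittrex.com/api/v1.1/account/getbalances?apikey=KEY&nonce=1712345678"
apisign = hmac.new(secret, signed_url.encode(), hashlib.sha512).hexdigest()
# 'apisign' accompanies the GET request as a header.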
|
https://github.com/peerchemist/cryptotik/blob/24ffd74c43ff1fc171081e135cb2b66b775af3f3/cryptotik/bittrex.py#L109-L126
|
import requests
from cryptotik.common import (headers, ExchangeWrapper,
NormalizedExchangeWrapper)
from cryptotik.exceptions import (InvalidBaseCurrencyError,
InvalidDelimiterError,
APIError,
OutdatedBaseCurrenciesError)
from cryptotik.common import is_sale
import time
import hmac
import hashlib
from datetime import datetime
from decimal import Decimal
class Bittrex(ExchangeWrapper):
name = 'bittrex'
url = 'https://bittrex.com/api/v1.1/'
url2 = 'https://bittrex.com/Api/v2.0/'
delimiter = "-"
headers = headers
taker_fee, maker_fee = 0.0025, 0.0025
private_commands = ('getopenorders', 'cancel', 'sellmarket', 'selllimit',
'buymarket', 'buylimit')
public_commands = ('getbalances', 'getbalance', 'getdepositaddress',
'withdraw')
base_currencies = ['btc', 'eth', 'usdt']
quote_order = 1
def __init__(self, apikey=None, secret=None, timeout=None, proxy=None):
if apikey and secret:
self.apikey = apikey.encode("utf-8")
self.secret = secret.encode("utf-8")
if proxy:
assert proxy.startswith('https'), {'Error': 'Only https proxies supported.'}
self.proxy = {'https': proxy}
if not timeout:
self.timeout = (8, 15)
else:
self.timeout = timeout
self.api_session = requests.Session()
def get_base_currencies(self):
bases = list(set([i.split('-')[0] for i in self.get_markets()]))
try:
assert sorted(bases) == sorted(self.base_currencies)
except AssertionError:
            raise OutdatedBaseCurrenciesError('Update the hardcoded base currency list!',
{'actual': bases,
'hardcoded': self.base_currencies})
def get_nonce(self):
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
self._nonce = max(int(time.time()), nonce)
return self._nonce
@classmethod
def format_pair(self, pair):
pair = pair.replace("_", self.delimiter)
if not pair.islower():
return pair.lower()
else:
return pair
def _verify_response(self, response):
if not response.json()['success'] is True:
raise APIError(response.json()['message'])
def _generate_signature(self, url):
return hmac.new(self.secret, url.encode(), hashlib.sha512).hexdigest()
def api(self, url, params):
try:
response = self.api_session.get(url, params=params, headers=self.headers,
timeout=self.timeout, proxies=self.proxy)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
self._verify_response(response)
return response.json()
|
BSD 3-Clause New or Revised License
|
ibm-security/ibmsecurity
|
ibmsecurity/isds/appliance.py
|
_changes_available
|
python
|
def _changes_available(isdsAppliance):
return True
|
Check for pending changes on the appliance
:param isdsAppliance:
:return:
|
https://github.com/ibm-security/ibmsecurity/blob/da098f7d555e571a99a0d7cd47a51add483feb6f/ibmsecurity/isds/appliance.py#L32-L54
|
import json
import logging
import time
logger = logging.getLogger(__name__)
def reboot(isdsAppliance, check_mode=False, force=False):
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_post("Rebooting appliance",
"/diagnostics/restart_shutdown/reboot",
{})
def shutdown(isdsAppliance, check_mode=False, force=False):
if check_mode is True:
return isdsAppliance.create_return_object(changed=True)
else:
return isdsAppliance.invoke_post("Shutting down appliance",
"/diagnostics/restart_shutdown/shutdown",
{})
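A note on the check-mode convention used by these helpers (the appliance handle below is hypothetical): when check_mode is True, the function reports that a change would occur without issuing the POST, which lets callers preview a reboot or shutdown.
# result = reboot(isds_appliance, check_mode=True)   # no POST is sent
# result is isds_appliance.create_return_object(changed=True), i.e. a dry-run answer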
|
Apache License 2.0
|
vincentstimper/normalizing-flows
|
normflow/distributions.py
|
Sinusoidal.__init__
|
python
|
def __init__(self, scale, period):
self.scale = scale
self.period = period
|
Distribution 2d with sinusoidal density
:param scale: scale of the sinusoidal density
:param period: period of the sinusoid
|
https://github.com/vincentstimper/normalizing-flows/blob/8ee48560a864ac9821c9ea5fef8104c7d8c5b038/normflow/distributions.py#L778-L785
|
import torch
import torch.nn as nn
import numpy as np
from . import flows
class BaseDistribution(nn.Module):
def __init__(self):
super().__init__()
def forward(self, num_samples=1):
raise NotImplementedError
def log_prob(self, z):
raise NotImplementedError
class DiagGaussian(BaseDistribution):
def __init__(self, shape, trainable=True):
super().__init__()
if isinstance(shape, int):
shape = (shape,)
self.shape = shape
self.n_dim = len(shape)
self.d = np.prod(shape)
if trainable:
self.loc = nn.Parameter(torch.zeros(1, *self.shape))
self.log_scale = nn.Parameter(torch.zeros(1, *self.shape))
else:
self.register_buffer("loc", torch.zeros(1, *self.shape))
self.register_buffer("log_scale", torch.zeros(1, *self.shape))
self.temperature = None
def forward(self, num_samples=1):
eps = torch.randn((num_samples,) + self.shape, dtype=self.loc.dtype,
device=self.loc.device)
if self.temperature is None:
log_scale = self.log_scale
else:
log_scale = self.log_scale + np.log(self.temperature)
z = self.loc + torch.exp(log_scale) * eps
log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1)))
return z, log_p
def log_prob(self, z):
if self.temperature is None:
log_scale = self.log_scale
else:
log_scale = self.log_scale + np.log(self.temperature)
log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow((z - self.loc) / torch.exp(log_scale), 2),
list(range(1, self.n_dim + 1)))
return log_p
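# Added consistency note (not in the original source): forward() and log_prob()
# share the same diagonal-Gaussian density,
#   log p(z) = -0.5 * d * log(2*pi) - sum(log_scale + 0.5 * eps**2),  eps = (z - loc) / exp(log_scale)
# so samples drawn from forward() reproduce their own log-density:
#   base = DiagGaussian(2, trainable=False)
#   z, log_p = base.forward(num_samples=4)
#   assert torch.allclose(log_p, base.log_prob(z), atol=1e-5)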
class ClassCondDiagGaussian(BaseDistribution):
def __init__(self, shape, num_classes):
super().__init__()
if isinstance(shape, int):
shape = (shape,)
self.shape = shape
self.n_dim = len(shape)
self.perm = [self.n_dim] + list(range(self.n_dim))
self.d = np.prod(shape)
self.num_classes = num_classes
self.loc = nn.Parameter(torch.zeros(*self.shape, num_classes))
self.log_scale = nn.Parameter(torch.zeros(*self.shape, num_classes))
self.temperature = None
def forward(self, num_samples=1, y=None):
if y is not None:
num_samples = len(y)
else:
y = torch.randint(self.num_classes, (num_samples,), device=self.loc.device)
if y.dim() == 1:
y_onehot = torch.zeros((self.num_classes, num_samples), dtype=self.loc.dtype,
device=self.loc.device)
y_onehot.scatter_(0, y[None], 1)
y = y_onehot
else:
y = y.t()
eps = torch.randn((num_samples,) + self.shape, dtype=self.loc.dtype,
device=self.loc.device)
loc = (self.loc @ y).permute(*self.perm)
log_scale = (self.log_scale @ y).permute(*self.perm)
if self.temperature is not None:
log_scale = np.log(self.temperature) + log_scale
z = loc + torch.exp(log_scale) * eps
log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1)))
return z, log_p
def log_prob(self, z, y):
if y.dim() == 1:
y_onehot = torch.zeros((self.num_classes, len(y)), dtype=self.loc.dtype,
device=self.loc.device)
y_onehot.scatter_(0, y[None], 1)
y = y_onehot
else:
y = y.t()
loc = (self.loc @ y).permute(*self.perm)
log_scale = (self.log_scale @ y).permute(*self.perm)
if self.temperature is not None:
log_scale = np.log(self.temperature) + log_scale
log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow((z - loc) / torch.exp(log_scale), 2),
list(range(1, self.n_dim + 1)))
return log_p
class GlowBase(BaseDistribution):
def __init__(self, shape, num_classes=None, logscale_factor=3.):
super().__init__()
if isinstance(shape, int):
shape = (shape,)
self.shape = shape
self.n_dim = len(shape)
self.num_pix = np.prod(shape[1:])
self.d = np.prod(shape)
self.sum_dim = list(range(1, self.n_dim + 1))
self.num_classes = num_classes
self.class_cond = num_classes is not None
self.logscale_factor = logscale_factor
self.loc = nn.Parameter(torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1])))
self.loc_logs = nn.Parameter(torch.zeros(1, self.shape[0],
*((self.n_dim - 1) * [1])))
self.log_scale = nn.Parameter(torch.zeros(1, self.shape[0],
*((self.n_dim - 1) * [1])))
self.log_scale_logs = nn.Parameter(torch.zeros(1, self.shape[0],
*((self.n_dim - 1) * [1])))
if self.class_cond:
self.loc_cc = nn.Parameter(torch.zeros(self.num_classes, self.shape[0]))
self.log_scale_cc = nn.Parameter(torch.zeros(self.num_classes, self.shape[0]))
self.temperature = None
def forward(self, num_samples=1, y=None):
loc = self.loc * torch.exp(self.loc_logs * self.logscale_factor)
log_scale = self.log_scale * torch.exp(self.log_scale_logs * self.logscale_factor)
if self.class_cond:
if y is not None:
num_samples = len(y)
else:
y = torch.randint(self.num_classes, (num_samples,), device=self.loc.device)
if y.dim() == 1:
y_onehot = torch.zeros((len(y), self.num_classes), dtype=self.loc.dtype,
device=self.loc.device)
y_onehot.scatter_(1, y[:, None], 1)
y = y_onehot
loc = loc + (y @ self.loc_cc).view(y.size(0), self.shape[0],
*((self.n_dim - 1) * [1]))
log_scale = log_scale + (y @ self.log_scale_cc).view(y.size(0), self.shape[0],
*((self.n_dim - 1) * [1]))
if self.temperature is not None:
log_scale = log_scale + np.log(self.temperature)
eps = torch.randn((num_samples,) + self.shape, dtype=self.loc.dtype,
device=self.loc.device)
z = loc + torch.exp(log_scale) * eps
log_p = - 0.5 * self.d * np.log(2 * np.pi) - self.num_pix * torch.sum(log_scale, dim=self.sum_dim) - 0.5 * torch.sum(torch.pow(eps, 2), dim=self.sum_dim)
return z, log_p
def log_prob(self, z, y=None):
loc = self.loc * torch.exp(self.loc_logs * self.logscale_factor)
log_scale = self.log_scale * torch.exp(self.log_scale_logs * self.logscale_factor)
if self.class_cond:
if y.dim() == 1:
y_onehot = torch.zeros((len(y), self.num_classes), dtype=self.loc.dtype,
device=self.loc.device)
y_onehot.scatter_(1, y[:, None], 1)
y = y_onehot
loc = loc + (y @ self.loc_cc).view(y.size(0), self.shape[0],
*((self.n_dim - 1) * [1]))
log_scale = log_scale + (y @ self.log_scale_cc).view(y.size(0), self.shape[0],
*((self.n_dim - 1) * [1]))
if self.temperature is not None:
log_scale = log_scale + np.log(self.temperature)
log_p = - 0.5 * self.d * np.log(2 * np.pi) - self.num_pix * torch.sum(log_scale, dim=self.sum_dim) - 0.5 * torch.sum(torch.pow((z - loc) / torch.exp(log_scale), 2),
dim=self.sum_dim)
return log_p
class AffineGaussian(BaseDistribution):
def __init__(self, shape, affine_shape, num_classes=None):
super().__init__()
self.shape = shape
self.n_dim = len(shape)
self.d = np.prod(shape)
self.sum_dim = list(range(1, self.n_dim + 1))
self.affine_shape = affine_shape
self.num_classes = num_classes
self.class_cond = num_classes is not None
if self.class_cond:
self.transform = flows.CCAffineConst(self.affine_shape, self.num_classes)
else:
self.transform = flows.AffineConstFlow(self.affine_shape)
self.temperature = None
def forward(self, num_samples=1, y=None):
dtype = self.transform.s.dtype
device = self.transform.s.device
if self.class_cond:
if y is not None:
num_samples = len(y)
else:
y = torch.randint(self.num_classes, (num_samples,), device=device)
if y.dim() == 1:
y_onehot = torch.zeros((len(y), self.num_classes), dtype=dtype, device=device)
y_onehot.scatter_(1, y[:, None], 1)
y = y_onehot
if self.temperature is not None:
log_scale = np.log(self.temperature)
else:
log_scale = 0.
eps = torch.randn((num_samples,) + self.shape, dtype=dtype, device=device)
z = np.exp(log_scale) * eps
log_p = - 0.5 * self.d * np.log(2 * np.pi) - self.d * log_scale - 0.5 * torch.sum(torch.pow(eps, 2), dim=self.sum_dim)
if self.class_cond:
z, log_det = self.transform(z, y)
else:
z, log_det = self.transform(z)
log_p -= log_det
return z, log_p
def log_prob(self, z, y=None):
if self.class_cond:
if y.dim() == 1:
y_onehot = torch.zeros((len(y), self.num_classes),
dtype=self.transform.s.dtype,
device=self.transform.s.device)
y_onehot.scatter_(1, y[:, None], 1)
y = y_onehot
if self.temperature is not None:
log_scale = np.log(self.temperature)
else:
log_scale = 0.
if self.class_cond:
z, log_p = self.transform.inverse(z, y)
else:
z, log_p = self.transform.inverse(z)
z = z / np.exp(log_scale)
log_p = log_p - self.d * log_scale - 0.5 * self.d * np.log(2 * np.pi) - 0.5 * torch.sum(torch.pow(z, 2), dim=self.sum_dim)
return log_p
class GaussianMixture(BaseDistribution):
def __init__(self, n_modes, dim, loc=None, scale=None, weights=None, trainable=True):
super().__init__()
self.n_modes = n_modes
self.dim = dim
if loc is None:
loc = np.random.randn(self.n_modes, self.dim)
loc = np.array(loc)[None, ...]
if scale is None:
scale = np.ones((self.n_modes, self.dim))
scale = np.array(scale)[None, ...]
if weights is None:
weights = np.ones(self.n_modes)
weights = np.array(weights)[None, ...]
weights /= weights.sum(1)
if trainable:
self.loc = nn.Parameter(torch.tensor(1. * loc))
self.log_scale = nn.Parameter(torch.tensor(np.log(1. * scale)))
self.weight_scores = nn.Parameter(torch.tensor(np.log(1. * weights)))
else:
self.register_buffer("loc", torch.tensor(1. * loc))
self.register_buffer("log_scale", torch.tensor(np.log(1. * scale)))
self.register_buffer("weight_scores", torch.tensor(np.log(1. * weights)))
def forward(self, num_samples=1):
mode_ind = torch.randint(high=self.n_modes, size=(num_samples,))
mode_1h = torch.zeros((num_samples, self.n_modes), dtype=torch.int64)
mode_1h.scatter_(1, mode_ind[:, None], 1)
mode_1h = mode_1h[..., None]
weights = torch.softmax(self.weight_scores, 1)
eps = torch.randn(num_samples, self.dim, dtype=self.loc.dtype, device=self.loc.device)
scale_sample = torch.sum(torch.exp(self.log_scale) * mode_1h, 1)
loc_sample = torch.sum(self.loc * mode_1h, 1)
z = eps * scale_sample + loc_sample
log_p = - 0.5 * self.dim * np.log(2 * np.pi) + torch.log(weights) - 0.5 * torch.sum(torch.pow(eps, 2), 1, keepdim=True) - torch.sum(self.log_scale, 2)
log_p = torch.logsumexp(log_p, 1)
return z, log_p
def log_prob(self, z):
weights = torch.softmax(self.weight_scores, 1)
eps = (z[:, None, :] - self.loc) / torch.exp(self.log_scale)
log_p = - 0.5 * self.dim * np.log(2 * np.pi) + torch.log(weights) - 0.5 * torch.sum(torch.pow(eps, 2), 2) - torch.sum(self.log_scale, 2)
log_p = torch.logsumexp(log_p, 1)
return log_p
class GaussianPCA(BaseDistribution):
def __init__(self, dim, latent_dim=None, sigma=0.1):
super().__init__()
self.dim = dim
if latent_dim is None:
self.latent_dim = dim
else:
self.latent_dim = latent_dim
self.loc = nn.Parameter(torch.zeros(1, dim))
        self.W = nn.Parameter(torch.randn(self.latent_dim, dim))
self.log_sigma = nn.Parameter(torch.tensor(np.log(sigma)))
def forward(self, num_samples=1):
eps = torch.randn(num_samples, self.latent_dim, dtype=self.loc.dtype,
device=self.loc.device)
z_ = torch.matmul(eps, self.W)
z = z_ + self.loc
Sig = torch.matmul(self.W.T, self.W) + torch.exp(self.log_sigma * 2) * torch.eye(self.dim, dtype=self.loc.dtype, device=self.loc.device)
log_p = self.dim / 2 * np.log(2 * np.pi) - 0.5 * torch.det(Sig) - 0.5 * torch.sum(z_ * torch.matmul(z_, torch.inverse(Sig)), 1)
return z, log_p
def log_prob(self, z):
z_ = z - self.loc
Sig = torch.matmul(self.W.T, self.W) + torch.exp(self.log_sigma * 2) * torch.eye(self.dim, dtype=self.loc.dtype, device=self.loc.device)
log_p = self.dim / 2 * np.log(2 * np.pi) - 0.5 * torch.det(Sig) - 0.5 * torch.sum(z_ * torch.matmul(z_, torch.inverse(Sig)), 1)
return log_p
class BaseEncoder(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, num_samples=1):
raise NotImplementedError
def log_prob(self, z, x):
raise NotImplementedError
class Dirac(BaseEncoder):
def __init__(self):
super().__init__()
def forward(self, x, num_samples=1):
z = x.unsqueeze(1).repeat(1, num_samples, 1)
log_p = torch.zeros(z.size()[0:2])
return z, log_p
def log_prob(self, z, x):
log_p = torch.zeros(z.size()[0:2])
return log_p
class Uniform(BaseEncoder):
def __init__(self, zmin=0.0, zmax=1.0):
super().__init__()
self.zmin = zmin
self.zmax = zmax
        self.log_p = -np.log(zmax - zmin)
def forward(self, x, num_samples=1):
        z = x.unsqueeze(1).repeat(1, num_samples, 1).uniform_(self.zmin, self.zmax)
log_p = torch.zeros(z.size()[0:2]).fill_(self.log_p)
return z, log_p
def log_prob(self, z, x):
log_p = torch.zeros(z.size()[0:2]).fill_(self.log_p)
return log_p
class ConstDiagGaussian(BaseEncoder):
def __init__(self, loc, scale):
super().__init__()
self.d = len(loc)
if not torch.is_tensor(loc):
loc = torch.tensor(loc).float()
if not torch.is_tensor(scale):
scale = torch.tensor(scale).float()
self.loc = nn.Parameter(loc.reshape((1, 1, self.d)))
self.scale = nn.Parameter(scale)
def forward(self, x=None, num_samples=1):
if x is not None:
batch_size = len(x)
else:
batch_size = 1
eps = torch.randn((batch_size, num_samples, self.d), device=x.device)
z = self.loc + self.scale * eps
log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(torch.log(self.scale) + 0.5 * torch.pow(eps, 2), 2)
return z, log_p
def log_prob(self, z, x):
if z.dim() == 1:
z = z.unsqueeze(0)
if z.dim() == 2:
z = z.unsqueeze(0)
log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(torch.log(self.scale) + 0.5 * ((z - self.loc) / self.scale) ** 2, 2)
return log_p
class NNDiagGaussian(BaseEncoder):
def __init__(self, net):
super().__init__()
self.net = net
def forward(self, x, num_samples=1):
batch_size = len(x)
mean_std = self.net(x)
n_hidden = mean_std.size()[1] // 2
mean = mean_std[:, :n_hidden, ...].unsqueeze(1)
std = torch.exp(0.5 * mean_std[:, n_hidden:(2 * n_hidden), ...].unsqueeze(1))
eps = torch.randn((batch_size, num_samples) + tuple(mean.size()[2:]), device=x.device)
z = mean + std * eps
log_p = - 0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(2 * np.pi) - torch.sum(torch.log(std) + 0.5 * torch.pow(eps, 2), list(range(2, z.dim())))
return z, log_p
def log_prob(self, z, x):
if z.dim() == 1:
z = z.unsqueeze(0)
if z.dim() == 2:
z = z.unsqueeze(0)
mean_std = self.net(x)
n_hidden = mean_std.size()[1] // 2
mean = mean_std[:, :n_hidden, ...].unsqueeze(1)
var = torch.exp(mean_std[:, n_hidden:(2 * n_hidden), ...].unsqueeze(1))
log_p = - 0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(2 * np.pi) - 0.5 * torch.sum(torch.log(var) + (z - mean) ** 2 / var, 2)
return log_p
class Decoder(nn.Module):
def __init__(self):
super().__init__()
def forward(self, z):
raise NotImplementedError
def log_prob(self, x, z):
raise NotImplementedError
class NNDiagGaussianDecoder(Decoder):
def __init__(self, net):
super().__init__()
self.net = net
def forward(self, z):
z_size = z.size()
mean_std = self.net(z.view(-1, *z_size[2:])).view(z_size)
n_hidden = mean_std.size()[2] // 2
mean = mean_std[:, :, :n_hidden, ...]
std = torch.exp(0.5 * mean_std[:, :, n_hidden:(2 * n_hidden), ...])
return mean, std
def log_prob(self, x, z):
mean_std = self.net(z.view(-1, *z.size()[2:])).view(*z.size()[:2], x.size(1) * 2, *x.size()[3:])
n_hidden = mean_std.size()[2] // 2
mean = mean_std[:, :, :n_hidden, ...]
var = torch.exp(mean_std[:, :, n_hidden:(2 * n_hidden), ...])
log_p = - 0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(2 * np.pi) - 0.5 * torch.sum(torch.log(var) + (x.unsqueeze(1) - mean) ** 2 / var, list(range(2, z.dim())))
return log_p
class NNBernoulliDecoder(Decoder):
def __init__(self, net):
super().__init__()
self.net = net
def forward(self, z):
mean = torch.sigmoid(self.net(z))
return mean
def log_prob(self, x, z):
score = self.net(z)
x = x.unsqueeze(1)
x = x.repeat(1, z.size()[0] // x.size()[0], *((x.dim() - 2) * [1])).view(-1, *x.size()[2:])
log_sig = lambda a: -torch.relu(-a) - torch.log(1 + torch.exp(-torch.abs(a)))
log_p = torch.sum(x * log_sig(score) + (1 - x) * log_sig(-score), list(range(1, x.dim())))
return log_p
class PriorDistribution:
def __init__(self):
raise NotImplementedError
def log_prob(self, z):
raise NotImplementedError
class ImagePrior(nn.Module):
def __init__(self, image, x_range=[-3, 3], y_range=[-3, 3], eps=1.e-10):
super().__init__()
image_ = np.flip(image, 0).transpose() + eps
self.image_cpu = torch.tensor(image_ / np.max(image_))
self.image_size_cpu = self.image_cpu.size()
self.x_range = torch.tensor(x_range)
self.y_range = torch.tensor(y_range)
self.register_buffer('image', self.image_cpu)
self.register_buffer('image_size', torch.tensor(self.image_size_cpu).unsqueeze(0))
self.register_buffer('density', torch.log(self.image_cpu / torch.sum(self.image_cpu)))
self.register_buffer('scale', torch.tensor([[self.x_range[1] - self.x_range[0],
self.y_range[1] - self.y_range[0]]]))
self.register_buffer('shift', torch.tensor([[self.x_range[0], self.y_range[0]]]))
def log_prob(self, z):
z_ = torch.clamp((z - self.shift) / self.scale, max=1, min=0)
ind = (z_ * (self.image_size - 1)).long()
return self.density[ind[:, 0], ind[:, 1]]
def rejection_sampling(self, num_steps=1):
z_ = torch.rand((num_steps, 2), dtype=self.image.dtype, device=self.image.device)
prob = torch.rand(num_steps, dtype=self.image.dtype, device=self.image.device)
ind = (z_ * (self.image_size - 1)).long()
intensity = self.image[ind[:, 0], ind[:, 1]]
accept = intensity > prob
z = z_[accept, :] * self.scale + self.shift
return z
def sample(self, num_samples=1):
z = torch.ones((0, 2), dtype=self.image.dtype, device=self.image.device)
while len(z) < num_samples:
z_ = self.rejection_sampling(num_samples)
ind = np.min([len(z_), num_samples - len(z)])
z = torch.cat([z, z_[:ind, :]], 0)
return z
class TwoModes(PriorDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
def log_prob(self, z):
a = torch.abs(z[:, 0])
eps = torch.abs(torch.tensor(self.loc))
log_prob = - 0.5 * ((torch.norm(z, dim=1) - self.loc) / (2 * self.scale)) ** 2 - 0.5 * ((a - eps) / (3 * self.scale)) ** 2 + torch.log(1 + torch.exp(-2 * (a * eps) / (3 * self.scale) ** 2))
return log_prob
class Sinusoidal(PriorDistribution):
|
MIT License
|
blocktrail/blocktrail-sdk-python
|
blocktrail/client.py
|
APIClient.address
|
python
|
def address(self, address):
response = self.client.get("/address/%s" % (address, ))
return response.json()
|
get a single address
:param str address: the address hash
:rtype: dict
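A minimal usage sketch; the credentials and the address hash below are placeholders, and the import path is inferred from the file path above:

from blocktrail.client import APIClient

client = APIClient(api_key='YOUR_API_KEY', api_secret='YOUR_API_SECRET',
                   network='BTC', testnet=False)
info = client.address('1dice8EMZmqKvrGE4Qc9bUFf9PX3xaYDp')  # placeholder address hash
print(info)  # dict with the details returned by the API for this address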
|
https://github.com/blocktrail/blocktrail-sdk-python/blob/93b0afce79ae94d46257c85190aed1e876c87b4f/blocktrail/client.py#L23-L32
|
from blocktrail import connection
class APIClient(object):
def __init__(self, api_key, api_secret, network='BTC', testnet=False, api_version='v1', api_endpoint=None, debug=False):
if api_endpoint is None:
network = ("t" if testnet else "") + network.upper()
api_endpoint = "https://api.blocktrail.com/%s/%s" % (api_version, network)
self.client = connection.RestClient(api_endpoint=api_endpoint, api_key=api_key, api_secret=api_secret, debug=debug)
|
MIT License
|
google/clusterfuzz
|
src/appengine/libs/issue_management/issue_tracker_policy.py
|
IssueTrackerPolicy.substitution_mapping
|
python
|
def substitution_mapping(self, label):
if 'substitutions' not in self._data:
return label
mapped = self._data['substitutions'].get(label)
if not mapped:
return label
return str(mapped)
|
Get an explicit substitution mapping.
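A small illustration of the lookup, using a hypothetical policy configuration (the dict below is made up; real policies come from project configuration, and the import path follows the repository layout above):

from libs.issue_management.issue_tracker_policy import IssueTrackerPolicy

policy = IssueTrackerPolicy({
    'status': {'assigned': 'Assigned', 'duplicate': 'Duplicate', 'wontfix': 'WontFix',
               'fixed': 'Fixed', 'verified': 'Verified', 'new': 'New'},
    'labels': {},
    'substitutions': {'Restrict-View-Commit': 'Restrict-View-Google'},
})
print(policy.substitution_mapping('Restrict-View-Commit'))  # -> 'Restrict-View-Google'
print(policy.substitution_mapping('Pri-1'))                 # unmapped labels pass through unchanged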
|
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/appengine/libs/issue_management/issue_tracker_policy.py#L79-L88
|
from collections import namedtuple
from clusterfuzz._internal.config import local_config
Status = namedtuple('Status',
['assigned', 'duplicate', 'wontfix', 'fixed', 'verified'])
EXPECTED_STATUSES = [
'assigned',
'duplicate',
'wontfix',
'fixed',
'verified',
'new',
]
class ConfigurationError(Exception):
class NewIssuePolicy(object):
def __init__(self):
self.status = ''
self.ccs = []
self.labels = []
self.issue_body_footer = ''
def _to_str_list(values):
return [str(value) for value in values]
class IssueTrackerPolicy(object):
def __init__(self, data):
self._data = data
if 'status' not in self._data:
raise ConfigurationError('Status not set in policies.')
if 'labels' not in self._data:
raise ConfigurationError('Labels not set in policies.')
for status in EXPECTED_STATUSES:
if status not in self._data['status']:
raise ConfigurationError(
'Expected status {} is not set.'.format(status))
def status(self, status_type):
return self._data['status'][status_type]
def label(self, label_type):
label = self._data['labels'].get(label_type)
if label is None:
return None
return str(label)
|
Apache License 2.0
|
maniacallabs/bibliopixel
|
bibliopixel/drivers/SimPixel/SimpleWebSocketServer.py
|
WebSocket.handleMessage
|
python
|
def handleMessage(self):
pass
|
Called when a websocket frame is received.
The frame data is available as self.data.
If the frame is Text then self.data is a unicode object.
If the frame is Binary then self.data is a bytearray object.
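A minimal echo-server sketch built on this hook; the SimpleWebSocketServer constructor arguments and the sendMessage/handleConnected/handleClose helpers follow this library's conventional API and are assumptions here, since they are not shown in the excerpt above:

from bibliopixel.drivers.SimPixel.SimpleWebSocketServer import WebSocket, SimpleWebSocketServer

class EchoHandler(WebSocket):

    def handleMessage(self):
        # self.data is unicode for text frames and a bytearray for binary frames
        self.sendMessage(self.data)

    def handleConnected(self):
        print(self.address, 'connected')

    def handleClose(self):
        print(self.address, 'closed')

server = SimpleWebSocketServer('', 8000, EchoHandler)
server.serveforever()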
|
https://github.com/maniacallabs/bibliopixel/blob/afb993fbbe56e75e7c98f252df402b0f3e83bb6e/bibliopixel/drivers/SimPixel/SimpleWebSocketServer.py#L113-L121
|
import sys
VER = sys.version_info[0]
if VER >= 3:
import socketserver
from http.server import BaseHTTPRequestHandler
from io import StringIO, BytesIO
else:
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
import hashlib
import base64
import socket
import struct
import ssl
import errno
import codecs
from collections import deque
from select import select
__all__ = ['WebSocket',
'SimpleWebSocketServer']
def _check_unicode(val):
if VER >= 3:
return isinstance(val, str)
else:
return isinstance(val, unicode)
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
if VER >= 3:
self.rfile = BytesIO(request_text)
else:
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
_VALID_STATUS_CODES = [1000, 1001, 1002, 1003, 1007, 1008,
1009, 1010, 1011, 3000, 3999, 4000, 4999]
HANDSHAKE_STR = (
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %(acceptstr)s\r\n\r\n"
)
GUID_STR = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
STREAM = 0x0
TEXT = 0x1
BINARY = 0x2
CLOSE = 0x8
PING = 0x9
PONG = 0xA
HEADERB1 = 1
HEADERB2 = 3
LENGTHSHORT = 4
LENGTHLONG = 5
MASK = 6
PAYLOAD = 7
MAXHEADER = 65536
MAXPAYLOAD = 33554432
class WebSocket(object):
def __init__(self, server, sock, address):
self.server = server
self.client = sock
self.address = address
self.handshaked = False
self.headerbuffer = bytearray()
self.headertoread = 2048
self.fin = 0
self.data = bytearray()
self.opcode = 0
self.hasmask = 0
self.maskarray = None
self.length = 0
self.lengtharray = None
self.index = 0
self.request = None
self.usingssl = False
self.frag_start = False
self.frag_type = BINARY
self.frag_buffer = None
self.frag_decoder = codecs.getincrementaldecoder(
'utf-8')(errors='strict')
self.closed = False
self.sendq = deque()
self.state = HEADERB1
self.maxheader = MAXHEADER
self.maxpayload = MAXPAYLOAD
|
MIT License
|
tmthydvnprt/compfipy
|
compfipy/market.py
|
opening_time
|
python
|
def opening_time():
return datetime.time(9, 30)
|
Get opening time of the current date.
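Paired with closing_time() from the same module, this can be used to check whether a timestamp falls inside regular trading hours (a sketch only; it does not check whether the exchange is actually open on that date, and the module itself is Python 2 era):

import datetime
from compfipy.market import opening_time, closing_time  # import path per the file path above

def is_during_trading_hours(dt):
    # compare against the 9:30 open and the (possibly early) close for that date
    return opening_time() <= dt.time() <= closing_time(dt.date())

print(is_during_trading_hours(datetime.datetime(2015, 12, 22, 10, 0)))  # True, regular session
print(is_during_trading_hours(datetime.datetime(2015, 12, 24, 14, 0)))  # False, early 13:00 close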
|
https://github.com/tmthydvnprt/compfipy/blob/82a40cb9a7acbd3111b3d3a86222084f96e7fd7a/compfipy/market.py#L171-L175
|
import os
import sys
import json
import urllib
import urllib2
import datetime
import StringIO
import multiprocessing
import calendar as cal
import cPickle as pickle
import numpy as np
import pandas as pd
import dateutil.easter
import tabulate
DATA_SET = False
NO_DATA_SET = 'DATA_LOCATION and DATA_SOURCE are not defined. See set locations with set_data_location().'
DATA_LOCATION = ''
DATA_SOURCE = 'google'
SYMBOL_MANIFEST = ''
HISTORY_STATUS = ''
LOG_FILE = ''
HISTORY_PATH = ''
GOOGLE_URL = 'http://www.google.com/finance/historical?q={symbol}&startdate={start}&enddate={end}&output=csv'
YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?s={symbol}&c={start}'
EXCHANGES = {'', 'NYSE:', 'NASDAQ:', 'NYSEMKT:', 'NYSEARCA:'}
NASDAQ_URL = 'ftp://ftp.nasdaqtrader.com/SymbolDirectory/'
NASDAQ_FILE = 'nasdaqlisted.txt'
OTHERS_FILE = 'otherlisted.txt'
NASDAQ_SECTOR_URL = 'http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange={}&render=download'
NASDAQ_SECTOR_EX = ['nasdaq', 'nyse', 'amex']
COLUMN_ORDER = ['Symbol', 'Security Name', 'Exchange', 'ETF', 'NASDAQ Symbol', 'Test Issue']
EXCHANGE_ABBR = {
'Q' : 'NASDAQ',
'A' : 'NYSE MKT',
'N' : 'NYSE',
'P' : 'ARCA',
'Z' : 'BATS'
}
def next_open_day(date=datetime.date.today()):
date = date.date() if isinstance(date, datetime.datetime) else date
date = date + datetime.timedelta(days=1)
while not is_open_on(date):
date = date + datetime.timedelta(days=1)
return date
def move_weekend_holiday(d):
if d.weekday() == 5:
return d - datetime.timedelta(days=1)
elif d.weekday() == 6:
return d + datetime.timedelta(days=1)
else:
return d
def nth_week_day_of_month(n, weekday, month=datetime.date.today().month, year=datetime.date.today().year):
if isinstance(weekday, str) and len(weekday) == 3:
weekday = list(cal.day_abbr).index(weekday)
elif isinstance(weekday, str) and len(weekday) > 3:
weekday = list(cal.day_name).index(weekday)
if n > 0:
first_day_of_month = datetime.date(year, month, 1)
weekday_difference = (weekday - first_day_of_month.weekday()) % 7
first_weekday_of_month = first_day_of_month + datetime.timedelta(days=weekday_difference)
return first_weekday_of_month + datetime.timedelta(days=(n - 1) * 7)
else:
last_day_of_month = datetime.date(year, month + 1, 1) - datetime.timedelta(days=1)
weekday_difference = (last_day_of_month.weekday() - weekday) % 7
last_weekday_of_month = last_day_of_month - datetime.timedelta(days=weekday_difference)
return last_weekday_of_month - datetime.timedelta(days=(abs(n) - 1) * 7)
def nyse_holidays(year=datetime.date.today().year):
if year < 1817:
print 'The NYSE was not open in ' + str(year) +'! It was founded in March 8, 1817. Returning empty list []'
return []
else:
typical_holidays = [
datetime.date(year, 1, 1),
nth_week_day_of_month(3, 'Mon', 1, year),
nth_week_day_of_month(3, 'Mon', 2, year),
dateutil.easter.easter(year) - datetime.timedelta(days=2),
nth_week_day_of_month(-1, 'Mon', 5, year),
datetime.date(year, 7, 4),
nth_week_day_of_month(1, 'Mon', 9, year),
nth_week_day_of_month(4, 'Thu', 11, year),
datetime.date(year, 12, 25)
]
historical_holidays = [
datetime.date(2012, 10, 29),
datetime.date(2012, 10, 30),
]
special_holidays = [v for v in historical_holidays if v.year == year]
holidays = [move_weekend_holiday(h) for h in typical_holidays] + special_holidays
holidays.sort()
return holidays
def nyse_close_early_dates(year=datetime.date.today().year):
return [
datetime.date(year, 6, 3),
nth_week_day_of_month(4, 'Wed', 11, year),
datetime.date(year, 12, 24)
]
def closing_time(date=datetime.date.today()):
return datetime.time(13, 0) if date in nyse_close_early_dates(date.year) else datetime.time(16, 0)
|
MIT License
|
but3k4/youtube-views
|
modules/youtube.py
|
YouTube.open_url
|
python
|
def open_url(self):
self.browser.get(self.url)
|
opens the URL
|
https://github.com/but3k4/youtube-views/blob/ddca3fcc32c6063d9342b8c24d8b10ff700ba3d3/modules/youtube.py#L155-L158
|
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import ElementNotInteractableException
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.common.exceptions import JavascriptException
from modules import utils
class YouTube:
def __init__(self, url='https://youtube.com', proxy=None, verbose=False):
self.url = url
self.proxy = proxy
self.verbose = verbose
self.options = webdriver.ChromeOptions()
self.options.add_argument('--headless')
self.options.add_argument('--disable-gpu')
self.options.add_argument('--mute-audio')
self.options.add_argument('--single-process')
self.options.add_argument('--autoplay-policy=no-user-gesture-required')
if self.proxy:
self.options.add_argument('--proxy-server={0}'.format(self.proxy))
self.user_agent = utils.user_agent()
self.options.add_argument('--user-agent={0}'.format(self.user_agent))
self.browser = webdriver.Chrome(options=self.options)
self.default_timeout = 20
self.browser.implicitly_wait(self.default_timeout)
self.browser.set_window_size(1920, 1080)
self.open_url()
def find_by_class(self, class_name):
return self.browser.find_element_by_class_name(class_name)
def find_all_by_class(self, class_name):
return self.browser.find_elements_by_class_name(class_name)
def find_by_id(self, id_name):
return self.browser.find_element_by_id(id_name)
def find_all_by_id(self, id_name):
return self.browser.find_elements_by_id(id_name)
def find_by_name(self, name):
return self.browser.find_element_by_name(name)
def find_all_by_name(self, name):
return self.browser.find_elements_by_name(name)
def find_by_xpath(self, xpath):
return self.browser.find_element_by_xpath(xpath)
def find_all_by_xpath(self, xpath):
return self.browser.find_elements_by_xpath(xpath)
def click(self, how, what):
try:
wait = WebDriverWait(self.browser, self.default_timeout)
wait.until(EC.element_to_be_clickable((how, what))).click()
except (ElementClickInterceptedException, TimeoutException):
return False
return True
|
BSD 3-Clause New or Revised License
|
rcos/observatory-retired
|
observatory/lib/dulwich/repo.py
|
RefsContainer.allkeys
|
python
|
def allkeys(self):
raise NotImplementedError(self.allkeys)
|
All refs present in this container.
|
https://github.com/rcos/observatory-retired/blob/cada27eaf96998ca1ba97a4cca30d2b5ce5021ac/observatory/lib/dulwich/repo.py#L157-L159
|
from cStringIO import StringIO
import errno
import os
from lib.dulwich.errors import (
MissingCommitError,
NoIndexPresent,
NotBlobError,
NotCommitError,
NotGitRepository,
NotTreeError,
NotTagError,
PackedRefsException,
CommitError,
RefFormatError,
)
from lib.dulwich.file import (
ensure_dir_exists,
GitFile,
)
from lib.dulwich.object_store import (
DiskObjectStore,
MemoryObjectStore,
)
from lib.dulwich.objects import (
Blob,
Commit,
ShaFile,
Tag,
Tree,
hex_to_sha,
)
import warnings
OBJECTDIR = 'objects'
SYMREF = 'ref: '
REFSDIR = 'refs'
REFSDIR_TAGS = 'tags'
REFSDIR_HEADS = 'heads'
INDEX_FILENAME = "index"
BASE_DIRECTORIES = [
["branches"],
[REFSDIR],
[REFSDIR, REFSDIR_TAGS],
[REFSDIR, REFSDIR_HEADS],
["hooks"],
["info"]
]
def read_info_refs(f):
ret = {}
for l in f.readlines():
(sha, name) = l.rstrip("\r\n").split("\t", 1)
ret[name] = sha
return ret
def check_ref_format(refname):
if '/.' in refname or refname.startswith('.'):
return False
if '/' not in refname:
return False
if '..' in refname:
return False
for c in refname:
if ord(c) < 040 or c in '\177 ~^:?*[':
return False
if refname[-1] in '/.':
return False
if refname.endswith('.lock'):
return False
if '@{' in refname:
return False
if '\\' in refname:
return False
return True
class RefsContainer(object):
def set_ref(self, name, other):
warnings.warn("RefsContainer.set_ref() is deprecated."
"Use set_symblic_ref instead.",
category=DeprecationWarning, stacklevel=2)
return self.set_symbolic_ref(name, other)
def set_symbolic_ref(self, name, other):
raise NotImplementedError(self.set_symbolic_ref)
def get_packed_refs(self):
raise NotImplementedError(self.get_packed_refs)
def get_peeled(self, name):
return None
def import_refs(self, base, other):
for name, value in other.iteritems():
self["%s/%s" % (base, name)] = value
|
ISC License
|
jeroenzegers/nabu-msss
|
nabu/neuralnetworks/models/model.py
|
Model.__init__
|
python
|
def __init__(self, conf, name=None):
self.conf = conf
self.num_outputs = 1
self.trainable = 'trainable' not in conf or conf['trainable'] == 'True'
self.scope = tf.VariableScope(False, name or type(self).__name__)
|
Model constructor
Args:
conf: The model configuration as a configparser object
name: [optional] the name of the model; defaults to the class name
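A minimal sketch of a concrete subclass, only to illustrate how conf, trainable and scope are used; the apply method below is hypothetical, and with the real package any further abstract methods of Model (not shown in this excerpt) would also need to be implemented:

import tensorflow as tf
from nabu.neuralnetworks.models.model import Model  # import path per the file path above

class LinearModel(Model):

    def apply(self, inputs):
        # hypothetical helper: build a dense layer inside the model's variable scope
        with tf.variable_scope(self.scope):
            return tf.layers.dense(inputs, 10, trainable=self.trainable)

model = LinearModel({'trainable': 'True'}, name='linear')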
|
https://github.com/jeroenzegers/nabu-msss/blob/5e862cbf846d45b8a317f87588533f3fde9f0726/nabu/neuralnetworks/models/model.py#L12-L27
|
from abc import ABCMeta, abstractmethod
import tensorflow as tf
import numpy as np
class Model(object):
__metaclass__ = ABCMeta
|
MIT License
|
sibirrer/lenstronomy
|
lenstronomy/LensModel/Profiles/uldm.py
|
Uldm.mass_3d_lens
|
python
|
def mass_3d_lens(self, r, kappa_0, theta_c, slope=8):
m_3d = self.mass_3d(r, kappa_0, theta_c, slope)
return m_3d
|
mass enclosed in a 3D sphere of radius r
:param r: radius over which the mass is computed
:param kappa_0: central convergence of profile
:param theta_c: core radius (in arcsec)
:param slope: exponent entering the profile
:return: mass enclosed in 3D ball
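A short numerical sketch of calling this method on the profile class from the context below (parameter values are arbitrary and only for illustration):

import numpy as np
from lenstronomy.LensModel.Profiles.uldm import Uldm

profile = Uldm()
kappa_0, theta_c = 0.1, 5.0           # arbitrary central convergence and core radius (arcsec)
radii = np.array([1.0, 5.0, 20.0])    # radii at which to evaluate the enclosed mass
print(profile.mass_3d_lens(radii, kappa_0, theta_c, slope=8))  # enclosed mass in convergence units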
|
https://github.com/sibirrer/lenstronomy/blob/e6d0e179a98ecb0c4db25cdf7cfb73e83c6aeded/lenstronomy/LensModel/Profiles/uldm.py#L223-L233
|
__author__ = 'lucateo'
import numpy as np
import scipy.interpolate as interp
from scipy.special import gamma, hyp2f1
from mpmath import hyp3f2
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
import lenstronomy.Util.constants as const
__all__ = ['Uldm']
class Uldm(LensProfileBase):
_s = 0.000001
param_names = ['kappa_0', 'theta_c', 'slope', 'center_x', 'center_y']
lower_limit_default = {'kappa_0': 0, 'theta_c': 0, 'slope': 3.5, 'center_x': -100, 'center_y': -100}
upper_limit_default = {'kappa_0': 1., 'theta_c': 100, 'slope': 10, 'center_x': 100, 'center_y': 100}
def rhotilde(self, kappa_0, theta_c, slope=8):
a_factor_sqrt = np.sqrt( (0.5)**(-1/slope) -1)
num_factor = gamma(slope) / gamma(slope - 1/2) * a_factor_sqrt / np.sqrt(np.pi)
return kappa_0 * num_factor / theta_c
def function(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_** 2 + y_** 2)
r = np.maximum(r, self._s)
a_factor_sqrt = np.sqrt( (0.5)**(-1./slope) -1)
if np.isscalar(r) == True:
hypgeom = float(kappa_0 /2 * r**2 *
hyp3f2(1, 1, slope - 0.5, 2, 2, -(a_factor_sqrt * r /theta_c )**2))
else:
hypgeom = np.array([ kappa_0 /2. * r_i**2. *
hyp3f2(1, 1, slope - 0.5, 2, 2, -(a_factor_sqrt * r_i / theta_c)**2.) for r_i in r], dtype=float)
return hypgeom
def alpha_radial(self, r, kappa_0, theta_c, slope=8):
a_factor = (0.5)**(-1./slope) -1
prefactor = 2./(2*slope -3) * kappa_0 * theta_c**2 / a_factor
denominator_factor = (1 + a_factor * r**2/theta_c**2)**(slope - 3./2)
return prefactor/r * (1 - 1/denominator_factor)
def derivatives(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
R = np.maximum(R,0.00000001)
f_x = self.alpha_radial(R, kappa_0, theta_c, slope) * x_ / R
f_y = self.alpha_radial(R, kappa_0, theta_c, slope) * y_ / R
return f_x, f_y
def hessian(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
R = np.maximum(R,0.00000001)
a_factor = (0.5)**(-1./slope) -1
prefactor = 2./(2*slope -3) * kappa_0 * theta_c**2 / a_factor
denominator = 1 + a_factor * R**2/theta_c**2
factor1 = (2*slope - 3) * a_factor * denominator**(1./2 - slope) / (theta_c**2 * R**2)
factor2 = 1/R**4 * (1 - denominator**(3./2 - slope))
f_xx = prefactor * (factor1 * x_**2 + factor2 * (y_**2 - x_**2))
f_yy = prefactor * (factor1 * y_**2 + factor2 * (x_**2 - y_**2))
f_xy = prefactor * (factor1 * x_ * y_ - factor2 * 2*x_*y_)
return f_xx, f_xy, f_xy, f_yy
def density(self, R, kappa_0, theta_c, slope=8):
rhotilde = self.rhotilde(kappa_0, theta_c, slope)
a_factor = (0.5)**(-1./slope) -1
return rhotilde/(1 + a_factor* (R/theta_c)**2)**slope
def density_lens(self, r, kappa_0, theta_c, slope=8):
return self.density(r, kappa_0, theta_c, slope)
def kappa_r(self, R, kappa_0, theta_c, slope=8):
a_factor = (0.5)**(-1./slope) -1
return kappa_0 * (1 + a_factor * (R/theta_c)**2)**(1./2 - slope)
def density_2d(self, x, y, kappa_0, theta_c, center_x=0, center_y=0, slope=8):
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_**2 + y_**2)
return self.kappa_r(R, kappa_0, theta_c, slope)
def _mass_integral(self, x, slope=8):
hypF = np.real(hyp2f1(3./2, slope, 5./2, - x**2))
return 1./3 * x**3 * hypF
def mass_3d(self, R, kappa_0, theta_c, slope=8):
rhotilde = self.rhotilde(kappa_0, theta_c, slope)
a_factor = (0.5)**(-1./slope) -1
prefactor = 4. * np.pi * rhotilde * theta_c**3 / (a_factor)**(1.5)
m_3d = prefactor * (self._mass_integral(R/theta_c * np.sqrt(a_factor), slope)
- self._mass_integral(0, slope) )
return m_3d
|
MIT License
|
pyplati/platipy
|
platipy/imaging/registration/utils.py
|
smooth_and_resample
|
python
|
def smooth_and_resample(
image,
isotropic_voxel_size_mm=None,
shrink_factor=None,
smoothing_sigma=None,
interpolator=sitk.sitkLinear,
):
if smoothing_sigma:
if hasattr(smoothing_sigma, "__iter__"):
smoothing_variance = [i * i for i in smoothing_sigma]
else:
smoothing_variance = (smoothing_sigma ** 2,) * 3
maximum_kernel_width = int(
max([8 * j * i for i, j in zip(image.GetSpacing(), smoothing_variance)])
)
image = sitk.DiscreteGaussian(image, smoothing_variance, maximum_kernel_width)
original_spacing = image.GetSpacing()
original_size = image.GetSize()
if shrink_factor and isotropic_voxel_size_mm:
raise AttributeError(
"Function must be called with either isotropic_voxel_size_mm or "
"shrink_factor, not both."
)
elif isotropic_voxel_size_mm:
scale_factor = (
isotropic_voxel_size_mm * np.ones_like(image.GetSize()) / np.array(image.GetSpacing())
)
new_size = [int(sz / float(sf) + 0.5) for sz, sf in zip(original_size, scale_factor)]
elif shrink_factor:
if isinstance(shrink_factor, list):
new_size = [int(sz / float(sf) + 0.5) for sz, sf in zip(original_size, shrink_factor)]
else:
new_size = [int(sz / float(shrink_factor) + 0.5) for sz in original_size]
else:
return image
new_spacing = [
((size_o_i - 1) * spacing_o_i) / (size_n_i - 1)
for size_o_i, spacing_o_i, size_n_i in zip(original_size, original_spacing, new_size)
]
return sitk.Resample(
image,
new_size,
sitk.Transform(),
interpolator,
image.GetOrigin(),
new_spacing,
image.GetDirection(),
0.0,
image.GetPixelID(),
)
|
Args:
image (SimpleITK.Image): The image we want to resample.
isotropic_voxel_size_mm (float | None): New voxel size in millimetres
shrink_factor (list | float): A number greater than one, such that the new image's size is
original_size/shrink_factor. Can also be specified independently for each
dimension (sagittal, coronal, axial).
smoothing_sigma (list | float): Scale for Gaussian smoothing; this is in physical
(image spacing) units, not pixels. Can also be specified independently for
each dimension (sagittal, coronal, axial).
interpolator (int): SimpleITK interpolator to use when resampling
(default: sitk.sitkLinear).
Returns:
SimpleITK.Image: The input image smoothed with the specified Gaussian kernel and then
resampled to the requested isotropic voxel size or shrink factor.
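A minimal usage sketch with a synthetic SimpleITK image (sizes and values are arbitrary; the import path follows the file path above):

import SimpleITK as sitk
from platipy.imaging.registration.utils import smooth_and_resample

# synthetic 3D image: 64 x 64 x 32 voxels with 1.0 x 1.0 x 2.5 mm spacing
image = sitk.Image(64, 64, 32, sitk.sitkFloat32)
image.SetSpacing((1.0, 1.0, 2.5))

# smooth with a 2 mm Gaussian, then resample to isotropic 3 mm voxels
resampled = smooth_and_resample(image, isotropic_voxel_size_mm=3.0, smoothing_sigma=2.0)
print(resampled.GetSize(), resampled.GetSpacing())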
|
https://github.com/pyplati/platipy/blob/f183ac5b459055376a7e61ccc1bfa0ccbdacfd2b/platipy/imaging/registration/utils.py#L194-L266
|
import numpy as np
import SimpleITK as sitk
from loguru import logger
def registration_command_iteration(method):
print("{0:3} = {1:10.5f}".format(method.GetOptimizerIteration(), method.GetMetricValue()))
def stage_iteration(method):
print(f"Number of parameters = {method.GetInitialTransform().GetNumberOfParameters()}")
def deformable_registration_command_iteration(method):
print("{0:3} = {1:10.5f}".format(method.GetElapsedIterations(), method.GetMetric()))
def control_point_spacing_distance_to_number(image, grid_spacing):
image_spacing = np.array(image.GetSpacing())
image_size = np.array(image.GetSize())
number_points = image_size * image_spacing / np.array(grid_spacing)
return (number_points + 0.5).astype(int)
def apply_linear_transform(
input_image,
reference_image,
transform,
is_structure=False,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
):
if is_structure:
if default_value != 0 or interpolator != sitk.sitkNearestNeighbor:
logger.warning(
"is_structure is set to True, but you have set default_value "
"and/or interpolator. default_value and/or interpolator will be overwritten."
)
default_value = 0
interpolator = sitk.sitkNearestNeighbor
return apply_transform(
input_image=input_image,
reference_image=reference_image,
transform=transform,
default_value=default_value,
interpolator=interpolator,
)
def apply_deformable_transform(
input_image,
transform,
is_structure=False,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
):
if is_structure:
if default_value != 0 or interpolator != sitk.sitkNearestNeighbor:
logger.warning(
"is_structure is set to True, but you have set default_value "
"and/or interpolator. default_value and/or interpolator will be overwritten."
)
default_value = 0
interpolator = sitk.sitkNearestNeighbor
return apply_transform(
input_image=input_image,
reference_image=None,
transform=transform,
default_value=default_value,
interpolator=interpolator,
)
def apply_transform(
input_image,
reference_image=None,
transform=None,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
):
original_image_type = input_image.GetPixelID()
resampler = sitk.ResampleImageFilter()
if reference_image:
resampler.SetReferenceImage(reference_image)
else:
resampler.SetReferenceImage(input_image)
if transform:
resampler.SetTransform(transform)
resampler.SetDefaultPixelValue(default_value)
resampler.SetInterpolator(interpolator)
output_image = resampler.Execute(input_image)
output_image = sitk.Cast(output_image, original_image_type)
return output_image
|
Apache License 2.0
|
stephenhky/pyqentangle
|
pyqentangle/quantumstates/harmonics.py
|
coupled_excited_harmonics
|
python
|
def coupled_excited_harmonics(n):
return lambda x1, x2: harmonic_wavefcn(0)(0.5*(x1+x2)) * harmonic_wavefcn(n)(x1-x2)
|
Return a bipartite wavefunction with the ground state for the center of mass
but an excited state for the interaction coordinate.
:param n: quantum harmonic state number for the interaction
:return: wavefunction of two variables
:type n: int
:rtype: function
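As a quick sanity check, the returned function can be evaluated pointwise and its squared modulus integrated numerically (the import path follows the file path above):

from scipy.integrate import dblquad
from pyqentangle.quantumstates.harmonics import coupled_excited_harmonics

psi = coupled_excited_harmonics(1)   # first excited state in the relative coordinate
print(psi(0.3, -0.2))                # wavefunction value at (x1, x2)

# the norm should come out close to 1 (the Jacobian of the coordinate change is 1)
norm, _ = dblquad(lambda x2, x1: abs(psi(x1, x2)) ** 2, -6, 6, lambda x1: -6, lambda x1: 6)
print(norm)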
|
https://github.com/stephenhky/pyqentangle/blob/401ad45b332ac279fedcf01dd14cf15aa1b7f4ea/pyqentangle/quantumstates/harmonics.py#L76-L85
|
from math import sqrt, pi
import numpy as np
from scipy.special import hermite
from scipy.integrate import dblquad
from ..utils import InvalidMatrix
def disentangled_gaussian_wavefcn():
return lambda x1, x2: np.exp(-0.5 * (x1 * x1 + x2 * x2)) / np.sqrt(np.pi)
def correlated_bipartite_gaussian_wavefcn(covmatrix):
if not covmatrix.shape == (2, 2):
raise InvalidMatrix("Invalid matrix shape: "+str(covmatrix.shape)+"; desired shape: (2, 2)")
if covmatrix[0, 1] != covmatrix[1, 0]:
raise InvalidMatrix("Not a symmetric covariance matrix")
norm = 2 * np.pi / np.sqrt(np.linalg.det(covmatrix))
const = 1 / np.sqrt(norm)
return lambda x1, x2: const * np.exp(-0.25* np.matmul(np.array([[x1, x2]]),
np.matmul(covmatrix,
np.array([[x1], [x2]])
)
)
)
def tail_factorial(n, accumulator=1):
if n == 0:
return accumulator
else:
return tail_factorial(n-1, accumulator * n)
def harmonic_wavefcn(n):
const = 1/sqrt(2**n * tail_factorial(n)) * 1/sqrt(sqrt(pi))
return lambda x: const * np.exp(-0.5*x*x) * hermite(n)(x)
|
MIT License
|
wdm0006/dummyrdd
|
dummy_spark/rdd.py
|
RDD.repartitionAndSortWithinPartitions
|
python
|
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=None, ascending=True, keyfunc=lambda x: x):
        data = sorted(self._jrdd, key=keyfunc, reverse=not ascending)
return RDD(data, self.ctx)
|
:param numPartitions: number of partitions (ignored by this in-memory implementation)
:param partitionFunc: partitioning function (ignored by this in-memory implementation)
:param ascending: whether to sort the data in ascending key order
:param keyfunc: function used to extract the sort key from each element
:return: a new RDD containing the sorted data
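A small sketch against this in-memory RDD (the ctx argument is not used by the dummy implementation, so None is passed for brevity; the import path follows the file path above):

from dummy_spark.rdd import RDD

rdd = RDD([(3, 'c'), (1, 'a'), (2, 'b')], None)
out = rdd.repartitionAndSortWithinPartitions(keyfunc=lambda kv: kv[0], ascending=True)
print(out)  # [(1, 'a'), (2, 'b'), (3, 'c')], sorted by key in ascending order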
|
https://github.com/wdm0006/dummyrdd/blob/d66c30495cbaa001a744128c89d41fb55741fba5/dummy_spark/rdd.py#L277-L287
|
import random
import uuid
from collections import OrderedDict
from functools import reduce
from dummy_spark.resultsiterable import ResultIterable
__author__ = 'willmcginnis'
class RDD(object):
def __init__(self, jrdd, ctx, jrdd_deserializer=None):
self._id = str(uuid.uuid4())
if jrdd is None:
self._jrdd = []
else:
if isinstance(jrdd, list):
self._jrdd = jrdd
elif isinstance(jrdd, set):
self._jrdd = list(jrdd)
else:
raise AttributeError('Type %s for jrdd not supported' % (type(jrdd), ))
self.ctx = ctx
self.is_cached = True
self._name = 'dummpy-rdd'
self.is_checkpointed = False
self._jrdd_deserializer = jrdd_deserializer
self.partitioner = None
def id(self):
return self._id
@property
def context(self):
return self.ctx
def name(self):
return self._name
def setName(self, name):
self._name = name
return self
def __repr__(self):
return str(self._jrdd)
def cache(self):
return self
def persist(self, storageLevel=None):
return self
def unpersist(self):
return self
def _reserialize(self, serializer=None):
return self
def checkpoint(self):
pass
def isCheckpointed(self):
return True
def getCheckpointFile(self):
return None
def map(self, f, preservesPartitioning=False):
data = list(map(f, self._jrdd))
return RDD(data, self.ctx)
def flatMap(self, f, preservesPartitioning=False):
data = [item for sl in map(f, self._jrdd) for item in sl]
return RDD(data, self.ctx)
def mapPartitions(self, f, preservesPartitioning=False):
return self.map(f, preservesPartitioning=preservesPartitioning)
def getNumPartitions(self):
return 1
def filter(self, f):
data = list(filter(f, self._jrdd))
return RDD(data, self.ctx)
def distinct(self, numPartitions=None):
data = set(self._jrdd)
return RDD(data, self.ctx)
def sample(self, withReplacement, fraction, seed=None):
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
if seed is not None:
random.seed(seed)
idx_list = list(range(len(self._jrdd)))
if withReplacement:
data = [self._jrdd[random.choice(idx_list)] for _ in list(range(int(fraction * len(self._jrdd))))]
else:
random.shuffle(idx_list)
data = [self._jrdd[idx] for idx in idx_list[:int(fraction * len(self._jrdd))]]
return RDD(data, self.ctx)
def randomSplit(self, weights, seed=None):
pass
def takeSample(self, withReplacement, num, seed=None):
assert num >= 0.0, "Negative sample num: %s" % num
if seed is not None:
random.seed(seed)
if withReplacement:
out = [self._jrdd[random.choice(list(range(len(self._jrdd))))] for _ in num]
else:
idx_list = list(range(len(self._jrdd)))
random.shuffle(idx_list)
out = [self._jrdd[idx] for idx in idx_list[:num]]
return out
def union(self, other):
return RDD(self._jrdd + other._jrdd, self.ctx)
def intersection(self, other):
data = [item for item in self._jrdd if item in other._jrdd]
return RDD(data, self.ctx)
def __add__(self, other):
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
|
BSD 3-Clause New or Revised License
|
byceps/byceps
|
byceps/services/newsletter/service.py
|
get_subscription_state
|
python
|
def get_subscription_state(
user_id: UserID, list_id: ListID
) -> SubscriptionState:
    current_subscription = db.session \
        .query(DbSubscriptionUpdate) \
        .filter_by(user_id=user_id) \
        .filter_by(list_id=list_id) \
        .order_by(DbSubscriptionUpdate.expressed_at.desc()) \
        .first()
if current_subscription is None:
return SubscriptionState.declined
return current_subscription.state
|
Return the user's current subscription state for that list.
|
https://github.com/byceps/byceps/blob/138f928e98fd1e3d79943e1a8744ea04cef465b5/byceps/services/newsletter/service.py#L212-L226
|
from __future__ import annotations
from operator import itemgetter
from typing import Iterable, Iterator, Optional, Sequence, Union
from ...database import db, Query
from ...typing import UserID
from ..user.dbmodels.user import User as DbUser
from .dbmodels import (
List as DbList,
Subscriber,
SubscriptionUpdate as DbSubscriptionUpdate,
)
from .transfer.models import List, ListID
from .types import SubscriptionState
def find_list(list_id: ListID) -> Optional[List]:
list_ = db.session.query(DbList).get(list_id)
if list_ is None:
return None
return _db_entity_to_list(list_)
def get_all_lists() -> Sequence[List]:
lists = db.session.query(DbList).all()
return [_db_entity_to_list(list_) for list_ in lists]
def count_subscribers_for_list(list_id: ListID) -> int:
return _build_query_for_current_subscribers(list_id).count()
def get_subscribers(list_id: ListID) -> Iterable[Subscriber]:
subscriber_id_rows = _build_query_for_current_subscribers(list_id).all()
subscriber_ids = set(map(itemgetter(0), subscriber_id_rows))
return _get_subscriber_details(subscriber_ids)
def _build_query_for_current_subscribers(list_id: ListID) -> Query:
subquery = _build_query_for_latest_expressed_at().subquery()
    return db.session \
        .query(DbSubscriptionUpdate.user_id) \
        .join(subquery, db.and_(
            DbSubscriptionUpdate.user_id == subquery.c.user_id,
            DbSubscriptionUpdate.list_id == subquery.c.list_id,
            DbSubscriptionUpdate.expressed_at == subquery.c.latest_expressed_at
        )) \
        .filter(DbSubscriptionUpdate._state == SubscriptionState.requested.name) \
        .filter(DbSubscriptionUpdate.list_id == list_id)
def _get_subscriber_details(user_ids: set[UserID]) -> Iterator[Subscriber]:
if not user_ids:
return []
    rows = db.session \
        .query(
            DbUser.screen_name,
            DbUser.email_address,
        ) \
        .filter(DbUser.id.in_(user_ids)) \
        .filter(DbUser.email_address != None) \
        .filter_by(initialized=True) \
        .filter_by(email_address_verified=True) \
        .filter_by(suspended=False) \
        .filter_by(deleted=False) \
        .all()
for row in rows:
yield Subscriber(row.screen_name, row.email_address)
def count_subscriptions_by_state(
list_id: ListID,
) -> dict[Union[SubscriptionState, str], int]:
    rows = _build_query_for_current_state(list_id).all()
totals: dict[Union[SubscriptionState, str], int] = {
state: 0 for state in SubscriptionState
}
for state_name, count in rows:
state = SubscriptionState[state_name]
totals[state] = count
totals['total'] = sum(totals.values())
return totals
def _build_query_for_current_state(list_id: ListID) -> Query:
subquery = _build_query_for_latest_expressed_at().subquery()
    return db.session \
        .query(
            DbSubscriptionUpdate._state,
            db.func.count(DbSubscriptionUpdate._state),
        ) \
        .join(subquery, db.and_(
            DbSubscriptionUpdate.user_id == subquery.c.user_id,
            DbSubscriptionUpdate.list_id == subquery.c.list_id,
            DbSubscriptionUpdate.expressed_at == subquery.c.latest_expressed_at
        )) \
        .filter_by(list_id=list_id) \
        .group_by(
            DbSubscriptionUpdate.list_id,
            DbSubscriptionUpdate._state,
        )
def _build_query_for_latest_expressed_at() -> Query:
    return db.session \
        .query(
            DbSubscriptionUpdate.user_id,
            DbSubscriptionUpdate.list_id,
            db.func.max(DbSubscriptionUpdate.expressed_at)
                .label('latest_expressed_at')
        ) \
        .group_by(
            DbSubscriptionUpdate.user_id,
            DbSubscriptionUpdate.list_id
        )
|
BSD 3-Clause New or Revised License
|
albertz/music-player
|
mac/pyobjc-core/Lib/objc/_descriptors.py
|
signature
|
python
|
def signature(signature, **kw):
warnings.warn("Usage objc.typedSelector instead of objc.signature", DeprecationWarning)
kw['signature'] = signature
def makeSignature(func):
return selector(func, **kw)
return makeSignature
|
A Python method decorator that allows easy specification
of Objective-C selectors.
Usage::
@objc.signature('i@:if')
def methodWithX_andY_(self, x, y):
return 0
|
https://github.com/albertz/music-player/blob/d23586f5bf657cbaea8147223be7814d117ae73d/mac/pyobjc-core/Lib/objc/_descriptors.py#L272-L287
|
__all__ = ['IBOutlet', 'IBAction', 'accessor', 'Accessor', 'typedAccessor', 'callbackFor', 'selectorFor', 'synthesize', 'namedselector', 'typedSelector', 'namedSelector', 'instancemethod', 'signature' ]
from objc._objc import ivar, selector, _makeClosure, selector, _C_SEL, _C_ID, _C_NSUInteger, _C_NSBOOL
import sys, textwrap
import warnings
from inspect import getargspec
_C_NSRange = [b"{_NSRange=II}", b"{_NSRange=QQ}"][sys.maxsize > 2**32]
def IBOutlet(name=None):
if name is None:
return ivar(isOutlet=1)
else:
return ivar(name, isOutlet=1)
def IBAction(func):
if func is None:
raise TypeError("IBAction argument must be a callable")
return selector(func, signature=b"v@:@")
def instancemethod(func):
if func is None:
raise TypeError("instancemethod argument must be a callable")
return selector(func, isClassMethod=False)
def accessor(func, typeSignature=b'@'):
args, varargs, varkw, defaults = getargspec(func)
funcName = func.__name__
maxArgs = len(args)
minArgs = maxArgs - len(defaults or ())
selArgs = 1 + funcName.count('_')
if varargs is not None or varkw is not None:
raise TypeError('%s can not be an accessor because it accepts varargs or varkw' % (funcName,))
if not (minArgs <= selArgs <= maxArgs):
if minArgs == maxArgs:
raise TypeError('%s expected to take %d args, but must accept %d from Objective-C (implicit self plus count of underscores)' % (funcName, maxArgs, selArgs))
else:
raise TypeError('%s expected to take between %d and %d args, but must accept %d from Objective-C (implicit self plus count of underscores)' % (funcName, minArgs, maxArgs, selArgs))
if selArgs == 3:
if funcName.startswith('validate') and funcName.endswith('_error_'):
return selector(func, signature=_C_NSBOOL + b'@:N^@o^@')
if funcName.startswith('insertObject_in') and funcName.endswith('AtIndex_'):
return selector(func, signature=b'v@:' + typeSignature + _C_NSUInteger)
elif funcName.startswith('replaceObjectIn') and funcName.endswith('AtIndex_withObject_'):
return selector(func, signature=b'v@:' + _C_NSUInteger + typeSignature)
elif funcName.startswith('get') and funcName.endswith('_range_'):
return selector(func, signature=b'v@:o^@' + _C_NSRange)
elif funcName.startswith('insert') and funcName.endswith('_atIndexes_'):
return selector(func, signature=b'v@:@@')
elif funcName.startswith('replace') and 'AtIndexes_with' in funcName:
return selector(func, signature=b'v@:@@')
elif selArgs == 2:
if funcName.startswith('objectIn') and funcName.endswith('AtIndex_'):
return selector(func, signature=typeSignature + b'@:' + _C_NSUInteger)
elif funcName.startswith('removeObjectFrom') and funcName.endswith('AtIndex_'):
return selector(func, signature=b'v@:' + _C_NSUInteger)
elif funcName.startswith('remove') and funcName.endswith('AtIndexes_'):
return selector(func, signature=b"v@:@")
elif funcName.endswith('AtIndexes_'):
return selector(func, signature=b"@@:@")
elif funcName.startswith('memberOf'):
return selector(func, signature=_C_NSBOOL + b"@:" + typeSignature)
elif funcName.startswith('add') and funcName.endswith('Object_'):
return selector(func, signature=b"v@:" + typeSignature)
elif funcName.startswith('add'):
return selector(func, signature=b"v@:@")
elif funcName.startswith('intersect'):
return selector(func, signature=b"v@:@")
return selector(func, signature=b"v@:" + typeSignature)
elif selArgs == 1:
if funcName.startswith('countOf'):
typeSignature = _C_NSUInteger
elif funcName.startswith('enumerator'):
typeSignature = b"@"
return selector(func, signature=typeSignature + b"@:")
raise TypeError("%s not recognized as an accessor" % (funcName,))
def typedSelector(signature):
def _typedSelector(func):
if func is None:
raise TypeError("typedSelector() function argument must be a callable")
return selector(func, signature=signature)
return _typedSelector
def namedSelector(name, signature=None):
if signature is not None:
def _namedselector(func):
if func is None:
raise TypeError("IBAction argument must be a callable")
return selector(func, selector=name, signature=signature)
else:
def _namedselector(func):
if func is None:
raise TypeError("IBAction argument must be a callable")
return selector(func, selector=name)
return _namedselector
def namedselector(name, signature=None):
warnings.warn("use objc.namedSelector instead of objc.namedselector", DeprecationWarning, stacklevel=2)
return namedSelector(name, signature)
def typedAccessor(typeSignature):
def _typedAccessor(func):
return accessor(func, typeSignature)
return _typedAccessor
def Accessor(func):
warnings.warn(
"Use objc.accessor instead of objc.Accessor", DeprecationWarning)
return accessor(func)
def callbackFor(callable, argIndex=-1):
def addClosure(function):
closure = _makeClosure(function, callable, argIndex)
function.pyobjc_closure = closure
return function
return addClosure
def selectorFor(callable, argIndex=-1):
if argIndex == -1:
for arg in callable.__metadata__()['arguments']:
if arg['type'] == _C_SEL and 'sel_of_type' in arg:
signature = arg['sel_of_type']
break
else:
raise ValueError("No selector argument with type information")
else:
try:
signature = callable.__metadata__()['arguments'][argIndex]['sel_of_type']
except (IndexError, KeyError):
raise ValueError("Not a selector argument with type information")
def addSignature(function):
return selector(function, signature=signature)
return addSignature
def synthesize(name, copy=False, readwrite=True, type=_C_ID, ivarName=None):
if not name:
raise ValueError("Empty property name")
if ivarName is None:
ivarName = '_' + name
classDict = sys._getframe(1).f_locals
setterName = 'set%s%s_'%(name[0].upper(), name[1:])
if copy:
setter = textwrap.dedent('''
def %(name)s(self, value):
self.%(ivar)s = value.copy()
''' % dict(name=setterName, ivar=ivarName))
else:
setter = textwrap.dedent('''
def %(name)s(self, value):
self.%(ivar)s = value
''' % dict(name=setterName, ivar=ivarName))
getter = textwrap.dedent('''
def %(name)s(self):
return self.%(ivar)s
''' % dict(name=name, ivar=ivarName))
if readwrite:
exec(setter, globals(), classDict)
exec(getter, globals(), classDict)
classDict[ivarName] = ivar(type=type)
|
BSD 2-Clause Simplified License
|
maljovec/topopy
|
topopy/TopologicalObject.py
|
TopologicalObject.get_normed_x
|
python
|
def get_normed_x(self, rows=None, cols=None):
if rows is None:
rows = list(range(0, self.get_sample_size()))
if cols is None:
cols = list(range(0, self.get_dimensionality()))
if not hasattr(rows, "__iter__"):
rows = [rows]
rows = sorted(list(set(rows)))
retValue = self.Xnorm[rows, :]
return retValue[:, cols]
|
Returns the normalized input data requested by the user.
Parameters
----------
rows : list of int
A list of non-negative integers specifying the row indices to return
cols : list of int
A list of non-negative integers specifying the column indices to
return
Returns
-------
np.ndarray
A matrix of floating point values specifying the normalized data
values used in internal computations, filtered by the rows and cols
parameters.
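A minimal sketch that builds the object with feature normalization and reads back a slice of the normalized data; it assumes the full topopy and nglpy packages are installed, since methods such as check_duplicates, get_sample_size and get_dimensionality referenced here are not shown in this excerpt:

import numpy as np
import nglpy as ngl
from topopy.TopologicalObject import TopologicalObject

X = np.random.rand(20, 3)   # 20 samples, 3 input dimensions
Y = X.sum(axis=1)           # any scalar response will do for this sketch

topo = TopologicalObject(graph=ngl.EmptyRegionGraph(), normalization='feature')
topo.build(X, Y)
print(topo.get_normed_x(rows=range(5), cols=[0, 1]))  # first five rows, first two normalized columns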
|
https://github.com/maljovec/topopy/blob/73ccc9510bd34be2ead875bc3bc1081ccad26b1f/topopy/TopologicalObject.py#L264-L294
|
import sys
import time
import warnings
import numpy as np
import sklearn.preprocessing
import nglpy as ngl
class TopologicalObject(object):
precision = 16
@staticmethod
def aggregate_duplicates(X, Y, aggregator="mean", precision=precision):
if callable(aggregator):
pass
elif "min" in aggregator.lower():
aggregator = np.min
elif "max" in aggregator.lower():
aggregator = np.max
elif "median" in aggregator.lower():
aggregator = np.median
elif aggregator.lower() in ["average", "mean"]:
aggregator = np.mean
elif "first" in aggregator.lower():
def aggregator(x):
return x[0]
elif "last" in aggregator.lower():
def aggregator(x):
return x[-1]
else:
warnings.warn(
'Aggregator "{}" not understood. Skipping sample '
"aggregation.".format(aggregator)
)
return X, Y
is_y_multivariate = Y.ndim > 1
X_rounded = X.round(decimals=precision)
unique_xs = np.unique(X_rounded, axis=0)
old_size = len(X_rounded)
new_size = len(unique_xs)
if old_size == new_size:
return X, Y
if not is_y_multivariate:
Y = np.atleast_2d(Y).T
reduced_y = np.empty((new_size, Y.shape[1]))
warnings.warn(
"Domain space duplicates caused a data reduction. "
+ "Original size: {} vs. New size: {}".format(old_size, new_size)
)
for col in range(Y.shape[1]):
for i, distinct_row in enumerate(unique_xs):
filtered_rows = np.all(X_rounded == distinct_row, axis=1)
reduced_y[i, col] = aggregator(Y[filtered_rows, col])
if not is_y_multivariate:
reduced_y = reduced_y.flatten()
return unique_xs, reduced_y
def __init__(
self,
graph=None,
gradient="steepest",
normalization=None,
aggregator=None,
debug=False,
):
super(TopologicalObject, self).__init__()
self.reset()
if graph is None:
graph = ngl.EmptyRegionGraph()
self.graph = graph
self.gradient = gradient
self.normalization = normalization
self.debug = debug
self.aggregator = aggregator
def reset(self):
self.X = []
self.Y = []
self.w = []
self.Xnorm = []
def __set_data(self, X, Y, w=None):
self.X = X
self.Y = Y
self.check_duplicates()
if w is not None:
self.w = np.array(w)
else:
self.w = np.ones(len(Y)) * 1.0 / float(len(Y))
if self.normalization == "feature":
min_max_scaler = sklearn.preprocessing.MinMaxScaler()
self.Xnorm = min_max_scaler.fit_transform(np.atleast_2d(self.X))
elif self.normalization == "zscore":
self.Xnorm = sklearn.preprocessing.scale(
self.X, axis=0, with_mean=True, with_std=True, copy=True
)
else:
self.Xnorm = np.array(self.X)
def build(self, X, Y, w=None):
self.reset()
if X is None or Y is None:
return
self.__set_data(X, Y, w)
if self.debug:
sys.stdout.write("Graph Preparation: ")
start = time.perf_counter()
self.graph.build(self.Xnorm)
if self.debug:
end = time.perf_counter()
sys.stdout.write("%f s\n" % (end - start))
def load_data_and_build(self, filename, delimiter=","):
data = np.genfromtxt(
filename, dtype=float, delimiter=delimiter, names=True
)
data = data.view(np.float64).reshape(data.shape + (-1,))
X = data[:, 0:-1]
Y = data[:, -1]
self.build(X=X, Y=Y)
|
BSD 3-Clause New or Revised License
|
azure/azure-event-hubs-python
|
azure/eventhub/common.py
|
parse_sas_token
|
python
|
def parse_sas_token(sas_token):
sas_data = {}
token = sas_token.partition(' ')[2]
fields = token.split('&')
for field in fields:
key, value = field.split('=', 1)
sas_data[key.lower()] = value
return sas_data
|
Parse a SAS token into its components.
:param sas_token: The SAS token.
:type sas_token: str
:rtype: dict[str, str]
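A quick sketch with a made-up token (the signature and expiry below are placeholders, not real credentials); the parser logic is repeated so the snippet runs standalone:

def parse_sas_token(sas_token):
    sas_data = {}
    token = sas_token.partition(' ')[2]          # drop the "SharedAccessSignature " prefix
    for field in token.split('&'):
        key, value = field.split('=', 1)         # split only on the first '='
        sas_data[key.lower()] = value
    return sas_data

token = ('SharedAccessSignature sr=myhub.servicebus.windows.net'
         '&sig=abc%2Fdef%3D&se=1609459200&skn=send-policy')
parsed = parse_sas_token(token)
print(parsed['skn'], parsed['se'])               # send-policy 1609459200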
|
https://github.com/azure/azure-event-hubs-python/blob/326f772f5cbe3d3eaf68b24485554aada463430a/azure/eventhub/common.py#L49-L62
|
from __future__ import unicode_literals
import datetime
import calendar
import json
import six
from uamqp import Message, BatchMessage
from uamqp import types, constants, errors
from uamqp.message import MessageHeader, MessageProperties
_NO_RETRY_ERRORS = (
b"com.microsoft:argument-out-of-range",
b"com.microsoft:entity-disabled",
b"com.microsoft:auth-failed",
b"com.microsoft:precondition-failed",
b"com.microsoft:argument-error"
)
def _error_handler(error):
if error.condition == b'com.microsoft:server-busy':
return errors.ErrorAction(retry=True, backoff=4)
if error.condition == b'com.microsoft:timeout':
return errors.ErrorAction(retry=True, backoff=2)
if error.condition == b'com.microsoft:operation-cancelled':
return errors.ErrorAction(retry=True)
if error.condition == b"com.microsoft:container-close":
return errors.ErrorAction(retry=True, backoff=4)
if error.condition in _NO_RETRY_ERRORS:
return errors.ErrorAction(retry=False)
return errors.ErrorAction(retry=True)
|
MIT License
|
google-tasks-backup/tasks-backup
|
apiclient/discovery.py
|
_add_query_parameter
|
python
|
def _add_query_parameter(url, name, value):
if value is None:
return url
else:
parsed = list(urlparse.urlparse(url))
q = dict(parse_qsl(parsed[4]))
q[name] = value
parsed[4] = urllib.urlencode(q)
return urlparse.urlunparse(parsed)
|
Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
The updated url. Does not update the url if value is None.
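A rough Python 3 equivalent of what the helper does (using urllib.parse instead of the Python 2 urlparse/urllib modules shown in the context below; the URL is illustrative):

from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

def add_query_parameter(url, name, value):
    # Mirrors _add_query_parameter: returns the url unchanged when value is None.
    if value is None:
        return url
    parsed = list(urlparse(url))
    q = dict(parse_qsl(parsed[4]))      # index 4 is the query component
    q[name] = value                     # replaces any existing value for this name
    parsed[4] = urlencode(q)
    return urlunparse(parsed)

print(add_query_parameter('https://example.com/api?pp=1', 'userip', '10.0.0.1'))
# https://example.com/api?pp=1&userip=10.0.0.1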
|
https://github.com/google-tasks-backup/tasks-backup/blob/ffcb2044eb6089d20e1be3f93025fa33c2efbe3e/apiclient/discovery.py#L96-L116
|
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build',
'build_from_document',
'fix_method_name',
'key2param',
]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.util import positional
from oauth2client.anyjson import simplejson
logger = logging.getLogger(__name__)
URITEMPLATE = re.compile('{[^}]*}')
VARNAME = re.compile('[a-zA-Z0-9_-]+')
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
'{api}/{apiVersion}/rest')
DEFAULT_METHOD_DOC = 'A description of how to use this function'
STACK_QUERY_PARAMETERS = ['trace', 'pp', 'userip', 'strict']
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
'pass', 'print', 'raise', 'return', 'try', 'while', 'body']
def fix_method_name(name):
if name in RESERVED_WORDS:
return name + '_'
else:
return name
|
Apache License 2.0
|
barrust/pyprobables
|
probables/cuckoo/cuckoo.py
|
CuckooFilter.expansion_rate
|
python
|
def expansion_rate(self):
return self.__expansion_rate
|
int: The rate of expansion used when the filter grows
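A small usage sketch, assuming pyprobables' usual top-level export (the numbers are arbitrary):

from probables import CuckooFilter   # assumes the pyprobables package is installed

cf = CuckooFilter(capacity=100, bucket_size=4, expansion_rate=2)
print(cf.expansion_rate)             # 2: capacity is multiplied by this factor on expansion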
|
https://github.com/barrust/pyprobables/blob/f348fb878cdfbe6c1d997be093c073d26f9b05aa/probables/cuckoo/cuckoo.py#L162-L164
|
import os
import random
from numbers import Number
from struct import calcsize, pack, unpack
from ..exceptions import CuckooFilterFullError, InitializationError
from ..hashes import fnv_1a
from ..utilities import get_x_bits, is_valid_file
class CuckooFilter(object):
__slots__ = [
"_bucket_size",
"_cuckoo_capacity",
"__max_cuckoo_swaps",
"__expansion_rate",
"__auto_expand",
"__fingerprint_size",
"__hash_func",
"_inserted_elements",
"_buckets",
]
def __init__(
self,
capacity=10000,
bucket_size=4,
max_swaps=500,
expansion_rate=2,
auto_expand=True,
finger_size=4,
filepath=None,
hash_function=None,
):
valid_prms = (
isinstance(capacity, Number)
and capacity >= 1
and isinstance(bucket_size, Number)
and bucket_size >= 1
and isinstance(max_swaps, Number)
and max_swaps >= 1
)
if not valid_prms:
msg = "CuckooFilter: capacity, bucket_size, and max_swaps " "must be an integer greater than 0"
raise InitializationError(msg)
self._bucket_size = int(bucket_size)
self._cuckoo_capacity = int(capacity)
self.__max_cuckoo_swaps = int(max_swaps)
self.__expansion_rate = None
self.expansion_rate = expansion_rate
self.__auto_expand = None
self.auto_expand = auto_expand
self.__fingerprint_size = None
self.fingerprint_size = finger_size
if hash_function is None:
self.__hash_func = fnv_1a
else:
self.__hash_func = hash_function
self._inserted_elements = 0
if filepath is None:
self._buckets = list()
for _ in range(self.capacity):
self.buckets.append(list())
elif is_valid_file(filepath):
self._load(filepath)
else:
msg = "CuckooFilter: failed to load provided file"
raise InitializationError(msg)
def __contains__(self, key):
return self.check(key)
def __str__(self):
msg = (
"{0}:\n"
"\tCapacity: {1}\n"
"\tTotal Bins: {2}\n"
"\tLoad Factor: {3}%\n"
"\tInserted Elements: {4}\n"
"\tMax Swaps: {5}\n"
"\tExpansion Rate: {6}\n"
"\tAuto Expand: {7}"
)
return msg.format(
self.__class__.__name__,
self.capacity,
self.capacity * self.bucket_size,
self.load_factor() * 100,
self.elements_added,
self.max_swaps,
self.expansion_rate,
self.auto_expand,
)
@property
def elements_added(self):
return self._inserted_elements
@property
def capacity(self):
return self._cuckoo_capacity
@property
def max_swaps(self):
return self.__max_cuckoo_swaps
@property
def bucket_size(self):
return self._bucket_size
@property
def buckets(self):
return self._buckets
@property
|
MIT License
|
astrocatalogs/astrocats
|
astrocats/catalog/entry.py
|
Entry.__repr__
|
python
|
def __repr__(self):
jsonstring = dict_to_pretty_string({ENTRY.NAME: self})
return jsonstring
|
Return JSON representation of self.
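A simplified, self-contained sketch of the same idea using the standard json module (astrocats' dict_to_pretty_string applies its own formatting and key ordering; MiniEntry and the entry name are toy stand-ins):

import json
from collections import OrderedDict

class MiniEntry(OrderedDict):
    # Toy stand-in for Entry: repr is pretty-printed JSON keyed by the entry's name.
    def __init__(self, name):
        super().__init__()
        self['name'] = name

    def __repr__(self):
        return json.dumps({self['name']: self}, indent=4)

print(MiniEntry('SN2011fe'))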
|
https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/entry.py#L161-L164
|
import codecs
import gzip as gz
import hashlib
import json
import logging
import os
import sys
from collections import OrderedDict
from copy import deepcopy
from decimal import Decimal
from astrocats.catalog.catdict import CatDict, CatDictError
from astrocats.catalog.error import ERROR, Error
from astrocats.catalog.key import KEY_TYPES, Key, KeyCollection
from astrocats.catalog.model import MODEL, Model
from astrocats.catalog.photometry import PHOTOMETRY, Photometry
from astrocats.catalog.quantity import QUANTITY, Quantity
from astrocats.catalog.source import SOURCE, Source
from astrocats.catalog.spectrum import SPECTRUM, Spectrum
from astrocats.catalog.utils import (alias_priority, dict_to_pretty_string,
is_integer, is_number, listify)
from past.builtins import basestring
from six import string_types
class ENTRY(KeyCollection):
_DIST_PREF_KINDS = [
'heliocentric', 'cmb', 'spectroscopic', 'photometric', 'host',
'cluster'
]
_HOST_DIST_PREF_KINDS = [
'heliocentric', 'cmb', 'spectroscopic', 'photometric', 'host',
'cluster'
]
ALIAS = Key('alias', KEY_TYPES.STRING)
COMOVING_DIST = Key('comovingdist',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
DEC = Key('dec', KEY_TYPES.STRING)
DISCOVER_DATE = Key('discoverdate', KEY_TYPES.STRING, replace_better=True)
DISCOVERER = Key('discoverer', KEY_TYPES.STRING)
DISTINCT_FROM = Key('distinctfrom', KEY_TYPES.STRING)
EBV = Key('ebv', KEY_TYPES.NUMERIC, replace_better=True)
AV_CIRCUM = Key('avcircum', KEY_TYPES.NUMERIC, replace_better=True)
ERRORS = Key('errors', no_source=True)
HOST = Key('host', KEY_TYPES.STRING)
HOST_DEC = Key('hostdec', KEY_TYPES.STRING)
HOST_OFFSET_ANG = Key('hostoffsetang', KEY_TYPES.NUMERIC)
HOST_OFFSET_DIST = Key('hostoffsetdist', KEY_TYPES.NUMERIC)
HOST_RA = Key('hostra', KEY_TYPES.STRING)
HOST_REDSHIFT = Key('hostredshift',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
HOST_VELOCITY = Key('hostvelocity',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
HOST_LUM_DIST = Key('hostlumdist',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
HOST_COMOVING_DIST = Key('hostcomovingdist',
KEY_TYPES.NUMERIC,
kind_preference=_HOST_DIST_PREF_KINDS,
replace_better=True)
LUM_DIST = Key('lumdist',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
MAX_ABS_MAG = Key('maxabsmag', KEY_TYPES.NUMERIC)
MAX_APP_MAG = Key('maxappmag', KEY_TYPES.NUMERIC)
MAX_BAND = Key('maxband', KEY_TYPES.STRING)
MAX_DATE = Key('maxdate', KEY_TYPES.STRING, replace_better=True)
MODELS = Key('models')
NAME = Key('name', KEY_TYPES.STRING, no_source=True)
PHOTOMETRY = Key('photometry')
RA = Key('ra', KEY_TYPES.STRING)
REDSHIFT = Key('redshift',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
SCHEMA = Key('schema', no_source=True)
SOURCES = Key('sources', no_source=True)
SPECTRA = Key('spectra')
VELOCITY = Key('velocity',
KEY_TYPES.NUMERIC,
kind_preference=_DIST_PREF_KINDS,
replace_better=True)
class Entry(OrderedDict):
_KEYS = ENTRY
def __init__(self, catalog=None, name=None, stub=False):
super(Entry, self).__init__()
self.catalog = catalog
self.filename = None
self.dupe_of = []
self._stub = stub
if catalog:
self._log = catalog.log
else:
from astrocats.catalog.catalog import Catalog
self._log = logging.getLogger()
self.catalog = Catalog(None, self._log)
self[self._KEYS.NAME] = name
return
|
MIT License
|
aleph-im/pyaleph
|
src/aleph/jobs.py
|
retry_messages_job
|
python
|
async def retry_messages_job(shared_stats: Optional[Dict]):
seen_ids: Dict = {}
actions: List = []
messages_actions: List = []
gtasks: List[Coroutine] = []
tasks: List[asyncio.Task] = []
loop = asyncio.get_event_loop()
i: int = 0
j: int = 0
find_params: Dict = {}
while await PendingMessage.collection.count_documents(find_params):
async for pending in PendingMessage.collection.find(find_params).sort(
[("message.time", 1)]
).batch_size(256):
LOGGER.debug(
f"retry_message_job len_seen_ids={len(seen_ids)} "
f"len_gtasks={len(gtasks)} len_tasks={len(tasks)}"
)
if shared_stats is not None:
shared_stats["retry_messages_job_seen_ids"] = len(seen_ids)
shared_stats["retry_messages_job_gtasks"] = len(gtasks)
shared_stats["retry_messages_job_tasks"] = len(tasks)
shared_stats["retry_messages_job_actions"] = len(actions)
shared_stats["retry_messages_job_messages_actions"] = len(
messages_actions
)
shared_stats["retry_messages_job_i"] = i
shared_stats["retry_messages_job_j"] = j
if (
pending["message"]["item_type"] == ItemType.IPFS
or pending["message"]["type"] == "STORE"
):
i += 15
j += 100
else:
i += 1
j += 1
tasks.append(
asyncio.create_task(
handle_pending_message(pending, seen_ids, actions, messages_actions)
)
)
if j >= 20000:
gtasks.append(
asyncio.create_task(
join_pending_message_tasks(
tasks,
actions_list=actions,
messages_actions_list=messages_actions,
)
)
)
tasks = []
actions = []
messages_actions = []
i = 0
j = 0
if i >= 1024:
await join_pending_message_tasks(tasks)
tasks = []
i = 0
gtasks.append(
asyncio.create_task(
join_pending_message_tasks(
tasks, actions_list=actions, messages_actions_list=messages_actions
)
)
)
await asyncio.gather(*gtasks, return_exceptions=True)
gtasks = []
if await PendingMessage.collection.count_documents(find_params) > 100000:
LOGGER.info("Cleaning messages")
clean_actions = []
for key, height in seen_ids.items():
clean_actions.append(
DeleteMany(
{
"message.item_hash": key[0],
"message.sender": key[1],
"source.chain_name": key[2],
"source.height": {"$gt": height},
}
)
)
result = await PendingMessage.collection.bulk_write(clean_actions)
LOGGER.info(repr(result))
await asyncio.sleep(5)
|
Every few minutes, try to handle messages that were added to the
pending queue (unavailable messages).
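The core pattern here - accumulate per-message tasks and periodically gather them in batches - reduced to a runnable sketch with a dummy handler (handle_item and the batch size are illustrative, not part of pyaleph):

import asyncio

async def handle_item(i):
    await asyncio.sleep(0)                 # stand-in for handle_pending_message
    return i

async def process_all(items, batch_size=4):
    tasks, results = [], []
    for item in items:
        tasks.append(asyncio.create_task(handle_item(item)))
        if len(tasks) >= batch_size:       # mirrors the "j >= 20000" flush above
            results += await asyncio.gather(*tasks, return_exceptions=True)
            tasks = []
    if tasks:                              # final flush after the loop
        results += await asyncio.gather(*tasks, return_exceptions=True)
    return results

print(asyncio.run(process_all(range(10))))   # [0, 1, ..., 9]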
|
https://github.com/aleph-im/pyaleph/blob/6e8348b8a3eb33aa3ca33f7ff24be5b3575e923b/src/aleph/jobs.py#L71-L184
|
import asyncio
from logging import getLogger
from multiprocessing import Process
from multiprocessing.managers import SyncManager, RemoteError
from typing import Coroutine, List, Dict, Optional
import aioipfs
from pymongo import DeleteOne, InsertOne, DeleteMany
from pymongo.errors import CursorNotFound
from aleph.chains.common import incoming, get_chaindata_messages
from aleph.model.messages import Message, CappedMessage
from aleph.model.p2p import get_peers
from aleph.model.pending import PendingMessage, PendingTX
from aleph.network import check_message
from aleph.services import filestore
from aleph.services.ipfs.common import connect_ipfs_peer
from aleph.types import ItemType
LOGGER = getLogger("JOBS")
MANAGER = None
RETRY_LOCK = asyncio.Lock()
class DBManager(SyncManager):
pass
async def handle_pending_message(
pending, seen_ids, actions_list, messages_actions_list
):
result = await incoming(
pending["message"],
chain_name=pending["source"].get("chain_name"),
tx_hash=pending["source"].get("tx_hash"),
height=pending["source"].get("height"),
seen_ids=seen_ids,
check_message=pending["source"].get("check_message", True),
retrying=True,
bulk_operation=True,
)
if result is not None:
if result is not True:
messages_actions_list.append(result)
actions_list.append(DeleteOne({"_id": pending["_id"]}))
async def join_pending_message_tasks(
tasks, actions_list=None, messages_actions_list=None
):
try:
await asyncio.gather(*tasks, return_exceptions=True)
except Exception:
LOGGER.exception("error in incoming task")
tasks.clear()
if messages_actions_list is not None and len(messages_actions_list):
await Message.collection.bulk_write(messages_actions_list)
await CappedMessage.collection.bulk_write(messages_actions_list)
messages_actions_list.clear()
if actions_list is not None and len(actions_list):
await PendingMessage.collection.bulk_write(actions_list)
actions_list.clear()
|
MIT License
|
gofrendiasgard/kokoropy
|
kokoropy/packages/alembic/script.py
|
ScriptDirectory.from_config
|
python
|
def from_config(cls, config):
script_location = config.get_main_option('script_location')
if script_location is None:
raise util.CommandError("No 'script_location' key "
"found in configuration.")
truncate_slug_length = config.get_main_option("truncate_slug_length")
if truncate_slug_length is not None:
truncate_slug_length = int(truncate_slug_length)
return ScriptDirectory(
util.coerce_resource_to_filename(script_location),
file_template=config.get_main_option(
'file_template',
_default_file_template),
truncate_slug_length=truncate_slug_length,
sourceless=config.get_main_option("sourceless") == "true"
)
|
Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
instance.
The :class:`.Config` need only have the ``script_location`` key
present.
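A usage sketch against a stock Alembic-style setup (this repository vendors Alembic under kokoropy.packages; the alembic import path and the existing 'migrations' directory below are assumptions):

from alembic.config import Config
from alembic.script import ScriptDirectory

cfg = Config()                                         # in-memory config, no .ini file needed
cfg.set_main_option("script_location", "migrations")   # assumes a 'migrations' scripts folder exists
script_dir = ScriptDirectory.from_config(cfg)
print(script_dir.dir, script_dir.file_template)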
|
https://github.com/gofrendiasgard/kokoropy/blob/49c8ca4b7dd2a084f2ced33fc5987b8a8b62c995/kokoropy/packages/alembic/script.py#L49-L71
|
import datetime
import os
import re
import shutil
from . import util
_sourceless_rev_file = re.compile(r'(.*\.py)(c|o)?$')
_only_source_rev_file = re.compile(r'(.*\.py)$')
_legacy_rev = re.compile(r'([a-f0-9]+)\.py$')
_mod_def_re = re.compile(r'(upgrade|downgrade)_([a-z0-9]+)')
_slug_re = re.compile(r'\w+')
_default_file_template = "%(rev)s_%(slug)s"
_relative_destination = re.compile(r'(?:\+|-)\d+')
class ScriptDirectory(object):
def __init__(self, dir, file_template=_default_file_template,
truncate_slug_length=40,
sourceless=False):
self.dir = dir
self.versions = os.path.join(self.dir, 'versions')
self.file_template = file_template
self.truncate_slug_length = truncate_slug_length or 40
self.sourceless = sourceless
if not os.access(dir, os.F_OK):
raise util.CommandError("Path doesn't exist: %r. Please use "
"the 'init' command to create a new "
"scripts folder." % dir)
@classmethod
|
MIT License
|
mytechnotalent/python-for-kids
|
Part_6_Classes/0014_escape_room/Game.py
|
Game.ask_random_question
|
python
|
def ask_random_question(d_questions):
random_question = choice(list(d_questions))
answer_1 = d_questions[random_question][0]
answer_2 = d_questions[random_question][1]
answer_3 = d_questions[random_question][2]
correct_answer_index = d_questions[random_question][3]
correct_answer = d_questions[random_question][correct_answer_index]
return random_question, answer_1, answer_2, answer_3, correct_answer_index, correct_answer
|
Method to ask a random question from the database
Params:
d_questions: dict
Returns:
str, str, str, str, int, str
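With a tiny, made-up question dictionary (the last list element is the index of the correct answer); the helper is repeated so the snippet runs standalone:

from random import choice

def ask_random_question(d_questions):
    # Same return shape as the method above.
    q = choice(list(d_questions))
    a1, a2, a3, idx = d_questions[q][:4]
    return q, a1, a2, a3, idx, d_questions[q][idx]

questions = {"What is 2 + 2?": ["3", "4", "5", 1]}   # index 1 -> "4" is correct
print(ask_random_question(questions))
# ('What is 2 + 2?', '3', '4', '5', 1, '4')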
|
https://github.com/mytechnotalent/python-for-kids/blob/4487df5f8d39a200016a7fa4da5c66345e6ead56/Part_6_Classes/0014_escape_room/Game.py#L43-L58
|
from random import randint, choice
class Game:
@staticmethod
def generate_random_number(grid):
x = randint(1, grid.available_width)
return x
@staticmethod
def generate_random_numbers(grid):
x = randint(1, grid.available_width)
y = randint(1, grid.available_height)
while x == 1 and y == 1:
x = randint(1, grid.available_width)
y = randint(1, grid.available_width)
return x, y
@staticmethod
|
Apache License 2.0
|
hunch/hunch-gift-app
|
django/views/generic/list.py
|
MultipleObjectMixin.get_context_object_name
|
python
|
def get_context_object_name(self, object_list):
if self.context_object_name:
return self.context_object_name
elif hasattr(object_list, 'model'):
return smart_str(object_list.model._meta.verbose_name_plural)
else:
return None
|
Get the name of the item to be used in the context.
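A self-contained sketch of the fallback logic with stand-in objects (FakeModel and FakeQuerySet are not real Django classes):

class _Meta:
    verbose_name_plural = 'articles'

class FakeModel:
    _meta = _Meta()

class FakeQuerySet(list):
    model = FakeModel

def get_context_object_name(context_object_name, object_list):
    # Same precedence as the mixin: explicit name, then the model's plural name, else None.
    if context_object_name:
        return context_object_name
    elif hasattr(object_list, 'model'):
        return str(object_list.model._meta.verbose_name_plural)
    return None

print(get_context_object_name(None, FakeQuerySet()))     # articles
print(get_context_object_name('posts', FakeQuerySet()))  # posts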
|
https://github.com/hunch/hunch-gift-app/blob/8c7cad24cc0d9900deb4175e6b768c64a3d7adcf/django/views/generic/list.py#L65-L74
|
from django.core.paginator import Paginator, InvalidPage
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import smart_str
from django.views.generic.base import TemplateResponseMixin, View
class MultipleObjectMixin(object):
allow_empty = True
queryset = None
model = None
paginate_by = None
context_object_name = None
def get_queryset(self):
if self.queryset is not None:
queryset = self.queryset
if hasattr(queryset, '_clone'):
queryset = queryset._clone()
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise ImproperlyConfigured(u"'%s' must define 'queryset' or 'model'"
% self.__class__.__name__)
return queryset
def paginate_queryset(self, queryset, page_size):
if queryset.count() > page_size:
paginator = Paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())
page = self.kwargs.get('page', None) or self.request.GET.get('page', 1)
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404("Page is not 'last', nor can it be converted to an int.")
try:
page = paginator.page(page_number)
return (paginator, page, page.object_list, True)
except InvalidPage:
raise Http404(u'Invalid page (%s)' % page_number)
else:
return (None, None, queryset, False)
def get_paginate_by(self, queryset):
return self.paginate_by
def get_allow_empty(self):
return self.allow_empty
|
MIT License
|
aldebaran/qibuild
|
python/qisys/ui.py
|
_unicode_representation
|
python
|
def _unicode_representation(data):
if isinstance(data, six.string_types):
return "'" + data + "'"
elif isinstance(data, tuple):
unicode_data = "("
for value in data:
if unicode_data != "(":
unicode_data += ", "
unicode_data += _unicode_representation(value)
unicode_data += ")"
return unicode_data
elif isinstance(data, list):
unicode_data = "["
for value in data:
if unicode_data != "[":
unicode_data += ", "
unicode_data += _unicode_representation(value)
unicode_data += "]"
return unicode_data
if six.PY3:
return str(data).encode("utf-8")
return unicode(data)
|
Return a unicode representation of the given data
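For example, with the function above in scope and purely string-based containers (numbers would hit the fall-through branch, which encodes to bytes under Python 3, so they are avoided here):

print(_unicode_representation(('build', ['-c', 'debug'])))
# ('build', ['-c', 'debug'])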
|
https://github.com/aldebaran/qibuild/blob/efea6fa3744664348717fe5e8df708a3cf392072/python/qisys/ui.py#L171-L193
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import struct
import difflib
import platform
import datetime
import functools
import traceback
import six
_console = None
HAS_PYREADLINE = True
if os.name == 'nt':
try:
from pyreadline.console import Console
_console = Console()
except Exception:
HAS_PYREADLINE = False
class _Color(object):
def __init__(self, code, modifier=None):
self.code = '\033[%d' % code
if modifier is not None:
self.code += ';%dm' % modifier
else:
self.code += 'm'
reset = _Color(0)
bold = _Color(1)
faint = _Color(2)
standout = _Color(3)
underline = _Color(4)
blink = _Color(5)
overline = _Color(6)
black = _Color(30)
darkred = _Color(31)
darkgreen = _Color(32)
brown = _Color(33)
darkblue = _Color(34)
purple = _Color(35)
teal = _Color(36)
lightgray = _Color(37)
darkgray = _Color(30, 1)
red = _Color(31, 1)
green = _Color(32, 1)
yellow = _Color(33, 1)
blue = _Color(34, 1)
fuchsia = _Color(35, 1)
turquoise = _Color(36, 1)
white = _Color(37, 1)
darkteal = turquoise
darkyellow = brown
fuscia = fuchsia
CONFIG = {
"verbose": os.environ.get("VERBOSE"),
"quiet": False,
"color": "auto",
"title": "auto",
"timestamp": False,
"interactive": True,
"record": False
}
_MESSAGES = list()
def configure_logging(args):
verbose = os.environ.get("VERBOSE", False)
if not verbose:
verbose = args.verbose
CONFIG["color"] = args.color
CONFIG["title"] = args.title
CONFIG["verbose"] = verbose
CONFIG["quiet"] = args.quiet
CONFIG["timestamp"] = args.timestamp
def config_title(fp):
_config_title = CONFIG["title"]
if _config_title.lower() == "never":
return False
if _config_title.lower() == "always":
return True
if os.name == 'nt':
return fp.isatty() and _console is not None
legal_terms = ["xterm", "xterm-256color", "xterm-color",
"Eterm", "aterm", "rxvt", "screen", "kterm",
"rxvt-unicode", "gnome", "interix", "cygwin",
"rxvt-unicode-256color"]
return fp.isatty() and 'TERM' in os.environ and os.environ['TERM'] in legal_terms
def config_color(fp):
_config_color = CONFIG["color"]
if _config_color.lower() == "never":
return False
if _config_color.lower() == "always":
return True
if os.name == 'nt' and not HAS_PYREADLINE or not fp.isatty():
return False
return True
_enable_xterm_title = None
def update_title(mystr, fp):
if os.name == "nt":
_update_title_windows(mystr)
else:
_update_title_unix(mystr, fp)
def _update_title_unix(mystr, fp):
global _enable_xterm_title
if _enable_xterm_title is None:
_enable_xterm_title = config_title(fp)
if _enable_xterm_title:
mystr = '\x1b]0;%s\x07' % mystr
fp.write(mystr)
fp.flush()
def _update_title_windows(mystr):
if _console and config_title(sys.stdout):
_console.title(txt=mystr)
|
BSD 3-Clause New or Revised License
|
yaqwsx/kikit
|
kikit/panelize.py
|
Panel.makeFrame
|
python
|
def makeFrame(self, width, hspace, vspace):
frameInnerRect = expandRect(shpBoxToRect(self.boardsBBox()),
hspace - SHP_EPSILON, vspace + SHP_EPSILON)
frameOuterRect = expandRect(frameInnerRect, width + SHP_EPSILON)
outerRing = rectToRing(frameOuterRect)
innerRing = rectToRing(frameInnerRect)
polygon = Polygon(outerRing, [innerRing])
self.appendSubstrate(polygon)
innerArea = self.substrates[0].boundingBox()
for s in self.substrates:
innerArea = combineBoundingBoxes(innerArea, s.boundingBox())
frameCutsV = self.makeFrameCutsV(innerArea, frameInnerRect, frameOuterRect)
frameCutsH = self.makeFrameCutsH(innerArea, frameInnerRect, frameOuterRect)
return frameCutsV, frameCutsH
|
Build a frame around the boards. Specify the frame width and the spacing
between the boards' substrates and the frame. Return a tuple of vertical
and horizontal cuts.
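The geometry behind it - an outer rectangle with the spacing-expanded bounding box of the boards punched out as a hole - can be sketched with shapely alone (the numbers are illustrative, not real board dimensions or KiCad units):

from shapely.geometry import Polygon, box

minx, miny, maxx, maxy = 0, 0, 50, 30        # bounding box of all board substrates
hspace, vspace, width = 2, 2, 5              # spacing and frame width

inner = box(minx - hspace, miny - vspace, maxx + hspace, maxy + vspace)
ix0, iy0, ix1, iy1 = inner.bounds
outer = box(ix0 - width, iy0 - width, ix1 + width, iy1 + width)

# The frame is the outer ring with the inner ring as a hole, as in makeFrame.
frame = Polygon(outer.exterior.coords, [inner.exterior.coords])
print(frame.area == outer.area - inner.area)  # True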
|
https://github.com/yaqwsx/kikit/blob/cddb09d784fbd84d3736538f466597f43e6ab045/kikit/panelize.py#L715-L734
|
from kikit import pcbnew_compatibility
from kikit.pcbnew_compatibility import pcbnew
from kikit.common import normalize
from pcbnew import (GetBoard, LoadBoard,
FromMM, ToMM, wxPoint, wxRect, wxRectMM, wxPointMM)
from enum import Enum, IntEnum
from shapely.geometry import (Polygon, MultiPolygon, Point, LineString, box,
GeometryCollection, MultiLineString)
from shapely.prepared import prep
import shapely
from itertools import product, chain
import numpy as np
import os
from kikit import substrate
from kikit import units
from kikit.substrate import Substrate, linestringToKicad, extractRings
from kikit.defs import STROKE_T, Layer, EDA_TEXT_HJUSTIFY_T, EDA_TEXT_VJUSTIFY_T
from kikit.common import *
class PanelError(RuntimeError):
pass
def identity(x):
return x
class BasicGridPosition:
def __init__(self, destination, boardSize, horSpace, verSpace):
self.destination = destination
self.boardSize = boardSize
self.horSpace = horSpace
self.verSpace = verSpace
def position(self, i, j):
return wxPoint(self.destination[0] + j * (self.boardSize.GetWidth() + self.horSpace),
self.destination[1] + i * (self.boardSize.GetHeight() + self.verSpace))
def rotation(self, i, j):
return 0
class OddEvenRowsPosition(BasicGridPosition):
def rotation(self, i, j):
if i % 2 == 0:
return 0
return 1800
class OddEvenColumnPosition(BasicGridPosition):
def rotation(self, i, j):
if j % 2 == 0:
return 0
return 1800
class OddEvenRowsColumnsPosition(BasicGridPosition):
def rotation(self, i, j):
if (i * j) % 2 == 0:
return 0
return 1800
class Origin(Enum):
Center = 0
TopLeft = 1
TopRight = 2
BottomLeft = 3
BottomRight = 4
def getOriginCoord(origin, bBox):
if origin == Origin.Center:
return wxPoint(bBox.GetX() + bBox.GetWidth() // 2,
bBox.GetY() + bBox.GetHeight() // 2)
if origin == Origin.TopLeft:
return wxPoint(bBox.GetX(), bBox.GetY())
if origin == Origin.TopRight:
return wxPoint(bBox.GetX() + bBox.GetWidth(), bBox.GetY())
if origin == Origin.BottomLeft:
return wxPoint(bBox.GetX(), bBox.GetY() + bBox.GetHeight())
if origin == Origin.BottomRight:
return wxPoint(bBox.GetX() + bBox.GetWidth(), bBox.GetY() + bBox.GetHeight())
def appendItem(board, item):
try:
newItem = item.Duplicate()
except TypeError:
newItem = pcbnew.Cast_to_BOARD_ITEM(item).Duplicate().Cast()
board.Add(newItem)
def transformArea(board, sourceArea, translate, origin, rotationAngle):
for drawing in collectItems(board.GetDrawings(), sourceArea):
drawing.Rotate(origin, rotationAngle)
drawing.Move(translate)
for footprint in collectItems(board.GetFootprints(), sourceArea):
footprint.Rotate(origin, rotationAngle)
footprint.Move(translate)
for track in collectItems(board.GetTracks(), sourceArea):
track.Rotate(origin, rotationAngle)
track.Move(translate)
for zone in collectItems(board.Zones(), sourceArea):
zone.Rotate(origin, rotationAngle)
zone.Move(translate)
def collectNetNames(board):
return [str(x) for x in board.GetNetInfo().NetsByName() if len(str(x)) > 0]
def remapNets(collection, mapping):
for item in collection:
item.SetNetCode(mapping[item.GetNetname()].GetNet())
def toPolygon(entity):
if isinstance(entity, list):
return list([toPolygon(e) for e in entity])
if isinstance(entity, Polygon) or isinstance(entity, MultiPolygon):
return entity
if isinstance(entity, wxRect):
return Polygon([
(entity.GetX(), entity.GetY()),
(entity.GetX() + entity.GetWidth(), entity.GetY()),
(entity.GetX() + entity.GetWidth(), entity.GetY() + entity.GetHeight()),
(entity.GetX(), entity.GetY() + entity.GetHeight())])
raise NotImplementedError("Cannot convert {} to Polygon".format(type(entity)))
def rectString(rect):
return "({}, {}) w: {}, h: {}".format(
ToMM(rect.GetX()), ToMM(rect.GetY()),
ToMM(rect.GetWidth()), ToMM(rect.GetHeight()))
def expandRect(rect, offsetX, offsetY=None):
if offsetY is None:
offsetY = offsetX
offsetX = int(offsetX)
offsetY = int(offsetY)
return wxRect(rect.GetX() - offsetX, rect.GetY() - offsetY,
rect.GetWidth() + 2 * offsetX, rect.GetHeight() + 2 * offsetY)
def rectToRing(rect):
return [
(rect.GetX(), rect.GetY()),
(rect.GetX() + rect.GetWidth(), rect.GetY()),
(rect.GetX() + rect.GetWidth(), rect.GetY() + rect.GetHeight()),
(rect.GetX(), rect.GetY() + rect.GetHeight())
]
def roundPoint(point, precision=-4):
if isinstance(point, Point):
return Point(round(point.x, precision), round(point.y, precision))
return Point(round(point[0], precision), round(point[1], precision))
def undoTransformation(point, rotation, origin, translation):
segment = pcbnew.PCB_SHAPE()
segment.SetShape(STROKE_T.S_SEGMENT)
segment.SetStart(wxPoint(point[0], point[1]))
segment.SetEnd(wxPoint(0, 0))
segment.Move(wxPoint(-translation[0], -translation[1]))
segment.Rotate(origin, -rotation)
return segment.GetStart()
def removeCutsFromFootprint(footprint):
edges = []
for edge in footprint.GraphicalItems():
if edge.GetLayerName() != "Edge.Cuts":
continue
footprint.Remove(edge)
edges.append(edge)
return edges
def renameNets(board, renamer):
originalNetNames = collectNetNames(board)
netinfo = board.GetNetInfo()
newNetMapping = { "": netinfo.GetNetItem("") }
newNames = set()
for name in originalNetNames:
newName = renamer(name)
newNet = pcbnew.NETINFO_ITEM(board, newName)
newNetMapping[name] = newNet
board.Add(newNet)
newNames.add(newName)
remapNets(board.GetPads(), newNetMapping)
remapNets(board.GetTracks(), newNetMapping)
remapNets(board.Zones(), newNetMapping)
for name in originalNetNames:
if name != "" and name not in newNames:
board.RemoveNative(netinfo.GetNetItem(name))
def renameRefs(board, renamer):
for footprint in board.GetFootprints():
ref = footprint.Reference().GetText()
footprint.Reference().SetText(renamer(ref))
def isBoardEdge(edge):
return isinstance(edge, pcbnew.PCB_SHAPE) and edge.GetLayerName() == "Edge.Cuts"
def increaseZonePriorities(board, amount=1):
for zone in board.Zones():
zone.SetPriority(zone.GetPriority() + amount)
def tabSpacing(width, count):
return [width * i / (count + 1) for i in range(1, count + 1)]
def prolongCut(cut, prolongation):
c = list([np.array(x) for x in cut.coords])
c[0] += normalize(c[0] - c[1]) * prolongation
c[-1] += normalize(c[-1] - c[-2]) * prolongation
return LineString(c)
def polygonToZone(polygon, board):
zone = pcbnew.ZONE(board)
boundary = polygon.exterior
zone.Outline().AddOutline(linestringToKicad(boundary))
for hole in polygon.interiors:
boundary = hole.exterior
zone.Outline().AddHole(linestringToKicad(boundary))
return zone
def isAnnotation(footprint):
info = footprint.GetFPID()
if info.GetLibNickname() != "kikit":
return False
return info.GetLibItemName() in ["Tab", "Board"]
def readKiKitProps(footprint):
for x in footprint.GraphicalItemsList():
if not isinstance(x, pcbnew.FP_TEXT):
continue
text = x.GetText()
if text.startswith("KIKIT:"):
return readParameterList(text[len("KIKIT:"):])
return {}
class TabAnnotation:
def __init__(self, ref, origin, direction, width, maxLength=fromMm(100)):
self.ref = ref
self.origin = origin
self.direction = direction
self.width = width
self.maxLength = maxLength
@staticmethod
def fromFootprint(footprint):
origin = footprint.GetPosition()
radOrientaion = footprint.GetOrientationRadians()
direction = (np.cos(radOrientaion), -np.sin(radOrientaion))
props = readKiKitProps(footprint)
width = units.readLength(props["width"])
return TabAnnotation(footprint.GetReference(), origin, direction, width)
def convertToAnnotation(footprint):
name = footprint.GetFPID().GetLibItemName()
if name == "Tab":
return [TabAnnotation.fromFootprint(footprint)]
return []
def buildTabs(substrate, partitionLines, tabAnnotations):
tabs, cuts = [], []
for annotation in tabAnnotations:
t, c = substrate.tab(annotation.origin, annotation.direction,
annotation.width, partitionLines, annotation.maxLength)
if t is not None:
tabs.append(t)
cuts.append(c)
return tabs, cuts
def normalizePartitionLineOrientation(line):
if isinstance(line, MultiLineString):
return MultiLineString([normalizePartitionLineOrientation(x) for x in line.geoms])
if isinstance(line, GeometryCollection):
return GeometryCollection([normalizePartitionLineOrientation(l) for l in line.geoms])
if not isLinestringCyclic(line):
return line
r = LinearRing(line.coords)
if not r.is_ccw:
return line
return LineString(list(r.coords)[::-1])
def maxTabCount(edgeLen, width, minDistance):
if edgeLen < width:
return 0
c = 1 + (edgeLen - minDistance) // (minDistance + width)
return max(0, int(c))
class Panel:
def __init__(self):
self.board = pcbnew.BOARD()
self.substrates = []
self.boardSubstrate = Substrate([])
self.backboneLines = []
self.hVCuts = set()
self.vVCuts = set()
self.vCutLayer = Layer.Cmts_User
self.vCutClearance = 0
self.copperLayerCount = None
self.zonesToRefill = pcbnew.ZONES()
def save(self, filename):
for edge in self.boardSubstrate.serialize():
self.board.Add(edge)
vcuts = self._renderVCutH() + self._renderVCutV()
keepouts = []
for cut, clearanceArea in vcuts:
self.board.Add(cut)
if clearanceArea is not None:
keepouts.append(self.addKeepout(clearanceArea))
fillerTool = pcbnew.ZONE_FILLER(self.board)
fillerTool.Fill(self.zonesToRefill)
self.board.Save(filename)
for cut, _ in vcuts:
self.board.Remove(cut)
for keepout in keepouts:
self.board.Remove(keepout)
for edge in collectEdges(self.board, "Edge.Cuts"):
self.board.Remove(edge)
def _uniquePrefix(self):
return "Board_{}-".format(len(self.substrates))
def inheritDesignSettings(self, boardFilename):
b = pcbnew.LoadBoard(boardFilename)
self.setDesignSettings(b.GetDesignSettings())
def setDesignSettings(self, designSettings):
self.board.SetDesignSettings(designSettings)
def inheritProperties(self, boardFilename):
b = pcbnew.LoadBoard(boardFilename)
self.board.SetProperties(b.GetProperties())
def setProperties(self, properties):
self.board.SetProperties(properties)
def appendBoard(self, filename, destination, sourceArea=None,
origin=Origin.Center, rotationAngle=0, shrink=False,
tolerance=0, bufferOutline=fromMm(0.001), netRenamer=None,
refRenamer=None):
board = LoadBoard(filename)
thickness = board.GetDesignSettings().GetBoardThickness()
if len(self.substrates) == 0:
self.board.GetDesignSettings().SetBoardThickness(thickness)
else:
panelThickness = self.board.GetDesignSettings().GetBoardThickness()
if panelThickness != thickness:
raise PanelError(f"Cannot append board {filename} as its " f"thickness ({toMm(thickness)} mm) differs from " f"thickness of the panel ({toMm(panelThickness)}) mm")
self.inheritCopperLayers(board)
if not sourceArea:
sourceArea = findBoardBoundingBox(board)
elif shrink:
sourceArea = findBoardBoundingBox(board, sourceArea)
enlargedSourceArea = expandRect(sourceArea, tolerance)
originPoint = getOriginCoord(origin, sourceArea)
translation = wxPoint(destination[0] - originPoint[0],
destination[1] - originPoint[1])
if netRenamer is None:
netRenamer = lambda x, y: self._uniquePrefix() + y
renameNets(board, lambda x: netRenamer(len(self.substrates), x))
if refRenamer is not None:
renameRefs(board, lambda x: refRenamer(len(self.substrates), x))
drawings = collectItems(board.GetDrawings(), enlargedSourceArea)
footprints = collectFootprints(board.GetFootprints(), enlargedSourceArea)
tracks = collectItems(board.GetTracks(), enlargedSourceArea)
zones = collectItems(board.Zones(), enlargedSourceArea)
edges = []
annotations = []
for footprint in footprints:
for item in (*footprint.GraphicalItems(), footprint.Value(), footprint.Reference()):
if isinstance(item, pcbnew.TEXTE_MODULE) and item.IsKeepUpright():
actualOrientation = item.GetDrawRotation()
item.SetKeepUpright(False)
item.SetTextAngle(actualOrientation - footprint.GetOrientation())
footprint.Rotate(originPoint, rotationAngle)
footprint.Move(translation)
edges += removeCutsFromFootprint(footprint)
if isAnnotation(footprint):
annotations.extend(convertToAnnotation(footprint))
else:
appendItem(self.board, footprint)
for track in tracks:
track.Rotate(originPoint, rotationAngle)
track.Move(translation)
appendItem(self.board, track)
for zone in zones:
zone.Rotate(originPoint, rotationAngle)
zone.Move(translation)
appendItem(self.board, zone)
for netId in board.GetNetInfo().NetsByNetcode():
self.board.Add(board.GetNetInfo().GetNetItem(netId))
for drawing in drawings:
drawing.Rotate(originPoint, rotationAngle)
drawing.Move(translation)
edges += [edge for edge in drawings if isBoardEdge(edge)]
otherDrawings = [edge for edge in drawings if not isBoardEdge(edge)]
try:
o = Substrate(edges, -bufferOutline)
s = Substrate(edges, bufferOutline)
self.boardSubstrate.union(s)
self.substrates.append(o)
self.substrates[-1].annotations = annotations
except substrate.PositionError as e:
point = undoTransformation(e.point, rotationAngle, originPoint, translation)
raise substrate.PositionError(filename + ": " + e.origMessage, point)
for drawing in otherDrawings:
appendItem(self.board, drawing)
return findBoundingBox(edges)
def appendSubstrate(self, substrate):
polygon = toPolygon(substrate)
self.boardSubstrate.union(polygon)
def boardsBBox(self):
if len(self.substrates) == 0:
raise RuntimeError("There are no substrates, cannot compute bounding box")
bbox = self.substrates[0].bounds()
for p in islice(self.substrates, 1, None):
bbox = shpBBoxMerge(bbox, p.bounds())
return bbox
def panelBBox(self):
return self.boardSubstrate.bounds()
def addVCutH(self, pos):
self.hVCuts.add(pos)
def addVCutV(self, pos):
self.vVCuts.add(pos)
def setVCutLayer(self, layer):
self.vCutLayer = layer
def setVCutClearance(self, clearance):
self.vCutClearance = clearance
def _setVCutSegmentStyle(self, segment, layer):
segment.SetShape(STROKE_T.S_SEGMENT)
segment.SetLayer(layer)
segment.SetWidth(fromMm(0.4))
def _setVCutLabelStyle(self, label, layer):
label.SetText("V-CUT")
label.SetLayer(layer)
label.SetTextThickness(fromMm(0.4))
label.SetTextSize(pcbnew.wxSizeMM(2, 2))
label.SetHorizJustify(EDA_TEXT_HJUSTIFY_T.GR_TEXT_HJUSTIFY_LEFT)
def _renderVCutV(self):
bBox = self.boardSubstrate.boundingBox()
minY, maxY = bBox.GetY() - fromMm(3), bBox.GetY() + bBox.GetHeight() + fromMm(3)
segments = []
for cut in self.vVCuts:
segment = pcbnew.PCB_SHAPE()
self._setVCutSegmentStyle(segment, self.vCutLayer)
segment.SetStart(pcbnew.wxPoint(cut, minY))
segment.SetEnd(pcbnew.wxPoint(cut, maxY))
keepout = None
if self.vCutClearance != 0:
keepout = shapely.geometry.box(
cut - self.vCutClearance / 2,
bBox.GetY(),
cut + self.vCutClearance / 2,
bBox.GetY() + bBox.GetHeight())
segments.append((segment, keepout))
label = pcbnew.PCB_TEXT(segment)
self._setVCutLabelStyle(label, self.vCutLayer)
label.SetPosition(wxPoint(cut, minY - fromMm(3)))
label.SetTextAngle(900)
segments.append((label, None))
return segments
def _renderVCutH(self):
bBox = self.boardSubstrate.boundingBox()
minX, maxX = bBox.GetX() - fromMm(3), bBox.GetX() + bBox.GetWidth() + fromMm(3)
segments = []
for cut in self.hVCuts:
segment = pcbnew.PCB_SHAPE()
self._setVCutSegmentStyle(segment, self.vCutLayer)
segment.SetStart(pcbnew.wxPoint(minX, cut))
segment.SetEnd(pcbnew.wxPoint(maxX, cut))
keepout = None
if self.vCutClearance != 0:
keepout = shapely.geometry.box(
bBox.GetX(),
cut - self.vCutClearance / 2,
bBox.GetX() + bBox.GetWidth(),
cut + self.vCutClearance / 2)
segments.append((segment, keepout))
label = pcbnew.PCB_TEXT(segment)
self._setVCutLabelStyle(label, self.vCutLayer)
label.SetPosition(wxPoint(maxX + fromMm(3), cut))
segments.append((label, None))
return segments
def _placeBoardsInGrid(self, boardfile, rows, cols, destination, sourceArea, tolerance,
verSpace, horSpace, rotation, netRenamer, refRenamer,
placementClass):
boardSize = wxRect(0, 0, 0, 0)
topLeftSize = None
placement = placementClass(destination, boardSize, horSpace, verSpace)
for i, j in product(range(rows), range(cols)):
placement.boardSize = boardSize
dest = placement.position(i, j)
boardRotation = rotation + placement.rotation(i, j)
boardSize = self.appendBoard(
boardfile, dest, sourceArea=sourceArea,
tolerance=tolerance, origin=Origin.Center,
rotationAngle=boardRotation, netRenamer=netRenamer,
refRenamer=refRenamer)
if not topLeftSize:
topLeftSize = boardSize
return topLeftSize
def makeGrid(self, boardfile, sourceArea, rows, cols, destination,
verSpace, horSpace, rotation,
placementClass=BasicGridPosition,
netRenamePattern="Board_{n}-{orig}",
refRenamePattern="Board_{n}-{orig}"):
substrateCount = len(self.substrates)
netRenamer = lambda x, y: netRenamePattern.format(n=x, orig=y)
refRenamer = lambda x, y: refRenamePattern.format(n=x, orig=y)
self._placeBoardsInGrid(boardfile, rows, cols, destination,
sourceArea, 0, verSpace, horSpace,
rotation, netRenamer, refRenamer, placementClass)
return self.substrates[substrateCount:]
|
MIT License
|
avast/retdec-regression-tests-framework
|
tests/parsers/c_parser/stmts/statement_tests.py
|
StatementTests.get_for_loop
|
python
|
def get_for_loop(self, code):
func = self.insert_into_function_body(code)
return func.for_loops[0]
|
Returns the first for loop in the given code.
|
https://github.com/avast/retdec-regression-tests-framework/blob/a8d024475bf76cd6acdee3c9df3a3d38a2ec63df/tests/parsers/c_parser/stmts/statement_tests.py#L32-L35
|
from unittest import mock
from regression_tests.parsers.c_parser.stmts.break_stmt import BreakStmt
from regression_tests.parsers.c_parser.stmts.continue_stmt import ContinueStmt
from regression_tests.parsers.c_parser.stmts.do_while_loop import DoWhileLoop
from regression_tests.parsers.c_parser.stmts.empty_stmt import EmptyStmt
from regression_tests.parsers.c_parser.stmts.for_loop import ForLoop
from regression_tests.parsers.c_parser.stmts.goto_stmt import GotoStmt
from regression_tests.parsers.c_parser.stmts.if_stmt import IfStmt
from regression_tests.parsers.c_parser.stmts.return_stmt import ReturnStmt
from regression_tests.parsers.c_parser.stmts.statement import Statement
from regression_tests.parsers.c_parser.stmts.switch_stmt import SwitchStmt
from regression_tests.parsers.c_parser.stmts.while_loop import WhileLoop
from tests.parsers.c_parser import WithModuleTests
class StatementTests(WithModuleTests):
def insert_into_function_body(self, code):
return self.get_func("""
void func() {
%s
}
""" % code, 'func')
|
MIT License
|
duerrp/pyexperiment
|
tests/test_replicate.py
|
TestTargetCreator.tearDown
|
python
|
def tearDown(self):
log.reset_instance()
|
Clean up after the test
|
https://github.com/duerrp/pyexperiment/blob/c426565d870d944bd5b9712629d8f1ba2527c67f/tests/test_replicate.py#L174-L177
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import numpy as np
import logging
import io
import multiprocessing
import functools
if True:
from six.moves import range
from pyexperiment import state
from pyexperiment import Logger
from pyexperiment import log
from pyexperiment.replicate import replicate, collect_results, TargetCreator
from pyexperiment.replicate import SUBSTATE_KEY_PATTERN
FAKE_ERROR = RuntimeError("Foo")
def experiment():
state['result'] = "bla"
def experiment2():
state['result'] = "bla"
_bla = state['result']
del state['result']
def experiment3():
raise FAKE_ERROR
def experiment4():
np.random.seed()
state['result'] = np.random.rand(1)
class TestReplicate(unittest.TestCase):
def tearDown(self):
state.reset_instance()
def test_setting_state(self):
no_replicates = 25
replicate(experiment, no_replicates)
for i in range(no_replicates):
self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
self.assertEqual(state[SUBSTATE_KEY_PATTERN % i]['result'], "bla")
def test_getting_state(self):
no_replicates = 25
replicate(experiment2, no_replicates)
for i in range(no_replicates):
self.assertNotIn('result', state[SUBSTATE_KEY_PATTERN % i])
def test_raises(self):
no_replicates = 25
try:
replicate(experiment3, no_replicates)
except RuntimeError as err:
self.assertEqual(err, FAKE_ERROR)
else:
assert False
def test_setting_state_parallel(self):
no_replicates = 25
replicate(experiment, no_replicates, parallel=True, no_processes=2)
for i in range(no_replicates):
self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
self.assertEqual(state[SUBSTATE_KEY_PATTERN % i]['result'], "bla")
def test_getting_state_parallel(self):
no_replicates = 25
replicate(experiment2, no_replicates, parallel=True, no_processes=2)
for i in range(no_replicates):
self.assertNotIn(SUBSTATE_KEY_PATTERN % i + '.result', state)
def test_collecting(self):
no_replicates = 25
replicate(experiment4, no_replicates)
for i in range(no_replicates):
self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
results = collect_results('result', no_replicates=no_replicates)
self.assertEqual(len(results), no_replicates)
for i, r_1 in enumerate(results):
for k, r_2 in enumerate(results):
if not i == k:
self.assertFalse((r_1 == r_2).all())
def test_collecting_parallel(self):
no_replicates = 25
replicate(experiment4, no_replicates, parallel=True, no_processes=2)
for i in range(no_replicates):
self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])
results = collect_results('result', no_replicates=no_replicates)
self.assertEqual(len(results), no_replicates)
for i, r_1 in enumerate(results):
for k, r_2 in enumerate(results):
if not i == k:
self.assertFalse((r_1 == r_2).all())
class TestTargetCreator(unittest.TestCase):
|
MIT License
|
apache/bloodhound
|
bloodhound_dashboard/bhdashboard/widgets/product.py
|
ProductWidget.get_widget_params
|
python
|
def get_widget_params(self, name):
return {
'max': {'desc': """Limit the number of products displayed""",
'type': int},
'cols': {'desc': """Number of columns""",
'type': int}
}
|
Return a dictionary containing the argument specification for
the widget with the specified name.
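A sketch of how such a specification can drive argument coercion (the raw values are hypothetical request parameters, not part of Bloodhound's API):

spec = {
    'max': {'desc': "Limit the number of products displayed", 'type': int},
    'cols': {'desc': "Number of columns", 'type': int},
}

raw_args = {'max': '5', 'cols': '3'}   # e.g. strings parsed from a request
args = {k: spec[k]['type'](v) for k, v in raw_args.items() if k in spec}
print(args)                            # {'max': 5, 'cols': 3}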
|
https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_dashboard/bhdashboard/widgets/product.py#L48-L57
|
import itertools
from genshi.builder import tag
from trac.resource import Neighborhood
from trac.ticket.model import Milestone, Component, Version
from trac.ticket.query import Query
from bhdashboard.util import pretty_wrapper
from bhdashboard.util.widgets import WidgetBase, check_widget_name
from bhdashboard.util.translation import _
from multiproduct.env import Product, ProductEnvironment
__metaclass__ = type
class ProductWidget(WidgetBase):
|
Apache License 2.0
|
bio2bel/bio2bel
|
src/bio2bel/sources/biogrid.py
|
_process_pmid
|
python
|
def _process_pmid(s: str) -> str:
if not s.startswith('pubmed:'):
raise ValueError(f'Non pubmed: {s}')
return s[len('pubmed:'):]  # strip the 'pubmed:' prefix, keep the full identifier
|
Process provenance column.
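With the slice fixed as above, a tiny check (the identifier is made up); the helper is repeated so the snippet runs standalone:

def _process_pmid(s):
    if not s.startswith('pubmed:'):
        raise ValueError(f'Non pubmed: {s}')
    return s[len('pubmed:'):]

print(_process_pmid('pubmed:12345678'))   # 12345678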
|
https://github.com/bio2bel/bio2bel/blob/f2c015c23e9e1f4b996716ec48f61687c5e347fe/src/bio2bel/sources/biogrid.py#L199-L203
|
import logging
import os
from functools import lru_cache
from typing import Iterable, List, Optional, Tuple
import click
import pandas as pd
import pyobo.sources.biogrid
from more_click import verbose_option
from pyobo.identifier_utils import normalize_curie
from tqdm import tqdm
import pybel.dsl
from pybel import BELGraph
from ..utils import ensure_path, get_data_dir
__all__ = [
'get_bel',
]
logger = logging.getLogger(__name__)
EVIDENCE = 'From BioGRID'
MODULE_NAME = 'biogrid'
VERSION = '3.5.186'
BASE_URL = 'https://downloads.thebiogrid.org/Download/BioGRID/Release-Archive'
URL = f'{BASE_URL}/BIOGRID-{VERSION}/BIOGRID-ALL-{VERSION}.mitab.zip'
BIOGRID_GENE_ASSOCIATION = {
'psi-mi:"MI:0794"(synthetic genetic interaction defined by inequality)',
'psi-mi:"MI:0799"(additive genetic interaction defined by inequality)',
'psi-mi:"MI:0796"(suppressive genetic interaction defined by inequality)',
}
BIOGRID_ASSOCIATION_ACTIONS = {
'psi-mi:"MI:0403"(colocalization)',
'psi-mi:"MI:0914"(association)',
'psi-mi:"MI:0915"(physical association)',
}
BIOGRID_BINDS_ACTIONS = {
'psi-mi:"MI:0407"(direct interaction)',
}
@lru_cache()
def _get_ncbigene_mapping():
return pyobo.sources.biogrid.get_ncbigene_mapping()
def _map_ncbigene(identifier):
return _get_ncbigene_mapping().get(identifier)
BIOGRID_NCBIGENE_REMAPPING = {
'4349295': None,
'4349491': None,
'4349337': None,
'4349775': None,
'4349716': None,
'4349853': None,
'4383869': None,
'4383875': None,
}
UNIPROT_NCBIGENE_REMAPPING = {
'P0DTC1': None,
'P0DTD2': '1489679',
'Q7TLC7': None,
}
def _process_interactor(s: str) -> Optional[str]:
prefix, identifier = normalize_curie(s)
if prefix is None:
logger.warning('could not parse %s', s)
return
if prefix == 'ncbigene':
return identifier
elif prefix == 'biogrid':
ncbigene_identifier = _map_ncbigene(identifier)
if ncbigene_identifier is not None:
return ncbigene_identifier
elif identifier in BIOGRID_NCBIGENE_REMAPPING:
remapped = BIOGRID_NCBIGENE_REMAPPING[identifier]
if not remapped:
logger.debug('tried but failed curation on %s', s)
return remapped
else:
logger.warning('need to curate: %s', s)
return
elif prefix == 'uniprot':
if identifier in UNIPROT_NCBIGENE_REMAPPING:
remapped = UNIPROT_NCBIGENE_REMAPPING[identifier]
if not remapped:
logger.debug('tried but failed curation on %s', s)
return remapped
else:
logger.warning('need to curate: %s', s)
return
else:
logger.warning('unhandled interactor: %s (%s:%s)', s, prefix, identifier)
def _process_xrefs(s: str) -> List[Tuple[str, str]]:
return list(_iter_process_xrefs(s))
def _iter_process_xrefs(s: str) -> Iterable[Tuple[str, str]]:
for curie in s.split('|'):
curie = curie.strip()
prefix, identifier = normalize_curie(curie)
if prefix is not None:
yield prefix, identifier
|
MIT License
|
apple-network/apple-blockchain
|
apple/full_node/full_node.py
|
FullNode.short_sync_backtrack
|
python
|
async def short_sync_backtrack(
self, peer: ws.WSAppleConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32
):
try:
if peer.peer_node_id not in self.sync_store.backtrack_syncing:
self.sync_store.backtrack_syncing[peer.peer_node_id] = 0
self.sync_store.backtrack_syncing[peer.peer_node_id] += 1
unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)
curr_height: int = target_height
found_fork_point = False
responses = []
while curr_height > peak_height - 5:
fetch_tx: bool = unfinished_block is None or curr_height != target_height
curr = await peer.request_block(full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx))
if curr is None:
raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out")
if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):
raise ValueError(
f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}"
)
responses.append(curr)
if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:
found_fork_point = True
break
curr_height -= 1
if found_fork_point:
for response in reversed(responses):
await self.respond_block(response, peer)
except Exception as e:
self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
raise e
self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
return found_fork_point
|
Performs a backtrack sync, where blocks are downloaded one at a time from newest to oldest. If we do not
find the fork point within 5 blocks below our peak, we return False and do a long sync instead.
Args:
peer: peer to sync from
peak_height: height of our peak
target_height: target height
target_unf_hash: partial hash of the unfinished block of the target
Returns:
True iff we found the fork point, and we do not need to long sync.
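The walk-back itself, stripped of networking and persistence, looks roughly like this (the in-memory "chain" and hash names are purely illustrative):

known_blocks = {f"hash{i}" for i in range(0, 96)}   # blocks we already have (heights 0-95)

def backtrack(peak_height, target_height):
    responses, curr_height, found_fork_point = [], target_height, False
    while curr_height > peak_height - 5:
        block = {"height": curr_height, "prev": f"hash{curr_height - 1}"}  # fake fetch
        responses.append(block)
        if block["prev"] in known_blocks or curr_height == 0:
            found_fork_point = True
            break
        curr_height -= 1
    # Apply oldest-first, as respond_block is called on reversed(responses) above.
    return found_fork_point, [b["height"] for b in reversed(responses)]

print(backtrack(peak_height=95, target_height=98))   # (True, [96, 97, 98])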
|
https://github.com/apple-network/apple-blockchain/blob/74351fd5cea80da40a84777c6e1659b845a22d1c/apple/full_node/full_node.py#L310-L360
|
import asyncio
import dataclasses
import logging
import random
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import aiosqlite
from blspy import AugSchemeMPL
import apple.server.ws_connection as ws
from apple.consensus.block_creation import unfinished_block_to_full_block
from apple.consensus.block_record import BlockRecord
from apple.consensus.blockchain import Blockchain, ReceiveBlockResult
from apple.consensus.blockchain_interface import BlockchainInterface
from apple.consensus.constants import ConsensusConstants
from apple.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from apple.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from apple.consensus.multiprocess_validation import PreValidationResult
from apple.consensus.pot_iterations import calculate_sp_iters
from apple.full_node.block_store import BlockStore
from apple.full_node.bundle_tools import detect_potential_template_generator
from apple.full_node.coin_store import CoinStore
from apple.full_node.full_node_store import FullNodeStore
from apple.full_node.hint_store import HintStore
from apple.full_node.mempool_manager import MempoolManager
from apple.full_node.signage_point import SignagePoint
from apple.full_node.sync_store import SyncStore
from apple.full_node.weight_proof import WeightProofHandler
from apple.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol
from apple.protocols.full_node_protocol import (
RequestBlocks,
RespondBlock,
RespondBlocks,
RespondSignagePoint,
)
from apple.protocols.protocol_message_types import ProtocolMessageTypes
from apple.protocols.wallet_protocol import CoinState, CoinStateUpdate
from apple.server.node_discovery import FullNodePeers
from apple.server.outbound_message import Message, NodeType, make_msg
from apple.server.server import AppleServer
from apple.types.blockchain_format.classgroup import ClassgroupElement
from apple.types.blockchain_format.pool_target import PoolTarget
from apple.types.blockchain_format.sized_bytes import bytes32
from apple.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from apple.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof
from apple.types.coin_record import CoinRecord
from apple.types.end_of_slot_bundle import EndOfSubSlotBundle
from apple.types.full_block import FullBlock
from apple.types.header_block import HeaderBlock
from apple.types.mempool_inclusion_status import MempoolInclusionStatus
from apple.types.spend_bundle import SpendBundle
from apple.types.unfinished_block import UnfinishedBlock
from apple.util.bech32m import encode_puzzle_hash
from apple.util.check_fork_next_block import check_fork_next_block
from apple.util.db_wrapper import DBWrapper
from apple.util.errors import ConsensusError, Err
from apple.util.ints import uint8, uint32, uint64, uint128
from apple.util.path import mkdir, path_from_root
from apple.util.safe_cancel_task import cancel_task_safe
from apple.util.profiler import profile_task
from datetime import datetime
class FullNode:
block_store: BlockStore
full_node_store: FullNodeStore
full_node_peers: Optional[FullNodePeers]
sync_store: Any
coin_store: CoinStore
mempool_manager: MempoolManager
connection: aiosqlite.Connection
_sync_task: Optional[asyncio.Task]
_init_weight_proof: Optional[asyncio.Task] = None
blockchain: Blockchain
config: Dict
server: Any
log: logging.Logger
constants: ConsensusConstants
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
timelord_lock: asyncio.Lock
initialized: bool
weight_proof_handler: Optional[WeightProofHandler]
_ui_tasks: Set[asyncio.Task]
def __init__(
self,
config: Dict,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
):
self.initialized = False
self.root_path = root_path
self.config = config
self.server = None
self._shut_down = False
self.constants = consensus_constants
self.pow_creation: Dict[uint32, asyncio.Event] = {}
self.state_changed_callback: Optional[Callable] = None
self.full_node_peers = None
self.sync_store = None
self.signage_point_times = [time.time() for _ in range(self.constants.NUM_SPS_SUB_SLOT)]
self.full_node_store = FullNodeStore(self.constants)
self.uncompact_task = None
self.compact_vdf_requests: Set[bytes32] = set()
self.log = logging.getLogger(name if name else __name__)
self._ui_tasks = set()
db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
self.db_path = path_from_root(root_path, db_path_replaced)
self.coin_subscriptions: Dict[bytes32, Set[bytes32]] = {}
self.ph_subscriptions: Dict[bytes32, Set[bytes32]] = {}
self.peer_coin_ids: Dict[bytes32, Set[bytes32]] = {}
self.peer_puzzle_hash: Dict[bytes32, Set[bytes32]] = {}
self.peer_sub_counter: Dict[bytes32, int] = {}
mkdir(self.db_path.parent)
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def _start(self):
self.timelord_lock = asyncio.Lock()
self.compact_vdf_sem = asyncio.Semaphore(4)
self.new_peak_sem = asyncio.Semaphore(8)
self.connection = await aiosqlite.connect(self.db_path)
await self.connection.execute("pragma journal_mode=wal")
await self.connection.execute("pragma synchronous=OFF")
if self.config.get("log_sqlite_cmds", False):
sql_log_path = path_from_root(self.root_path, "log/sql.log")
self.log.info(f"logging SQL commands to {sql_log_path}")
def sql_trace_callback(req: str):
timestamp = datetime.now().strftime("%H:%M:%S.%f")
log = open(sql_log_path, "a")
log.write(timestamp + " " + req + "\n")
log.close()
await self.connection.set_trace_callback(sql_trace_callback)
self.db_wrapper = DBWrapper(self.connection)
self.block_store = await BlockStore.create(self.db_wrapper)
self.sync_store = await SyncStore.create()
self.hint_store = await HintStore.create(self.db_wrapper)
self.coin_store = await CoinStore.create(self.db_wrapper)
self.log.info("Initializing blockchain from disk")
start_time = time.time()
self.blockchain = await Blockchain.create(self.coin_store, self.block_store, self.constants, self.hint_store)
self.mempool_manager = MempoolManager(self.coin_store, self.constants)
self.weight_proof_handler = None
self._init_weight_proof = asyncio.create_task(self.initialize_weight_proof())
if self.config.get("enable_profiler", False):
asyncio.create_task(profile_task(self.root_path, "node", self.log))
self._sync_task = None
self._segment_task = None
time_taken = time.time() - start_time
if self.blockchain.get_peak() is None:
self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
else:
self.log.info(
f"Blockchain initialized to peak {self.blockchain.get_peak().header_hash} height"
f" {self.blockchain.get_peak().height}, "
f"time taken: {int(time_taken)}s"
)
pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_peak())
assert len(pending_tx) == 0
peak: Optional[BlockRecord] = self.blockchain.get_peak()
if peak is not None:
full_peak = await self.blockchain.get_full_peak()
await self.peak_post_processing(full_peak, peak, max(peak.height - 1, 0), None, ([], {}))
if self.config["send_uncompact_interval"] != 0:
sanitize_weight_proof_only = False
if "sanitize_weight_proof_only" in self.config:
sanitize_weight_proof_only = self.config["sanitize_weight_proof_only"]
assert self.config["target_uncompact_proofs"] != 0
self.uncompact_task = asyncio.create_task(
self.broadcast_uncompact_blocks(
self.config["send_uncompact_interval"],
self.config["target_uncompact_proofs"],
sanitize_weight_proof_only,
)
)
self.initialized = True
if self.full_node_peers is not None:
asyncio.create_task(self.full_node_peers.start())
async def initialize_weight_proof(self):
self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain)
peak = self.blockchain.get_peak()
if peak is not None:
await self.weight_proof_handler.create_sub_epoch_segments()
def set_server(self, server: AppleServer):
self.server = server
dns_servers = []
try:
network_name = self.config["selected_network"]
default_port = self.config["network_overrides"]["config"][network_name]["default_full_node_port"]
except Exception:
self.log.info("Default port field not found in config.")
default_port = None
if "dns_servers" in self.config:
dns_servers = self.config["dns_servers"]
elif self.config["port"] == 26666:
dns_servers.append("dns.applecoin.in")
try:
self.full_node_peers = FullNodePeers(
self.server,
self.root_path,
self.config["target_peer_count"] - self.config["target_outbound_peer_count"],
self.config["target_outbound_peer_count"],
self.config["peer_db_path"],
self.config["introducer_peer"],
dns_servers,
self.config["peer_connect_interval"],
self.config["selected_network"],
default_port,
self.log,
)
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception: {e}")
self.log.error(f"Exception in peer discovery: {e}")
self.log.error(f"Exception Stack: {error_stack}")
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
async def short_sync_batch(self, peer: ws.WSAppleConnection, start_height: uint32, target_height: uint32) -> bool:
if (
peer.peer_node_id in self.sync_store.backtrack_syncing
and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0
):
return True
if peer.peer_node_id in self.sync_store.batch_syncing:
return True
self.sync_store.batch_syncing.add(peer.peer_node_id)
self.log.info(f"Starting batch short sync from {start_height} to height {target_height}")
if start_height > 0:
first = await peer.request_block(full_node_protocol.RequestBlock(uint32(start_height), False))
if first is None or not isinstance(first, full_node_protocol.RespondBlock):
self.sync_store.batch_syncing.remove(peer.peer_node_id)
raise ValueError(f"Error short batch syncing, could not fetch block at height {start_height}")
if not self.blockchain.contains_block(first.block.prev_header_hash):
self.log.info("Batch syncing stopped, this is a deep chain")
self.sync_store.batch_syncing.remove(peer.peer_node_id)
return False
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
if self._segment_task is not None and (not self._segment_task.done()):
try:
self._segment_task.cancel()
except Exception as e:
self.log.warning(f"failed to cancel segment task {e}")
self._segment_task = None
try:
for height in range(start_height, target_height, batch_size):
end_height = min(target_height, height + batch_size)
request = RequestBlocks(uint32(height), uint32(end_height), True)
response = await peer.request_blocks(request)
if not response:
raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}")
async with self.blockchain.lock:
success, advanced_peak, fork_height, coin_changes = await self.receive_block_batch(
response.blocks, peer, None
)
if not success:
raise ValueError(f"Error short batch syncing, failed to validate blocks {height}-{end_height}")
if advanced_peak:
peak = self.blockchain.get_peak()
peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
assert peak is not None and peak_fb is not None and fork_height is not None
await self.peak_post_processing(peak_fb, peak, fork_height, peer, coin_changes)
self.log.info(f"Added blocks {height}-{end_height}")
except Exception:
self.sync_store.batch_syncing.remove(peer.peer_node_id)
raise
self.sync_store.batch_syncing.remove(peer.peer_node_id)
return True
|
Apache License 2.0
|
jcmgray/cotengra
|
cotengra/core.py
|
ContractionTree.get_subtree
|
python
|
def get_subtree(self, node, size, search='bfs'):
branches = []
real_leaves = []
queue = [node]
while (len(queue) + len(real_leaves) < size) and queue:
if search == 'bfs':
p = queue.pop(0)
elif search == 'dfs':
p = queue.pop(-1)
elif search == 'random':
p = queue.pop(random.randint(0, len(queue) - 1))
if len(p) == 1:
real_leaves.append(p)
continue
l, r = self.children[p]
queue.append(r)
queue.append(l)
branches.append(p)
sub_leaves = queue + real_leaves
return tuple(sub_leaves), tuple(branches)
|
Get a subtree spanning down from ``node`` which will have ``size``
leaves (themselves not necessarily leaves of the actual tree).
Parameters
----------
node : node
The node of the tree to start with.
size : int
How many subtree leaves to aim for.
search : {'bfs', 'dfs', 'random'}, optional
How to build the tree:
- 'bfs': breadth first expansion
- 'dfs': depth first expansion (largest nodes first)
- 'random': random expansion
Returns
-------
sub_leaves : tuple[node]
Nodes which are subtree leaves.
branches : tuple[node]
Nodes which are between the subtree leaves and root.
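A minimal usage sketch (not from the source), assuming ``ContractionTree`` is importable from the package and using an illustrative four-tensor contraction:
# Illustrative only: build a tiny tree from an explicit pairwise path,
# then extract a subtree of up to 3 leaves below the root.
from cotengra import ContractionTree  # import path assumed

inputs = ['ab', 'bc', 'cd', 'de']            # four tensor terms
output = 'ae'
size_dict = {ix: 2 for ix in 'abcde'}        # every index has dimension 2
path = [(0, 1), (0, 1), (0, 1)]              # contract pairwise until one term remains

tree = ContractionTree.from_path(inputs, output, size_dict, path=path)

# 'bfs' expands the widest nodes first; sub_leaves and branches together
# describe the top of the tree rooted at ``tree.root``.
sub_leaves, branches = tree.get_subtree(tree.root, size=3, search='bfs')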
|
https://github.com/jcmgray/cotengra/blob/9fa1ebb046009b83e874be1213d3c72817e36f76/cotengra/core.py#L930-L988
|
import re
import math
import random
import warnings
import operator
import itertools
import functools
import collections
from string import ascii_letters
from opt_einsum.helpers import compute_size_by_dict, flop_count
from opt_einsum.paths import get_path_fn, DynamicProgramming, linear_to_ssa
from autoray import do
from .utils import (
MaxCounter,
BitSet,
node_from_seq,
node_from_single,
node_supremum,
node_get_single_el,
is_valid_node,
oset,
groupby,
interleave,
unique,
prod,
dynary,
)
from .parallel import (
parse_parallel_arg,
maybe_leave_pool,
maybe_rejoin_pool,
submit,
)
from .plot import (
plot_tree_ring,
plot_tree_tent,
plot_tree_span,
plot_tree_rubberband,
plot_contractions,
plot_contractions_alt,
plot_hypergraph,
)
try:
from opt_einsum.paths import DEFAULT_COMBO_FACTOR
except ImportError:
DEFAULT_COMBO_FACTOR = 64
try:
from cotengra.cotengra import HyperGraph as HyperGraphRust
except ImportError:
HyperGraphRust = None
def cached_node_property(name):
def wrapper(meth):
@functools.wraps(meth)
def getter(self, node):
try:
return self.info[node][name]
except KeyError:
self.info[node][name] = value = meth(self, node)
return value
return getter
return wrapper
def union_it(bs):
b0, *bs = bs
return b0.union(*bs)
def get_with_default(k, obj, default):
return obj.get(k, default)
class ContractionTree:
def __init__(
self, inputs, output, size_dict,
track_childless=False,
track_flops=False,
track_write=False,
track_size=False,
):
self.inputs = inputs
self.output = output
self.size_dict = size_dict
self.N = len(self.inputs)
self.bitset_edges = BitSet(size_dict.keys())
self.inputs_legs = list(map(self.bitset_edges, self.inputs))
self.output_legs = self.bitset_edges(self.output)
self.children = {}
self.info = {}
self.root = node_supremum(self.N)
self.info[self.root] = {
'legs': self.output_legs,
'keep': self.output_legs,
'size': compute_size_by_dict(self.output, size_dict),
}
self.track_childless = track_childless
if self.track_childless:
self.childless = {self.root}
self._track_flops = track_flops
if track_flops:
self._flops = 0
self._track_write = track_write
if track_write:
self._write = 0
self._track_size = track_size
if track_size:
self._sizes = MaxCounter()
self.already_optimized = dict()
self.multiplicity = 1
self.sliced_inds = self.sliced_sizes = ()
self.sliced_inputs = frozenset()
self.contraction_cores = {}
def set_state_from(self, other):
for attr in ('inputs', 'output', 'size_dict', 'N', 'root',
'multiplicity', 'sliced_inds', 'sliced_sizes',
'sliced_inputs', 'bitset_edges'):
setattr(self, attr, getattr(other, attr))
for attr in ('children', 'inputs_legs',
'output_legs', 'contraction_cores'):
setattr(self, attr, getattr(other, attr).copy())
for attr in ('info', 'already_optimized'):
setattr(self, attr,
{k: v.copy() for k, v in getattr(other, attr).items()})
self.track_childless = other.track_childless
if other.track_childless:
self.childless = other.childless.copy()
self._track_flops = other._track_flops
if other._track_flops:
self._flops = other._flops
self._track_write = other._track_write
if other._track_write:
self._write = other._write
self._track_size = other._track_size
if other._track_size:
self._sizes = other._sizes.copy()
def copy(self):
tree = object.__new__(ContractionTree)
tree.set_state_from(self)
return tree
@property
def nslices(self):
return self.multiplicity
@property
def nchunks(self):
return prod(
d for ix, d in zip(self.sliced_inds, self.sliced_sizes)
if ix in self.output
)
def node_to_terms(self, node):
return map(self.inputs_legs.__getitem__, node)
def gen_leaves(self):
return map(node_from_single, range(self.N))
@classmethod
def from_path(cls, inputs, output, size_dict, *,
path=None, ssa_path=None, check=False, **kwargs):
if int(path is None) + int(ssa_path is None) != 1:
raise ValueError("Exactly one of ``path`` or ``ssa_path`` must be "
"supplied.")
if ssa_path is not None:
path = ssa_path
tree = cls(inputs, output, size_dict, **kwargs)
terms = list(tree.gen_leaves())
for p in path:
if ssa_path is not None:
merge = [terms[i] for i in p]
else:
merge = [terms.pop(i) for i in sorted(p, reverse=True)]
terms.append(tree.contract_nodes(merge, check=check))
return tree
@classmethod
def from_info(cls, info, **kwargs):
return cls.from_path(inputs=info.input_subscripts.split(','),
output=info.output_subscript,
size_dict=info.size_dict,
path=info.path, **kwargs)
@classmethod
def from_eq(cls, eq, size_dict, **kwargs):
lhs, output = eq.split('->')
inputs = lhs.split(',')
return cls(inputs, output, size_dict, **kwargs)
@classmethod
def from_edge_path(cls, edge_path, inputs, output, size_dict,
check=False, **kwargs):
tree = cls(inputs, output, size_dict, **kwargs)
nodes = list(tree.gen_leaves())
for e in edge_path:
new_terms, merge = [], []
for node in nodes:
term = union_it(tree.node_to_terms(node))
if e in term:
merge.append(node)
else:
new_terms.append(node)
if merge:
nodes = new_terms + [tree.contract_nodes(merge, check=check)]
nt = len(nodes)
if nt > 1:
warnings.warn(
f"Ended up with {nt} nodes - contracting all remaining.")
tree.contract_nodes(nodes, check=check)
return tree
def _add_node(self, node, check=False):
if check:
if len(self.info) > 2 * self.N - 1:
raise ValueError("There are too many children already.")
if len(self.children) > self.N - 1:
raise ValueError("There are too many branches already.")
if not is_valid_node(node):
raise ValueError("{} is not a valid node.".format(node))
self.info.setdefault(node, dict())
def _remove_node(self, node):
if self._track_flops:
self._flops -= self.get_flops(node)
if self._track_write:
self._write -= self.get_size(node)
if self._track_size:
self._sizes.discard(self.get_size(node))
del self.info[node]
del self.children[node]
@cached_node_property('keep')
def get_keep(self, node):
nodes_above = self.root.difference(node)
terms_above = self.node_to_terms(nodes_above)
return union_it((self.output_legs, *terms_above))
@cached_node_property('legs')
def get_legs(self, node):
if len(node) == 1:
return self.inputs_legs[node_get_single_el(node)]
try:
involved = self.get_involved(node)
except KeyError:
involved = union_it(self.node_to_terms(node))
keep = self.get_keep(node)
return involved.intersection(keep)
@cached_node_property('involved')
def get_involved(self, node):
if len(node) == 1:
return self.bitset_edges.infimum
sub_legs = map(self.get_legs, self.children[node])
return union_it(sub_legs)
@cached_node_property('removed')
def get_removed(self, node):
return self.get_involved(node).difference(self.get_legs(node))
@cached_node_property('size')
def get_size(self, node):
return compute_size_by_dict(self.get_legs(node), self.size_dict)
@cached_node_property('flops')
def get_flops(self, node):
if len(node) == 1:
return 0
involved = self.get_involved(node)
removed = self.get_removed(node)
return flop_count(involved, removed, 2, self.size_dict)
@cached_node_property('can_dot')
def get_can_dot(self, node):
l, r = self.children[node]
sp, sl, sr = map(self.get_legs, (node, l, r))
return sl.symmetric_difference(sr) == sp
@cached_node_property('inds')
def get_inds(self, node):
if len(node) == 1:
i = node_get_single_el(node)
if not self.sliced_inds:
return "".join(self.inputs[i])
legs = self.get_legs(node)
return "".join(filter(legs.__contains__, self.inputs[i]))
if len(node) == self.N:
if not self.sliced_inds:
return "".join(self.output)
return "".join(filter(self.output_legs.__contains__, self.output))
legs = self.get_legs(node)
l_inds, r_inds = map(self.get_inds, self.children[node])
return "".join(
unique(filter(legs.__contains__, itertools.chain(l_inds, r_inds)))
)
@cached_node_property('tensordot_axes')
def get_tensordot_axes(self, node):
l_inds, r_inds = map(self.get_inds, self.children[node])
l_axes, r_axes = [], []
for i, ind in enumerate(l_inds):
j = r_inds.find(ind)
if j != -1:
l_axes.append(i)
r_axes.append(j)
return tuple(l_axes), tuple(r_axes)
@cached_node_property('tensordot_perm')
def get_tensordot_perm(self, node):
l_inds, r_inds = map(self.get_inds, self.children[node])
p_inds = self.get_inds(node)
td_inds = "".join(sorted(p_inds, key=f"{l_inds}{r_inds}".find))
if td_inds == p_inds:
return None
return tuple(map(td_inds.find, p_inds))
@cached_node_property('einsum_eq')
def get_einsum_eq(self, node):
l, r = self.children[node]
l_inds, r_inds, p_inds = map(self.get_inds, (l, r, node))
char_mapping = {
ord(ix): ascii_letters[i] for i, ix in
enumerate(unique(itertools.chain(l_inds, r_inds)))
}
return f"{l_inds},{r_inds}->{p_inds}".translate(char_mapping)
def get_centrality(self, node):
try:
return self.info[node]['centrality']
except KeyError:
self.compute_centralities()
return self.info[node]['centrality']
def total_flops(self, dtype='float'):
if self._track_flops:
real_flops = self.multiplicity * self._flops
else:
self._flops = 0
for node, _, _ in self.traverse():
self._flops += self.get_flops(node)
self._track_flops = True
real_flops = self.multiplicity * self._flops
if dtype is None:
return real_flops // 2
if 'float' in dtype:
return real_flops
if 'complex' in dtype:
return real_flops * 4
def total_write(self):
if self._track_write:
return self.multiplicity * self._write
self._write = 0
for node, _, _ in self.traverse():
self._write += self.get_size(node)
self._track_write = True
return self.multiplicity * self._write
def total_cost(self, factor=DEFAULT_COMBO_FACTOR, combine=sum):
t = 0
for p in self.children:
f = self.get_flops(p) // 2
w = self.get_size(p)
t += combine((f, factor * w))
return self.multiplicity * t
def max_size(self):
if self._track_size:
return self._sizes.max()
self._sizes = MaxCounter()
for node, _, _ in self.traverse():
self._sizes.add(self.get_size(node))
self._track_size = True
return self._sizes.max()
def peak_size(self, order=None):
tot_size = sum(self.get_size(node) for node in self.gen_leaves())
peak = tot_size
for p, l, r in self.traverse(order=order):
tot_size -= self.get_size(l)
tot_size -= self.get_size(r)
tot_size += self.get_size(p)
peak = max(peak, tot_size)
return peak
def total_size(self):
tot_size = sum(self.get_size(node) for node in self.gen_leaves())
for node, _, _ in self.traverse():
tot_size += self.get_size(node)
return tot_size
def arithmetic_intensity(self):
return self.total_flops() / self.total_write()
def contraction_cost(self):
return float(self.total_flops(dtype=None))
def contraction_width(self):
return math.log2(self.max_size())
def compressed_contract_stats(
self,
chi,
order='surface_order',
compress_late=True,
):
hg = self.get_hypergraph(accel='auto')
tree_map = dict(zip(self.gen_leaves(), range(hg.get_num_nodes())))
max_size = 0
current_size = 0
for i in range(hg.get_num_nodes()):
s = hg.node_size(i)
max_size = max(max_size, s)
current_size += s
total_size = peak_size = current_size
for p, l, r in self.traverse(order):
li = tree_map[l]
ri = tree_map[r]
current_size -= hg.neighborhood_size((li, ri))
if compress_late:
hg.compress(chi=chi, edges=hg.get_node(li))
hg.compress(chi=chi, edges=hg.get_node(ri))
pi = tree_map[p] = hg.contract(li, ri)
pi_size = hg.node_size(pi)
max_size = max(max_size, pi_size)
current_size += hg.neighborhood_size((pi,))
peak_size = max(peak_size, current_size)
total_size += pi_size
if not compress_late:
hg.compress(chi=chi, edges=hg.get_node(pi))
return {
'max_size': max_size,
'total_size': total_size,
'peak_size': peak_size,
}
def max_size_compressed(self, chi, order='surface_order',
compress_late=True):
return self.compressed_contract_stats(
chi=chi,
order=order,
compress_late=compress_late,
)['max_size']
def peak_size_compressed(self, chi, order='surface_order',
compress_late=True, accel='auto'):
return self.compressed_contract_stats(
chi=chi,
order=order,
compress_late=compress_late,
)['peak_size']
def total_size_compressed(self, chi, order='surface_order',
compress_late=True, accel='auto'):
return self.compressed_contract_stats(
chi=chi,
order=order,
compress_late=compress_late,
)['total_size']
def contract_nodes_pair(self, x, y, check=False):
parent = x.union(y)
for node in (x, y, parent):
self._add_node(node, check=check)
nx, ny = len(x), len(y)
hx, hy = hash(x), hash(y)
if (nx, hx) > (ny, hy):
lr = (x, y)
else:
lr = (y, x)
self.children[parent] = lr
if self.track_childless:
self.childless.discard(parent)
if x not in self.children and nx > 1:
self.childless.add(x)
if y not in self.children and ny > 1:
self.childless.add(y)
if self._track_flops:
self._flops += self.get_flops(parent)
if self._track_write:
self._write += self.get_size(parent)
if self._track_size:
self._sizes.add(self.get_size(parent))
return parent
def contract_nodes(self, nodes, optimize='auto-hq', check=False):
if len(nodes) == 1:
return next(iter(nodes))
if len(nodes) == 2:
return self.contract_nodes_pair(*nodes, check=check)
grandparent = union_it(nodes)
self._add_node(grandparent, check=check)
for node in nodes:
self._add_node(node, check=check)
path_inputs = [oset(self.get_legs(x)) for x in nodes]
path_output = oset(self.get_legs(grandparent))
if isinstance(optimize, str):
path_fn = get_path_fn(optimize)
else:
path_fn = optimize
path = path_fn(path_inputs, path_output, self.size_dict)
temp_nodes = list(nodes)
for p in path:
to_contract = [
temp_nodes.pop(i) for i in sorted(p, reverse=True)
]
temp_nodes.append(
self.contract_nodes(
to_contract, optimize=optimize, check=check
)
)
parent, = temp_nodes
if check:
assert parent == grandparent
return parent
def is_complete(self):
too_many_nodes = len(self.info) > 2 * self.N - 1
too_many_branches = len(self.children) > self.N - 1
if too_many_nodes or too_many_branches:
raise ValueError("Contraction tree seems to be over complete!")
queue = [self.root]
while queue:
x = queue.pop()
if len(x) == 1:
continue
try:
queue.extend(self.children[x])
except KeyError:
return False
return True
def get_default_order(self):
return "dfs"
def _traverse_ordered(self, order):
from bisect import bisect
if order == 'surface_order':
order = self.surface_order
seen = set()
queue = [self.root]
scores = [order(self.root)]
while len(seen) != len(self.children):
i = 0
while i < len(queue):
node = queue[i]
if node not in seen:
for child in self.children[node]:
if len(child) > 1:
score = order(child)
ci = bisect(scores[:i], score)
scores.insert(ci, score)
queue.insert(ci, child)
i += 1
seen.add(node)
i += 1
for node in queue:
yield (node, *self.children[node])
def traverse(self, order=None):
if order is None:
order = self.get_default_order()
if order != "dfs":
yield from self._traverse_ordered(order=order)
return
ready = set(self.gen_leaves())
queue = [self.root]
while queue:
node = queue[-1]
l, r = self.children[node]
if (l in ready) and (r in ready):
ready.add(queue.pop())
yield node, l, r
continue
if r not in ready:
queue.append(r)
if l not in ready:
queue.append(l)
def descend(self, mode='dfs'):
queue = [self.root]
while queue:
if mode == 'dfs':
parent = queue.pop(-1)
elif mode == 'bfs':
parent = queue.pop(0)
l, r = self.children[parent]
yield parent, l, r
if len(l) > 1:
queue.append(l)
if len(r) > 1:
queue.append(r)
|
Apache License 2.0
|
cvignac/smp
|
models/utils/layers.py
|
NodeExtractor.forward
|
python
|
def forward(self, x: Tensor, u: Tensor, batch_info: dict):
new_u = self.lin2_u(torch.relu(self.lin1_u(u)))
index_tensor = batch_info['coloring'][:, :, None].expand(u.shape[0], 1, u.shape[-1])
x1 = torch.gather(new_u, 1, index_tensor)
x1 = x1[:, 0, :]
x2 = torch.sum(new_u / batch_info['n_batch'], dim=1)
x3 = torch.max(new_u, dim=1)[0]
x_full = torch.cat((x1, x2, x3), dim=1)
out = self.combine1(x_full)
return out
|
u: (num_nodes, num_nodes, in_features).
output: (num_nodes, out_feat).
this method can probably be made more efficient.
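A shape-only sketch (illustrative, not from the source), assuming ``NodeExtractor`` is importable from ``models.utils.layers`` and a single graph of 4 nodes:
import torch
from models.utils.layers import NodeExtractor  # module path assumed from the repo layout

num_nodes, feat = 4, 8
extractor = NodeExtractor(in_features_u=feat, out_features_u=16)

u = torch.randn(num_nodes, num_nodes, feat)          # (n, n, in_features)
batch_info = {
    # each node's own index selects the "diagonal" entry of u
    'coloring': torch.arange(num_nodes).view(num_nodes, 1),
    # number of nodes per graph, broadcastable against u
    'n_batch': torch.full((num_nodes, 1, 1), float(num_nodes)),
}

x = torch.zeros(num_nodes, feat)                      # x is not used by this layer
out = extractor(x, u, batch_info)                     # shape (4, 16), i.e. (n, out_feat)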
|
https://github.com/cvignac/smp/blob/95b55a880d0fc9149ddf32e8c2fdf5eac5b474b3/models/utils/layers.py#L193-L212
|
import torch
import torch.nn as nn
from torch import Tensor as Tensor
from torch.nn import Linear as Linear
import torch.nn.init as init
from torch.nn.init import _calculate_correct_fan, calculate_gain
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_add_pool, global_max_pool, MessagePassing
import math
small_gain = 0.01
def pooling(x: torch.Tensor, batch_info, method):
if method == 'add':
return global_add_pool(x, batch_info['batch'], batch_info['num_graphs'])
elif method == 'mean':
return global_mean_pool(x, batch_info['batch'], batch_info['num_graphs'])
elif method == 'max':
return global_max_pool(x, batch_info['batch'], batch_info['num_graphs'])
else:
raise ValueError("Pooling method not implemented")
def kaiming_init_with_gain(x: Tensor, gain: float, a=0, mode='fan_in', nonlinearity='relu'):
fan = _calculate_correct_fan(x, mode)
non_linearity_gain = calculate_gain(nonlinearity, a)
std = non_linearity_gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std * gain
with torch.no_grad():
return x.uniform_(-bound, bound)
class BatchNorm(nn.Module):
def __init__(self, channels: int, use_x: bool):
super().__init__()
self.bn = nn.BatchNorm1d(channels)
self.use_x = use_x
def reset_parameters(self):
self.bn.reset_parameters()
def forward(self, u):
if self.use_x:
return self.bn(u)
else:
return self.bn(u.transpose(1, 2)).transpose(1, 2)
class EdgeCounter(MessagePassing):
def __init__(self):
super().__init__(aggr='add')
def forward(self, x, edge_index, batch, batch_size):
n_edges = self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
return global_mean_pool(n_edges, batch, batch_size)[batch]
class Linear(nn.Module):
__constants__ = ['bias', 'in_features', 'out_features']
def __init__(self, in_features, out_features, bias=True, gain: float = 1.0):
super().__init__()
self.gain = gain
self.lin = nn.Linear(in_features, out_features, bias)
def reset_parameters(self):
kaiming_init_with_gain(self.lin.weight, self.gain)
if self.lin.bias is not None:
nn.init.normal_(self.lin.bias, 0, self.gain / math.sqrt(self.lin.out_features))
def forward(self, x):
return self.lin.forward(x)
class XtoX(Linear):
def forward(self, x, batch_info: dict = None):
return self.lin.forward(x)
class XtoGlobal(Linear):
def forward(self, x: Tensor, batch_info: dict, method='mean'):
g = pooling(x, batch_info, method)
return self.lin.forward(g)
class EntrywiseU(nn.Module):
def __init__(self, in_features: int, out_features: int, num_towers=None):
super().__init__()
if num_towers is None:
num_towers = in_features
self.lin1 = torch.nn.Conv1d(in_features, out_features, kernel_size=1, groups=num_towers, bias=False)
def forward(self, u):
u = u.transpose(1, 2)
u = self.lin1(u)
return u.transpose(1, 2)
class EntryWiseX(nn.Module):
def __init__(self, in_features: int, out_features: int, n_groups=None, residual=False):
super().__init__()
self.residual = residual
if n_groups is None:
n_groups = in_features
self.lin1 = torch.nn.Conv1d(in_features, out_features, kernel_size=1, groups=n_groups, bias=False)
def forward(self, x, batch_info=None):
new_x = self.lin1(x.unsqueeze(-1)).squeeze()
return (new_x + x) if self.residual else new_x
class UtoU(nn.Module):
def __init__(self, in_features: int, out_features: int, residual=True, n_groups=None):
super().__init__()
if n_groups is None:
n_groups = 1
self.residual = residual
self.lin1 = torch.nn.Conv1d(in_features, out_features, kernel_size=1, groups=n_groups, bias=True)
self.lin2 = torch.nn.Conv1d(in_features, out_features, kernel_size=1, groups=n_groups, bias=False)
self.lin3 = torch.nn.Conv1d(in_features, out_features, kernel_size=1, groups=n_groups, bias=False)
def forward(self, u: Tensor, batch_info: dict = None):
old_u = u
n = batch_info['num_nodes']
num_colors = u.shape[1]
out_feat = self.lin1.out_channels
mask = batch_info['mask'][..., None].expand(n, num_colors, out_feat)
normalizer = batch_info['n_batch']
mean2 = torch.sum(u / normalizer, dim=1)
mean2 = mean2.unsqueeze(-1)
u = u.permute(0, 2, 1)
out = self.lin1(u).permute(0, 2, 1)
z2 = self.lin2(mean2) * 0.1
z2 = z2.transpose(1, 2)
index_tensor = batch_info['coloring'][:, :, None].expand(out.shape[0], 1, out_feat)
out.scatter_add_(1, index_tensor, z2)
z3 = self.lin3(mean2)
z3 = z3.transpose(1, 2)
out3 = z3.expand(n, num_colors, out_feat)
out += out3 * mask * 0.1
if self.residual:
return old_u + out
return out
class UtoGlobal(nn.Module):
def __init__(self, in_features: int, out_features: int, bias: bool, gain: float):
super().__init__()
self.lin1 = Linear(in_features, out_features, bias, gain=gain)
self.lin2 = Linear(in_features, out_features, bias, gain=gain)
def reset_parameters(self):
for layer in [self.lin1, self.lin2]:
layer.reset_parameters()
def forward(self, u, batch_info: dict, method='mean'):
coloring = batch_info['coloring']
index_tensor = coloring[:, :, None].expand(u.shape[0], 1, u.shape[2])
extended_diag = u.gather(1, index_tensor)[:, 0, :]
mean_batch_trace = pooling(extended_diag, batch_info, 'mean')
out1 = self.lin1(mean_batch_trace)
mean = torch.sum(u / batch_info['n_batch'], dim=1)
batch_sum = pooling(mean, batch_info, 'mean')
batch_sum = batch_sum - mean_batch_trace
out2 = self.lin2(batch_sum)
return out1 + out2
class NodeExtractor(nn.Module):
def __init__(self, in_features_u: int, out_features_u: int):
super().__init__()
self.lin1_u = nn.Linear(in_features_u, in_features_u)
self.lin2_u = nn.Linear(in_features_u, in_features_u)
self.combine1 = nn.Linear(3 * in_features_u, out_features_u)
|
MIT License
|
atreichel/numerapi
|
numerapi/numerapi.py
|
NumerAPI.__unzip_file
|
python
|
def __unzip_file(self, src_path, dest_path, filename):
self.logger.info("unzipping file...")
unzip_path = "{0}/{1}".format(dest_path, filename)
try:
os.makedirs(unzip_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
with zipfile.ZipFile(src_path, "r") as z:
z.extractall(unzip_path)
return True
|
Unzips the file located at ``src_path`` into ``dest_path``/``filename``, creating the target directory if it does not already exist.
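The method is private to ``NumerAPI``; the same pattern, sketched standalone with placeholder paths:
import errno
import os
import zipfile

src_path = "numerai_dataset.zip"                       # placeholder zip archive
unzip_path = os.path.join("datasets", "numerai_dataset")

try:
    os.makedirs(unzip_path)                            # tolerate an existing directory
except OSError as exception:
    if exception.errno != errno.EEXIST:
        raise

with zipfile.ZipFile(src_path, "r") as z:
    z.extractall(unzip_path)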
|
https://github.com/atreichel/numerapi/blob/5de3d61ab34233c7ef21e021dc2db3b52665543e/numerapi/numerapi.py#L92-L110
|
import zipfile
import json
import os
from datetime import datetime, timedelta
import getpass
import errno
import logging
import requests
import numpy as np
class NumerAPI(object):
def __init__(self, verbosity="INFO"):
self.logger = logging.getLogger(__name__)
numeric_log_level = getattr(logging, verbosity.upper())
if not isinstance(numeric_log_level, int):
raise ValueError('invalid verbosity: %s' % verbosity)
log_format = "%(asctime)s %(levelname)s %(name)s: %(message)s"
self._date_format = "%Y-%m-%dT%H:%M:%S"
logging.basicConfig(format=log_format, level=numeric_log_level,
datefmt=self._date_format)
self.api_base_url = "https://api.numer.ai"
self._FIRST_ROUND = 51
not_logged_in_msg = "username not specified and not logged in"
self._not_logged_in_error = ValueError(not_logged_in_msg)
self._username = None
self._access_token = None
self.url_paths = None
def __get_url(self, url_path_name, query_params=None):
self.url_paths = {
"login": "/sessions",
"auth": "/submission_authorizations",
"dataset": "/competitions/current/dataset",
"submissions": "/submissions",
"users": "/users",
"competitions": "/competitions",
"competitions_by_id": "/competitions/id",
"current_leaderboard_url": "/currentLeaderboard"
}
if query_params is None:
query_params_str = ""
elif isinstance(query_params, dict):
query_params_str = "?" + json.dumps(query_params)
elif isinstance(query_params, str):
query_params_str = "?" + query_params
else:
self.logger.warning("invalid query params")
query_params = ""
return (self.api_base_url +
self.url_paths[url_path_name] +
query_params_str)
def __get_username(self, username):
if username is None:
if hasattr(self, "_username"):
username = self._username
else:
raise self._not_logged_in_error
return username
|
MIT License
|
purestorage-openconnect/py-pure-client
|
pypureclient/flasharray/FA_2_5/models/snmp_managers.py
|
SNMPManagers.__init__
|
python
|
def __init__(
self,
name=None,
host=None,
notification=None,
v2c=None,
v3=None,
version=None,
):
if name is not None:
self.name = name
if host is not None:
self.host = host
if notification is not None:
self.notification = notification
if v2c is not None:
self.v2c = v2c
if v3 is not None:
self.v3 = v3
if version is not None:
self.version = version
|
Keyword args:
name (str): A user-specified name. The name must be locally unique and can be changed.
host (str): DNS hostname or IP address of a computer that hosts an SNMP manager to which Purity//FA is to send trap messages when it generates alerts.
notification (str): The type of notification the agent will send. Valid values are `inform` and `trap`.
v2c (SnmpV2c)
v3 (SnmpV3)
version (str): Version of the SNMP protocol to be used by Purity//FA to communicate with the specified manager. Valid values are `v2c` and `v3`.
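A hedged construction example (values are placeholders); only the keyword arguments passed in are set as attributes on the object:
from pypureclient.flasharray.FA_2_5.models.snmp_managers import SNMPManagers  # import path assumed

manager = SNMPManagers(
    name="snmp-manager-1",
    host="snmp.example.com",
    notification="trap",      # or "inform"
    version="v2c",            # or "v3"
)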
|
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_5/models/snmp_managers.py#L53-L82
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class SNMPManagers(object):
swagger_types = {
'name': 'str',
'host': 'str',
'notification': 'str',
'v2c': 'SnmpV2c',
'v3': 'SnmpV3',
'version': 'str'
}
attribute_map = {
'name': 'name',
'host': 'host',
'notification': 'notification',
'v2c': 'v2c',
'v3': 'v3',
'version': 'version'
}
required_args = {
}
|
BSD 2-Clause Simplified License
|
netflix/dispatch
|
src/dispatch/project/views.py
|
create_project
|
python
|
def create_project(*, db_session: Session = Depends(get_db), project_in: ProjectCreate):
project = get_by_name(db_session=db_session, name=project_in.name)
if project:
raise ValidationError(
[ErrorWrapper(ExistsError(msg="A project with this name already exists."), loc="name")],
model=ProjectCreate,
)
project = create(db_session=db_session, project_in=project_in)
return project
|
Create a new project.
|
https://github.com/netflix/dispatch/blob/e30705938e970d8ef0dfdd04246a3f3004a6a44f/src/dispatch/project/views.py#L40-L50
|
from fastapi import APIRouter, Depends, HTTPException, status
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from sqlalchemy.orm import Session
from dispatch.exceptions import ExistsError
from dispatch.auth.permissions import (
ProjectCreatePermission,
PermissionsDependency,
ProjectUpdatePermission,
)
from dispatch.database.core import get_db
from dispatch.database.service import common_parameters, search_filter_sort_paginate
from dispatch.models import PrimaryKey
from .models import (
ProjectCreate,
ProjectRead,
ProjectUpdate,
ProjectPagination,
)
from .service import create, delete, get, get_by_name, update
router = APIRouter()
@router.get("", response_model=ProjectPagination)
def get_projects(common: dict = Depends(common_parameters)):
return search_filter_sort_paginate(model="Project", **common)
@router.post(
"",
response_model=ProjectRead,
summary="Create a new project.",
dependencies=[Depends(PermissionsDependency([ProjectCreatePermission]))],
)
|
Apache License 2.0
|
openstack/zaqar
|
zaqar/storage/mongodb/messages.py
|
MessageController._inc_counter
|
python
|
def _inc_counter(self, queue_name, project=None, amount=1, window=None):
if hasattr(self._queue_ctrl, '_inc_counter'):
return self._queue_ctrl._inc_counter(queue_name, project,
amount, window)
now = timeutils.utcnow_ts()
update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}}
query = _get_scoped_query(queue_name, project)
if window is not None:
threshold = now - window
query['c.t'] = {'$lt': threshold}
while True:
try:
collection = self._collection(queue_name, project).stats
doc = collection.find_one_and_update(
query, update,
return_document=pymongo.ReturnDocument.AFTER,
projection={'c.v': 1, '_id': 0})
break
except pymongo.errors.AutoReconnect:
LOG.exception('Auto reconnect error')
if doc is None:
if window is None:
message = (u'Failed to increment the message '
u'counter for queue %(name)s and '
u'project %(project)s')
message %= dict(name=queue_name, project=project)
LOG.warning(message)
raise errors.QueueDoesNotExist(queue_name, project)
return None
return doc['c']['v']
|
Increments the message counter and returns the new value.
:param queue_name: Name of the queue to which the counter is scoped
:param project: Queue's project name
:param amount: (Default 1) Amount by which to increment the counter
:param window: (Default None) A time window, in seconds, that
must have elapsed since the counter was last updated, in
order to increment the counter.
:returns: Updated message counter value, or None if window
was specified, and the counter has already been updated
within the specified time period.
:raises QueueDoesNotExist: if not found
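A hedged usage sketch, assuming ``controller`` is an initialised ``MessageController`` bound to a MongoDB driver (not shown); queue and project names are placeholders:
# Plain increment: returns the new counter value for the queue.
marker = controller._inc_counter('my-queue', project='my-project', amount=1)

# Windowed increment: only bumps the counter if it has not been updated in
# the last 5 seconds; otherwise the update is skipped and None is returned.
maybe_marker = controller._inc_counter('my-queue', project='my-project',
                                        amount=1, window=5)
if maybe_marker is None:
    pass  # another writer updated the counter within the window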
|
https://github.com/openstack/zaqar/blob/5ec4277546e94e4dd2d1e3c5cde805debb5be1c8/zaqar/storage/mongodb/messages.py#L417-L482
|
import datetime
import time
from bson import objectid
from oslo_log import log as logging
from oslo_utils import timeutils
import pymongo.errors
import pymongo.read_preferences
from zaqar.i18n import _
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage.mongodb import utils
from zaqar.storage import utils as s_utils
LOG = logging.getLogger(__name__)
MAX_RETRY_POST_DURATION = 45
COUNTER_STALL_WINDOW = 5
ID_INDEX_FIELDS = [('_id', 1)]
TTL_INDEX_FIELDS = [
('e', 1),
]
PROJ_QUEUE = utils.PROJ_QUEUE_KEY
ACTIVE_INDEX_FIELDS = [
(PROJ_QUEUE, 1),
('k', 1),
('c.e', 1),
]
COUNTING_INDEX_FIELDS = [
(PROJ_QUEUE, 1),
('c.e', 1),
]
CLAIMED_INDEX_FIELDS = [
(PROJ_QUEUE, 1),
('c.id', 1),
('k', 1),
('c.e', 1),
]
MARKER_INDEX_FIELDS = [
('k', 1),
(PROJ_QUEUE, 1),
]
TRANSACTION_INDEX_FIELDS = [
('tx', 1),
]
class MessageController(storage.Message):
def __init__(self, *args, **kwargs):
super(MessageController, self).__init__(*args, **kwargs)
self._num_partitions = self.driver.mongodb_conf.partitions
self._queue_ctrl = self.driver.queue_controller
self._retry_range = range(self.driver.mongodb_conf.max_attempts)
self._collections = [db.messages
for db in self.driver.message_databases]
for collection in self._collections:
self._ensure_indexes(collection)
def _ensure_indexes(self, collection):
collection.ensure_index(TTL_INDEX_FIELDS,
name='ttl',
expireAfterSeconds=0,
background=True)
collection.ensure_index(ACTIVE_INDEX_FIELDS,
name='active',
background=True)
collection.ensure_index(CLAIMED_INDEX_FIELDS,
name='claimed',
background=True)
collection.ensure_index(COUNTING_INDEX_FIELDS,
name='counting',
background=True)
collection.ensure_index(MARKER_INDEX_FIELDS,
name='queue_marker',
background=True)
collection.ensure_index(TRANSACTION_INDEX_FIELDS,
name='transaction',
background=True)
def _collection(self, queue_name, project=None):
return self._collections[utils.get_partition(self._num_partitions,
queue_name, project)]
def _backoff_sleep(self, attempt):
conf = self.driver.mongodb_conf
seconds = utils.calculate_backoff(attempt, conf.max_attempts,
conf.max_retry_sleep,
conf.max_retry_jitter)
time.sleep(seconds)
def _purge_queue(self, queue_name, project=None):
scope = utils.scope_queue_name(queue_name, project)
collection = self._collection(queue_name, project)
collection.delete_many({PROJ_QUEUE: scope})
def _list(self, queue_name, project=None, marker=None,
echo=False, client_uuid=None, projection=None,
include_claimed=False, include_delayed=False,
sort=1, limit=None):
if sort not in (1, -1):
raise ValueError(u'sort must be either 1 (ascending) '
u'or -1 (descending)')
now = timeutils.utcnow_ts()
query = {
PROJ_QUEUE: utils.scope_queue_name(queue_name, project),
'tx': None,
}
if not echo:
query['u'] = {'$ne': client_uuid}
if marker is not None:
query['k'] = {'$gt': marker}
collection = self._collection(queue_name, project)
if not include_claimed:
query['c.e'] = {'$lte': now}
if not include_delayed:
query['$or'] = [{'d': {'$lte': now}},
{'d': {'$exists': False}}]
cursor = collection.find(query,
projection=projection,
sort=[('k', sort)])
if limit is not None:
cursor.limit(limit)
return cursor.hint(ACTIVE_INDEX_FIELDS)
def _count(self, queue_name, project=None, include_claimed=False):
query = {
PROJ_QUEUE: utils.scope_queue_name(queue_name, project),
'tx': None,
}
if not include_claimed:
query['c.e'] = {'$lte': timeutils.utcnow_ts()}
collection = self._collection(queue_name, project)
return collection.count(filter=query, hint=COUNTING_INDEX_FIELDS)
def _active(self, queue_name, marker=None, echo=False,
client_uuid=None, projection=None, project=None,
limit=None, include_delayed=False):
return self._list(queue_name, project=project, marker=marker,
echo=echo, client_uuid=client_uuid,
projection=projection, include_claimed=False,
include_delayed=include_delayed, limit=limit)
def _claimed(self, queue_name, claim_id,
expires=None, limit=None, project=None):
if claim_id is None:
claim_id = {'$ne': None}
query = {
PROJ_QUEUE: utils.scope_queue_name(queue_name, project),
'c.id': claim_id,
'c.e': {'$gt': expires or timeutils.utcnow_ts()},
}
kwargs = {}
collection = self._collection(queue_name, project)
msgs = collection.find(query, sort=[('k', 1)], **kwargs).hint(
CLAIMED_INDEX_FIELDS)
if limit is not None:
msgs = msgs.limit(limit)
now = timeutils.utcnow_ts()
def denormalizer(msg):
doc = _basic_message(msg, now)
doc['claim'] = msg['c']
return doc
return utils.HookedCursor(msgs, denormalizer)
def _unclaim(self, queue_name, claim_id, project=None):
cid = utils.to_oid(claim_id)
if cid is None:
return
now = timeutils.utcnow_ts()
scope = utils.scope_queue_name(queue_name, project)
collection = self._collection(queue_name, project)
collection.update_many({PROJ_QUEUE: scope, 'c.id': cid},
{'$set': {'c': {'id': None, 'e': now}}},
upsert=False)
|
Apache License 2.0
|
the-academic-observatory/observatory-platform
|
observatory-platform/observatory/platform/terraform_api.py
|
TerraformApi.plan_variable_changes
|
python
|
def plan_variable_changes(
self, new_vars: List[TerraformVariable], workspace_id: str
) -> Tuple[
List[TerraformVariable],
List[Tuple[TerraformVariable, TerraformVariable]],
List[TerraformVariable],
List[TerraformVariable],
]:
add: List[TerraformVariable] = []
edit: List[Tuple[TerraformVariable, TerraformVariable]] = []
unchanged: List[TerraformVariable] = []
old_vars = self.list_workspace_variables(workspace_id)
old_var_ids = {}
for old_var in old_vars:
old_var_ids[old_var.key] = old_var
for new_var in new_vars:
if new_var.key in old_var_ids.keys():
old_var = old_var_ids[new_var.key]
if new_var.sensitive or new_var.value != old_var.value:
new_var.var_id = old_var.var_id
edit.append((old_var, new_var))
else:
unchanged.append(new_var)
else:
add.append(new_var)
delete = list(set(old_vars) - set(new_vars))
return add, edit, unchanged, delete
|
Compares the current variables in the workspace with a list of new variables. It sorts the new variables in
one of 4 different categories and adds them to the corresponding list. Sensitive variables can never be
'unchanged'.
:param new_vars: list of potential new variables, each a TerraformVariable
:param workspace_id: the workspace id
:return: lists of variables in four categories (add, edit, unchanged, delete).
add: list of TerraformVariable objects to create in the workspace
edit: list of (old variable, new variable) tuples, where the new variable carries the old variable's id
unchanged: list of TerraformVariable objects that need no change
delete: list of TerraformVariable objects present in the workspace but absent from new_vars
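A hedged usage sketch; the token, workspace id and variables below are placeholders:
from observatory.platform.terraform_api import TerraformApi, TerraformVariable  # import path assumed

api = TerraformApi(token="<terraform-cloud-token>")
workspace_id = "ws-XXXXXXXX"

new_vars = [
    TerraformVariable("environment", "develop"),
    TerraformVariable("api_key", "secret-value", sensitive=True),
]

add, edit, unchanged, delete = api.plan_variable_changes(new_vars, workspace_id)
for old_var, new_var in edit:
    # each pair carries the old variable's id on the new variable, ready for update
    print(f"{new_var.key} will be updated in place (id {new_var.var_id})")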
|
https://github.com/the-academic-observatory/observatory-platform/blob/4b7e80b92cc8c20d6b3356ded1b23e658ac5157f/observatory-platform/observatory/platform/terraform_api.py#L434-L486
|
from __future__ import annotations
import json
import logging
import os
from dataclasses import dataclass
from enum import Enum
from http import HTTPStatus
from typing import Tuple, List
import requests
class TerraformVariableCategory(Enum):
terraform = "terraform"
env = "env"
@dataclass
class TerraformVariable:
key: str
value: str
var_id: str = None
description: str = ""
category: TerraformVariableCategory = TerraformVariableCategory.terraform
hcl: bool = False
sensitive: bool = False
def __str__(self):
return self.key
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return self.key == other.key
@staticmethod
def from_dict(dict_) -> TerraformVariable:
var_id = dict_.get("id")
attributes = dict_["attributes"]
key = attributes.get("key")
value = attributes.get("value")
sensitive = attributes.get("sensitive")
category = attributes.get("category")
hcl = attributes.get("hcl")
description = attributes.get("description")
return TerraformVariable(
key,
value,
sensitive=sensitive,
category=TerraformVariableCategory(category),
hcl=hcl,
description=description,
var_id=var_id,
)
def to_dict(self):
var = {
"type": "vars",
"attributes": {
"key": self.key,
"value": self.value,
"description": self.description,
"category": self.category.value,
"hcl": self.hcl,
"sensitive": self.sensitive,
},
}
if self.var_id is not None:
var["id"] = self.var_id
return var
class TerraformApi:
TERRAFORM_WORKSPACE_VERSION = "0.13.5"
VERBOSITY_WARNING = 0
VERBOSITY_INFO = 1
VERBOSITY_DEBUG = 2
def __init__(self, token: str, verbosity: int = VERBOSITY_WARNING):
self.token = token
if verbosity == TerraformApi.VERBOSITY_WARNING:
logging.getLogger().setLevel(logging.WARNING)
elif verbosity == TerraformApi.VERBOSITY_INFO:
logging.getLogger().setLevel(logging.INFO)
elif verbosity >= TerraformApi.VERBOSITY_DEBUG:
logging.getLogger().setLevel(logging.DEBUG)
self.api_url = "https://app.terraform.io/api/v2"
self.headers = {"Content-Type": "application/vnd.api+json", "Authorization": f"Bearer {token}"}
@staticmethod
def token_from_file(file_path: str) -> str:
with open(file_path, "r") as file:
token = json.load(file)["credentials"]["app.terraform.io"]["token"]
return token
def create_workspace(
self,
organisation: str,
workspace: str,
auto_apply: bool,
description: str,
version: str = TERRAFORM_WORKSPACE_VERSION,
) -> int:
attributes = {
"name": workspace,
"auto-apply": str(auto_apply).lower(),
"description": description,
"terraform_version": version,
}
data = {"data": {"type": "workspaces", "attributes": attributes}}
response = requests.post(
f"{self.api_url}/organizations/{organisation}/workspaces", headers=self.headers, json=data
)
if response.status_code == HTTPStatus.CREATED:
logging.info(f"Created workspace {workspace}")
logging.debug(f"response: {response.text}")
elif response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY:
logging.warning(f"Workspace with name {workspace} already exists")
logging.debug(f"response: {response.text}")
else:
logging.error(f"Response status: {response.status_code}")
logging.error(f"Unsuccessful creating workspace, response: {response.text}")
exit(os.EX_CONFIG)
return response.status_code
def delete_workspace(self, organisation: str, workspace: str) -> int:
response = requests.delete(
f"{self.api_url}/organizations/{organisation}/workspaces/{workspace}", headers=self.headers
)
return response.status_code
def workspace_id(self, organisation: str, workspace: str) -> str:
response = requests.get(
f"{self.api_url}/organizations/{organisation}/workspaces/{workspace}", headers=self.headers
)
if response.status_code == HTTPStatus.OK:
logging.info(f"Retrieved workspace id for workspace '{workspace}'.")
logging.debug(f"response: {response.text}")
else:
logging.error(f"Response status: {response.status_code}")
logging.error(
f"Unsuccessful retrieving workspace id for workspace '{workspace}', response: {response.text}"
)
exit(os.EX_CONFIG)
workspace_id = json.loads(response.text)["data"]["id"]
return workspace_id
def add_workspace_variable(self, variable: TerraformVariable, workspace_id: str) -> str:
response = requests.post(
f"{self.api_url}/workspaces/{workspace_id}/vars", headers=self.headers, json={"data": variable.to_dict()}
)
key = variable.key
if response.status_code == HTTPStatus.CREATED:
logging.info(f"Added variable {key}")
else:
msg = f"Unsuccessful adding variable {key}, response: {response.text}, status_code: {response.status_code}"
logging.error(msg)
raise ValueError(msg)
var_id = json.loads(response.text)["data"]["id"]
return var_id
def update_workspace_variable(self, variable: TerraformVariable, workspace_id: str) -> int:
response = requests.patch(
f"{self.api_url}/workspaces/{workspace_id}/vars/{variable.var_id}",
headers=self.headers,
json={"data": variable.to_dict()},
)
try:
key = json.loads(response.text)["data"]["attributes"]["key"]
except KeyError:
try:
key = variable.key
except KeyError:
key = None
if response.status_code == HTTPStatus.OK:
logging.info(f"Updated variable {key}")
else:
msg = f"Unsuccessful updating variable with id {variable.var_id} and key {key}, response: {response.text}, status_code: {response.status_code}"
logging.error(msg)
raise ValueError(msg)
return response.status_code
def delete_workspace_variable(self, var: TerraformVariable, workspace_id: str) -> int:
response = requests.delete(f"{self.api_url}/workspaces/{workspace_id}/vars/{var.var_id}", headers=self.headers)
if response.status_code == HTTPStatus.NO_CONTENT:
logging.info(f"Deleted variable with id {var.var_id}")
else:
msg = f"Unsuccessful deleting variable with id {var.var_id}, response: {response.text}, status_code: {response.status_code}"
logging.error(msg)
raise ValueError(msg)
return response.status_code
def list_workspace_variables(self, workspace_id: str) -> List[TerraformVariable]:
response = requests.get(f"{self.api_url}/workspaces/{workspace_id}/vars", headers=self.headers)
if response.status_code == HTTPStatus.OK:
logging.info(f"Retrieved workspace variables.")
logging.debug(f"response: {response.text}")
else:
logging.error(f"Response status: {response.status_code}")
logging.error(f"Unsuccessful retrieving workspace variables, response: {response.text}")
exit(os.EX_CONFIG)
workspace_vars = json.loads(response.text)["data"]
return [TerraformVariable.from_dict(dict_) for dict_ in workspace_vars]
def create_configuration_version(self, workspace_id: str) -> Tuple[str, str]:
data = {"data": {"type": "configuration-versions", "attributes": {"auto-queue-runs": "false"}}}
response = requests.post(
f"{self.api_url}/workspaces/{workspace_id}/configuration-versions", headers=self.headers, json=data
)
if response.status_code == 201:
logging.info(f"Created configuration version.")
logging.debug(f"response: {response.text}")
else:
logging.error(f"Response status: {response.status_code}")
logging.error(f"Unsuccessful creating configuration version, response: {response.text}")
exit(os.EX_CONFIG)
upload_url = json.loads(response.text)["data"]["attributes"]["upload-url"]
configuration_id = json.loads(response.text)["data"]["id"]
return upload_url, configuration_id
def get_configuration_version_status(self, configuration_id: str) -> str:
response = requests.get(f"{self.api_url}/configuration-versions/{configuration_id}", headers=self.headers)
if response.status_code == HTTPStatus.OK:
logging.info(f"Retrieved configuration version info.")
logging.debug(f"response: {response.text}")
else:
logging.error(f"Response status: {response.status_code}")
logging.error(f"Unsuccessful retrieving configuration version info, response: {response.text}")
exit(os.EX_CONFIG)
status = json.loads(response.text)["data"]["attributes"]["status"]
return status
@staticmethod
def upload_configuration_files(upload_url: str, configuration_path: str) -> int:
headers = {"Content-Type": "application/octet-stream"}
with open(configuration_path, "rb") as configuration:
response = requests.put(upload_url, headers=headers, data=configuration.read())
if response.status_code == HTTPStatus.OK:
logging.info(f"Uploaded configuration.")
logging.debug(f"response: {response.text}")
else:
logging.error(f"Response status: {response.status_code}")
logging.error(f"Unsuccessful uploading configuration, response: {response.text}")
exit(os.EX_CONFIG)
return response.status_code
def create_run(self, workspace_id: str, target_addrs: str = None, message: str = "") -> str:
data = {
"data": {
"attributes": {"message": message},
"type": "runs",
"relationships": {"workspace": {"data": {"type": "workspaces", "id": workspace_id}}},
}
}
if target_addrs:
data["data"]["attributes"]["target-addrs"] = [target_addrs]
response = requests.post(f"{self.api_url}/runs", headers=self.headers, json=data)
if response.status_code == HTTPStatus.CREATED:
logging.info(f"Created run.")
logging.debug(f"response: {response.text}")
else:
logging.error(f"Response status: {response.status_code}")
logging.error(f"Unsuccessful creating run, response: {response.text}")
exit(os.EX_CONFIG)
run_id = json.loads(response.text)["data"]["id"]
return run_id
def get_run_details(self, run_id: str) -> dict:
response = requests.get(f"{self.api_url}/runs/{run_id}", headers=self.headers)
if not response.status_code == HTTPStatus.OK:
logging.error(f"Response status: {response.status_code}")
logging.error(f"Unsuccessful retrieving run details, response: {response.text}")
exit(os.EX_CONFIG)
return json.loads(response.text)
|
Apache License 2.0
|
mindspore-ai/mindinsight
|
mindinsight/debugger/stream_handler/tensor_handler.py
|
TensorHandler.get_tensor_info_for_tensor_graph
|
python
|
def get_tensor_info_for_tensor_graph(self, tensor_name, node_type, step):
res = {}
tensor = self._get_tensor(tensor_name, node_type, step)
if tensor and (not tensor.empty or tensor.stats):
res['statistics'] = tensor.get_tensor_statistics()
res['shape'] = tensor.shape
missing_tensors_info = self.get_missing_tensor_info(tensor_name, node_type, step, self._check_tensor_stats)
if not missing_tensors_info and node_type == NodeTypeEnum.PARAMETER.value and step > 0:
res['has_prev_step'] = True
return res, missing_tensors_info
|
Get Tensor info for tensor graphs.
Args:
tensor_name (str): Tensor name, format like `node_name:slot`.
node_type (str): Node type.
step (int): The step of tensor info.
Returns:
dict, tensor infos, including overall statistics, tensor shape and has_prev_step info.
list, list of missing tensor basic information.
|
https://github.com/mindspore-ai/mindinsight/blob/253a210719dbb1e55b826f2e489322f402d66676/mindinsight/debugger/stream_handler/tensor_handler.py#L682-L703
|
import os
import tempfile
import threading
import time
from collections import OrderedDict
from collections import namedtuple
import numpy as np
from mindinsight.conf import settings
from mindinsight.datavisual.data_transform.graph.node import NodeTypeEnum
from mindinsight.debugger.common.exceptions.exceptions import DebuggerParamValueError, DebuggerDownloadOverQueue, DebuggerDownloadTensorNotExist
from mindinsight.debugger.common.log import LOGGER as log
from mindinsight.debugger.common.utils import MAX_CACHE_SPACE_BYTES, MAX_SINGLE_TENSOR_CACHE_BYTES
from mindinsight.debugger.stream_cache.tensor import OpTensor, ConstTensor, TensorStatusEnum, DownloadStatusEnum
from mindinsight.debugger.stream_handler.base_handler import StreamHandlerBase
from mindinsight.domain.graph.proto.ms_graph_pb2 import DataType
from mindinsight.utils.tensor import TensorUtils, TensorComparison
TensorBasicInfo = namedtuple('tensor_basic_info', ['full_name', 'node_type', 'iter'])
FILE_MODE = 0o600
DIR_MODE = 0o700
class MemoryMgr:
def __init__(self):
self._memory_queue = OrderedDict()
self._remaining_cache_space = MAX_CACHE_SPACE_BYTES
self._lock = threading.Lock()
@property
def remaining_cache_space(self):
return self._remaining_cache_space
def request(self, key, request_space, release_func):
if self.check_space(request_space):
release_func(True)
return
if request_space == 0:
return
with self._lock:
if key in self._memory_queue:
log.error("Key already exist error for memory queue.")
raise ValueError("Key already exist error for memory queue.")
self._remaining_cache_space -= request_space
while self._remaining_cache_space <= 0:
self.release()
with self._lock:
self._memory_queue[key] = (request_space, release_func)
def release(self, key=None):
with self._lock:
if key is not None:
if key not in self._memory_queue:
return
self._memory_queue.move_to_end(key, last=False)
_, value = self._memory_queue.popitem(last=False)
free_space, release_func = value
release_func()
self._remaining_cache_space += free_space
log.debug("Update cache space.")
@staticmethod
def check_space(space):
return space >= MAX_SINGLE_TENSOR_CACHE_BYTES
class DownloadMgr:
def __init__(self):
self._temp_base_dir = self.mk_temp_base_dir()
self.tensor_info = None
self.file_name = None
self.file_path = None
self.temp_dir = None
self._lock = threading.Lock()
self.status = DownloadStatusEnum.PENDING.value
@property
def temp_base_dir(self):
return self._temp_base_dir
def add(self, file_name, file_path, temp_dir, **tensor_info):
with self._lock:
if self.status != DownloadStatusEnum.SENDING.value:
self.file_name = file_name
self.file_path = file_path
self.temp_dir = temp_dir
self.tensor_info = tensor_info
return
log.error("There is already a tensor in download")
raise DebuggerDownloadOverQueue()
def get(self, **tensor_info):
with self._lock:
if self.tensor_info == tensor_info:
self.status = DownloadStatusEnum.SENDING.value
return self.file_name, self.file_path, self.clean
log.error("No such tensor to download")
raise DebuggerDownloadTensorNotExist()
def check_status(self):
if self.status == DownloadStatusEnum.SENDING.value:
log.error("There is already a tensor in download")
raise DebuggerDownloadOverQueue()
@staticmethod
def mk_temp_base_dir():
workspace = settings.WORKSPACE
temp_base_dir = os.path.join(workspace, 'tempdata')
os.makedirs(temp_base_dir, DIR_MODE, exist_ok=True)
return temp_base_dir
def clean(self):
with self._lock:
if self.temp_dir:
self.temp_dir.cleanup()
self.temp_dir = None
self.tensor_info = None
self.file_name = None
self.file_path = None
self.status = DownloadStatusEnum.PENDING.value
class MultiCardTensorHandler:
def __init__(self):
self._memory_mgr = MemoryMgr()
self._download_mgr = DownloadMgr()
self.tensor_handlers = {0: TensorHandler(self._memory_mgr, self._download_mgr, rank_id=0)}
@property
def download_mgr(self):
return self._download_mgr
def set_step(self, step_id):
for tensor_handler in self.tensor_handlers.values():
tensor_handler.cur_step = step_id
def get_tensor_handler_by_rank_id(self, rank_id=0, create_if_not_exit=False):
if rank_id in self.tensor_handlers:
return self.tensor_handlers.get(rank_id)
if create_if_not_exit:
tensor_handler = TensorHandler(self._memory_mgr, self._download_mgr, rank_id=rank_id)
self.tensor_handlers[rank_id] = tensor_handler
return tensor_handler
log.error("There is no rank id %d in MultiCardTensorHandler.", rank_id)
raise ValueError
def put(self, value):
for rank_id, tensor in value:
if rank_id not in self.tensor_handlers:
self.tensor_handlers[rank_id] = TensorHandler(self._memory_mgr, self._download_mgr, rank_id=rank_id)
self.tensor_handlers[rank_id].put(tensor)
def get(self, filter_condition=None, rank_id=0):
if rank_id in self.tensor_handlers:
return self.tensor_handlers.get(rank_id).get(filter_condition)
log.error("There is no rank id %d.", rank_id)
raise ValueError
def clean(self):
self.__init__()
@staticmethod
def tensor_basic_info(full_name, node_type, iter_step):
return TensorBasicInfo(full_name=full_name, node_type=node_type, iter=iter_step)
class TensorHandler(StreamHandlerBase):
def __init__(self, memory_mgr, download_mgr, rank_id):
self._param_names = set()
self._const_vals = {}
self._tensors = {}
self._cur_step = 0
self._memory_mgr = memory_mgr
self.download_mgr = download_mgr
self._rank_id = rank_id
self._hold_value = {}
@property
def cur_step(self):
return self._cur_step
@cur_step.setter
def cur_step(self, step_id):
self._cur_step = step_id
@property
def prev_step(self):
return self._cur_step - 1
def put(self, value):
tensor = self._deal_with_tensor(value)
stats = None
if value.get('stats', False) and tensor.status == TensorStatusEnum.CACHED.value:
tensor.calculate_stats()
stats = tensor.stats
flag = self._put_tensors(tensor)
new_tensor = self._tensors.get(tensor.name).get(tensor.step)
new_tensor.stats = stats
log.info("Put tensor %s of step: %d, into cache. Flag: %s", tensor.name, tensor.step, flag)
return flag
@staticmethod
def _deal_with_tensor(value):
step = value.get('step', 0)
tensor_content = b''.join(value.get('tensor_contents'))
tensor = OpTensor(value.get('name'), value.get('tensor_base'), tensor_content=tensor_content, step=step)
if value.get('oversize'):
tensor.clean_tensor_value(oversize=True)
return tensor
def put_empty_tensor(self, name, step):
cache_tensor = self._tensors.get(name)
if cache_tensor is None:
cache_tensor = {}
self._tensors[name] = cache_tensor
old_tensor = cache_tensor.get(step)
if old_tensor is None:
tensor = OpTensor(name, step=step)
cache_tensor[step] = tensor
return True
return False
def put_tensor_base(self, name, step, tensor_base):
cache_tensor = self._tensors.get(name)
if cache_tensor is None:
cache_tensor = {}
self._tensors[name] = cache_tensor
old_tensor = cache_tensor.get(step)
if old_tensor is None or old_tensor.tensor_base != tensor_base:
tensor = OpTensor(name, tensor_base, step=step)
cache_tensor[step] = tensor
return True
return False
def put_tensor_stats(self, name, step, tensor_base, tensor_stats):
cache_tensor = self._tensors.get(name)
if cache_tensor is None:
cache_tensor = {}
self._tensors[name] = cache_tensor
old_tensor = cache_tensor.get(step)
if old_tensor is None:
tensor = OpTensor(name, tensor_base, tensor_stats=tensor_stats, step=step)
cache_tensor[step] = tensor
return True
if old_tensor.get_tensor_statistics() != TensorUtils.get_overall_statistic_dict(tensor_stats):
old_tensor.stats = tensor_stats
return True
return False
def _put_tensors(self, tensor):
step = tensor.step
cache_tensor = self._tensors.get(tensor.name)
if cache_tensor is None:
cache_tensor = {}
self._tensors[tensor.name] = cache_tensor
if not self._hold_value.pop((tensor.name, tensor.step), False):
tensor.clean_tensor_value(oversize=False, remain_scalar=True)
old_tensor = cache_tensor.get(step)
if self._check_tensor_update(old_tensor, tensor):
self._put_tensor_into_cache(cache_tensor, tensor)
return True
return False
def _check_tensor_update(self, old_tensor, tensor):
if not old_tensor or (old_tensor.status == TensorStatusEnum.CACHED.value and self._is_value_diff(
old_tensor.value, tensor.value)) or (old_tensor.status == TensorStatusEnum.UNCACHED.value
and tensor.status == TensorStatusEnum.CACHED.value):
return True
return False
def _put_tensor_into_cache(self, cache_tensor, tensor):
step = tensor.step
self._memory_mgr.release((self._rank_id, tensor.name, step))
def release_func(over_size=False):
cache_tensor.get(step).clean_tensor_value(over_size)
self._memory_mgr.request((self._rank_id, tensor.name, step), tensor.nbytes, release_func)
cache_tensor[step] = tensor
log.debug("Put updated tensor value for %s of step %s.", tensor.name, step)
@staticmethod
def _is_value_diff(old_value, new_value):
log.debug("old value type: %s, new_value type: %s", type(old_value), type(new_value))
if old_value is None and new_value is None:
return False
flag = old_value != new_value
if isinstance(flag, np.ndarray):
return flag.any()
return flag
def put_const_vals(self, const_vals):
for const_val in const_vals:
if not (const_val.value and const_val.key):
continue
if DataType.Name(const_val.value.dtype) == "DT_TENSOR":
tensor_proto = const_val.value.tensor_val
tensor_value = tensor_proto.tensor_content
tensor_proto.ClearField('tensor_content')
tensor_proto.node_name = const_val.key
tensor_proto.slot = '0'
tensor_base = {
'dtype': tensor_proto.data_type,
'shape': tensor_proto.dims,
'data_size': len(tensor_value)
}
name = ':'.join([tensor_proto.node_name, tensor_proto.slot])
const_tensor = OpTensor(name, tensor_base, tensor_content=tensor_value)
else:
const_tensor = ConstTensor(const_val)
self._const_vals[const_tensor.name] = const_tensor
def record_parameter_names(self, names):
self._param_names.update(names)
log.debug("Record %d parameters in cache. Total parameter number: %d", len(names), len(self._param_names))
def get(self, filter_condition=None):
name = filter_condition.get('name')
node_type = filter_condition.get('node_type')
shape = filter_condition.get('shape')
if filter_condition.get('prev'):
step = self.prev_step
else:
step = self.cur_step
tensor = self._get_tensor(name, node_type, step)
if not tensor:
log.error("No tensor named %s at the step %s", name, step)
raise DebuggerParamValueError("No tensor named {}".format(name))
tensor_info = tensor.get_full_info(shape)
missing_tensors_info = self.get_missing_tensor_info(name, node_type, step, self._check_tensor_value)
if not missing_tensors_info and node_type == NodeTypeEnum.PARAMETER.value and step > 0:
tensor_info['has_prev_step'] = True
res = {
'tensor_value': tensor_info,
'view_cmd': False
}
if tensor.status == TensorStatusEnum.UNCACHED.value:
self._add_hold_value_tensors(name, step)
res['view_cmd'] = True
return res
def _get_tensor(self, tensor_name, node_type=None, step=None):
if step is None:
step = self._cur_step
tensor = self._tensors.get(tensor_name, {}).get(step)
if not tensor and node_type == NodeTypeEnum.CONST.value:
const_name = tensor_name.rsplit('/', 1)[-1]
tensor = self._const_vals.get(const_name)
if tensor:
self._tensors[tensor_name] = {step: tensor}
return tensor
def _get_basic_info(self, tensor_name, node_type, step):
tensor = self._get_tensor(tensor_name, node_type, step)
if tensor:
return tensor.get_basic_info()
return None
def update_tensor_history(self, tensor_history, step):
missed_tensors = []
for tensor_info in tensor_history.get('tensor_history'):
tensor_name = tensor_info.get('full_name')
node_type = tensor_info.get('node_type')
basic_info = self._get_basic_info(tensor_name, node_type, step)
missing_tensors_info = self.get_missing_tensor_info(tensor_name, node_type, step,
self._check_tensor_base)
if not missing_tensors_info and node_type == NodeTypeEnum.PARAMETER.value and step > 0:
basic_info['has_prev_step'] = True
if basic_info:
tensor_info.update(basic_info)
if missing_tensors_info:
missed_tensors.extend(missing_tensors_info)
return missed_tensors
def get_missing_tensor_info(self, tensor_name, node_type, step=None, check_func=None):
if step is None:
step = self._cur_step
if check_func is None:
check_func = self._check_tensor_value
missing_tensors_info = []
if check_func(tensor_name, step):
missing_tensors_info.append(TensorBasicInfo(full_name=tensor_name, node_type=node_type, iter=''))
log.debug("Add current step tensor base view cmd for %s", tensor_name)
if node_type == NodeTypeEnum.PARAMETER.value and step > 0 and check_func(tensor_name, step - 1):
missing_tensors_info.append(TensorBasicInfo(full_name=tensor_name, node_type=node_type, iter='prev'))
log.debug("Add previous step tensor base view cmd for %s", tensor_name)
return missing_tensors_info
def _check_tensor_base(self, tensor_name, step):
tensor = self._get_tensor(tensor_name, step=step)
if tensor is not None and ((tensor.tensor_base and tensor.tensor_base.get(
'data_size') > 0) or tensor.status == TensorStatusEnum.OVERSIZE.value):
return False
return True
def _check_tensor_stats(self, tensor_name, step):
tensor = self._get_tensor(tensor_name, step=step)
if tensor is not None and (tensor.stats or tensor.status == TensorStatusEnum.OVERSIZE.value):
return False
return True
def _check_tensor_value(self, tensor_name, step):
tensor = self._get_tensor(tensor_name, step=step)
res = bool(not tensor or tensor.status == TensorStatusEnum.OVERSIZE.value)
res = bool(res or tensor.status == TensorStatusEnum.UNCACHED.value)
return res
def get_valid_tensor_by_name(self, tensor_name, step, prev=False):
target_step = step - 1 if prev else step
if target_step < 0:
log.warning("Step %d has no previous value for tensor: %s", target_step, tensor_name)
return None
tensor = self._get_tensor(tensor_name, step=target_step)
return tensor
def clean_tensors(self, cur_step):
if cur_step != self._cur_step:
self._cur_step = cur_step
self._clean_expired_tensors(cur_step)
self._clean_parameters()
def _clean_expired_tensors(self, cur_step):
expired_tensor = []
for tensor_name, tensor in self._tensors.items():
expired_step = [step for step in tensor.keys() if step <= cur_step - 2]
for step in expired_step:
self._memory_mgr.release((self._rank_id, tensor_name, step))
if not tensor:
expired_tensor.append(tensor_name)
for tensor_name in expired_tensor:
self._tensors.pop(tensor_name)
def _clean_parameters(self):
for param in self._param_names:
if param in self._tensors:
params = self._tensors.pop(param)
for step in params:
self._memory_mgr.release((self._rank_id, param, step))
log.debug("Clean param %s in cache.", param)
def get_tensors_diff(self, tensor_name, shape, tolerance=0, step=None):
curr_tensor = self.get_valid_tensor_by_name(tensor_name, step=step)
prev_tensor = self.get_valid_tensor_by_name(tensor_name, prev=True, step=step)
if not (curr_tensor and prev_tensor) or self._check_no_comparison_status(
curr_tensor) or self._check_no_comparison_status(prev_tensor):
log.error("Get current step and previous step for this tensor name %s failed.", tensor_name)
raise DebuggerParamValueError(f"Get current step and previous step for this tensor name "
f"{tensor_name} failed.")
if self._check_not_cached_status(curr_tensor) or self._check_not_cached_status(prev_tensor):
self._add_hold_value_tensors(tensor_name, step)
reply = {
'tensor_status': TensorStatusEnum.UNCACHED.value,
'view_cmd': True
}
return reply
curr_tensor_slice = curr_tensor.get_tensor_value_by_shape(shape)
prev_tensor_slice = prev_tensor.get_tensor_value_by_shape(shape)
tensor_info = curr_tensor.get_basic_info()
tensor_info.pop('has_prev_step')
tensor_info.pop('value')
tensor_comparison = curr_tensor.tensor_comparison
if not tensor_comparison or tensor_comparison.tolerance != tolerance:
if curr_tensor.value.shape != prev_tensor.value.shape:
raise DebuggerParamValueError("The shape of these two step tensors is not the same.")
tensor_diff = TensorUtils.calc_diff_between_two_tensor(curr_tensor.value, prev_tensor.value, tolerance)
stats = TensorUtils.get_statistics_from_tensor(tensor_diff)
tensor_comparison = TensorComparison(tolerance, stats, tensor_diff)
curr_tensor.update_tensor_comparisons(tensor_comparison)
if isinstance(curr_tensor_slice, np.ndarray) and isinstance(prev_tensor_slice, np.ndarray):
if not shape:
tensor_diff_slice = tensor_comparison.value
else:
tensor_diff_slice = tensor_comparison.value[shape]
result = np.stack([prev_tensor_slice, curr_tensor_slice, tensor_diff_slice], axis=-1)
tensor_info['diff'] = result.tolist()
elif isinstance(curr_tensor_slice, str):
tensor_info['diff'] = curr_tensor_slice
tensor_info.update(self._get_comparison_statistics(curr_tensor, prev_tensor))
reply = {
'tensor_status': TensorStatusEnum.CACHED.value,
'tensor_value': tensor_info
}
return reply
def _add_hold_value_tensors(self, tensor_name, step):
self._hold_value[(tensor_name, step)] = True
if step - 1 >= 0:
self._hold_value[(tensor_name, step - 1)] = True
@staticmethod
def _check_no_comparison_status(tensor):
return tensor.status == TensorStatusEnum.EMPTY.value or tensor.status == TensorStatusEnum.OVERSIZE.value
@staticmethod
def _check_not_cached_status(tensor):
return tensor.status == TensorStatusEnum.UNCACHED.value
@staticmethod
def _get_comparison_statistics(curr_tensor, prev_tensor):
stats_info = {}
diff_tensor_stats = curr_tensor.tensor_comparison.stats
curr_tensor_stats = TensorUtils.get_statistics_from_tensor(curr_tensor.value)
prev_tensor_stats = TensorUtils.get_statistics_from_tensor(prev_tensor.value)
stats_info['curr_step_statistics'] = TensorUtils.get_overall_statistic_dict(overall_stats=curr_tensor_stats)
stats_info['prev_step_statistics'] = TensorUtils.get_overall_statistic_dict(overall_stats=prev_tensor_stats)
stats_info['statistics'] = TensorUtils.get_overall_statistic_dict(overall_stats=diff_tensor_stats)
return stats_info
|
Apache License 2.0
|
fishilico/shared
|
java/keystore/util_bin.py
|
run_process_with_input
|
python
|
def run_process_with_input(cmdline, data, fatal=False, indent=None):
if indent is None:
logger.info("Running %s", ' '.join(cmdline))
sys.stdout.flush()
if indent:
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
indent_thread = threading.Thread(
target=indentify_byte_stream,
args=(indent.encode('ascii'), proc.stdout))
indent_thread.start()
else:
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE)
if data:
proc.stdin.write(data)
proc.stdin.close()
ret = proc.wait()
if indent:
indent_thread.join()
if ret != 0:
logger.error("Command %s returned %d", ' '.join(cmdline), ret)
if fatal:
raise ValueError("Command {} failed".format(cmdline[0]))
return False
return True
|
Run the given command with the given data, optionally indenting its output
|
https://github.com/fishilico/shared/blob/170a16abf8b8cd946ccb1c45c322151fd22a1e2c/java/keystore/util_bin.py#L58-L85
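A standalone illustration of the same pattern (feed bytes to a child process over stdin, then check its exit status); the child command here is arbitrary and uses sys.executable so the snippet is portable:
import subprocess
import sys

data = b"beta\nalpha\ngamma\n"
proc = subprocess.Popen(
    [sys.executable, "-c", "import sys; sys.stdout.write(''.join(sorted(sys.stdin)))"],
    stdin=subprocess.PIPE,
)
proc.stdin.write(data)   # stream the input to the child
proc.stdin.close()       # signal EOF so the child can finish
ret = proc.wait()
if ret != 0:
    raise ValueError("child process failed with exit code {}".format(ret))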
|
import binascii
import logging
import subprocess
import sys
import threading
logger = logging.getLogger(__name__)
def xx(data):
return binascii.hexlify(data).decode('ascii')
def indentify_byte_stream(indent_bytes, stream):
need_indent_before_data = True
stdout_buffer = sys.stdout.buffer if sys.version_info >= (3,) else sys.stdout
while True:
data = stream.read(4096)
if not data:
break
if need_indent_before_data:
stdout_buffer.write(indent_bytes)
if data.endswith(b'\n'):
need_indent_before_data = True
stdout_buffer.write(data[:-1].replace(b'\n', b'\n' + indent_bytes))
stdout_buffer.write(b'\n')
else:
need_indent_before_data = False
stdout_buffer.write(data.replace(b'\n', b'\n' + indent_bytes))
stdout_buffer.flush()
|
MIT License
|
pymeasure/pymeasure
|
pymeasure/instruments/ni/virtualbench.py
|
VirtualBench.validate_dmm_function
|
python
|
def validate_dmm_function(self, dmm_function):
try:
pyvb.DmmFunction(dmm_function)
except Exception:
try:
dmm_function = pyvb.DmmFunction[dmm_function.upper()]
except Exception:
raise ValueError(
"DMM Function may be 0-5, 'DC_VOLTS'," +
" 'AC_VOLTS', 'DC_CURRENT', 'AC_CURRENT'," +
" 'RESISTANCE' or 'DIODE'")
return dmm_function
|
Check if DMM function *dmm_function* exists
:param dmm_function: DMM function index or name:
- ``'DC_VOLTS'``, ``'AC_VOLTS'``
- ``'DC_CURRENT'``, ``'AC_CURRENT'``
- ``'RESISTANCE'``
- ``'DIODE'``
:type dmm_function: int or str
:return: DMM function index to pass to the instrument
:rtype: int
|
https://github.com/pymeasure/pymeasure/blob/658d8fb9a02bdb62f64cc3838875c0de12f49ca1/pymeasure/instruments/ni/virtualbench.py#L546-L570
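The validation falls back from lookup-by-value to lookup-by-name. A standalone sketch of the same accept-index-or-name pattern, using a stand-in enum rather than pyvirtualbench.DmmFunction:
from enum import Enum

class DmmFunction(Enum):      # stand-in for pyvb.DmmFunction
    DC_VOLTS = 0
    AC_VOLTS = 1
    DC_CURRENT = 2
    AC_CURRENT = 3
    RESISTANCE = 4
    DIODE = 5

def validate_dmm_function(dmm_function):
    try:
        return DmmFunction(dmm_function)                    # accept 0-5
    except ValueError:
        try:
            return DmmFunction[str(dmm_function).upper()]   # accept 'dc_volts', ...
        except KeyError:
            raise ValueError("DMM function may be 0-5 or one of the member names")

print(validate_dmm_function(3).name)         # AC_CURRENT
print(validate_dmm_function("diode").value)  # 5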
|
import logging
import re
from ctypes import (c_bool, c_size_t, c_double, c_uint8, c_int32, c_uint32,
c_int64, c_uint64, c_wchar, c_wchar_p, Structure, c_int,
cdll, byref)
from datetime import datetime, timezone, timedelta
import numpy as np
import pandas as pd
from pymeasure.instruments import Instrument, RangeException
from pymeasure.instruments.validators import (
strict_discrete_set, strict_discrete_range,
truncated_discrete_set, strict_range
)
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
try:
import pyvirtualbench as pyvb
except ModuleNotFoundError as err:
log.info('Failed loading the pyvirtualbench package. '
+ 'Check the NI VirtualBench documentation on how to '
+ 'install this external dependency. '
+ 'ImportError: {}'.format(err))
raise
class VirtualBench_Direct(pyvb.PyVirtualBench):
def __init__(self, device_name='', name='VirtualBench'):
self.device_name = device_name
self.name = name
self.nilcicapi = cdll.LoadLibrary("nilcicapi")
self.library_handle = c_int(0)
status = self.nilcicapi.niVB_Initialize(pyvb.NIVB_LIBRARY_VERSION,
byref(self.library_handle))
if (status != pyvb.Status.SUCCESS):
raise pyvb.PyVirtualBenchException(status, self.nilcicapi,
self.library_handle)
log.info("Initializing %s." % self.name)
def __del__(self):
self.release()
class VirtualBench():
def __init__(self, device_name='', name='VirtualBench'):
self.device_name = device_name
self.name = name
self.vb = pyvb.PyVirtualBench(self.device_name)
log.info("Initializing %s." % self.name)
def __del__(self):
if self.vb.library_handle is not None:
self.vb.release()
def shutdown(self):
log.info("Shutting down %s" % self.name)
self.vb.release()
self.isShutdown = True
def get_library_version(self):
return self.vb.get_library_version()
def convert_timestamp_to_values(self, timestamp):
if not isinstance(timestamp, pyvb.Timestamp):
raise ValueError("{0} is not a VirtualBench Timestamp object"
.format(timestamp))
return self.vb.convert_timestamp_to_values(timestamp)
def convert_values_to_timestamp(self, seconds_since_1970,
fractional_seconds):
return self.vb.convert_values_to_timestamp(seconds_since_1970,
fractional_seconds)
def convert_values_to_datetime(self, timestamp):
(seconds_since_1970,
fractional_seconds) = self.convert_timestamp_to_values(timestamp)
fractional_seconds = timedelta(seconds=fractional_seconds)
return (datetime.fromtimestamp(seconds_since_1970, timezone.utc) +
fractional_seconds)
def collapse_channel_string(self, names_in):
if not isinstance(names_in, str):
raise ValueError("{0} is not a string".format(names_in))
return self.vb.collapse_channel_string(names_in)
def expand_channel_string(self, names_in):
return self.vb.expand_channel_string(names_in)
def get_calibration_information(self):
return self.vb.get_calibration_information(self.device_name)
def acquire_digital_input_output(self, lines, reset=False):
reset = strict_discrete_set(reset, [True, False])
self.dio = self.DigitalInputOutput(self.vb, lines, reset,
vb_name=self.name)
def acquire_power_supply(self, reset=False):
reset = strict_discrete_set(reset, [True, False])
self.ps = self.PowerSupply(self.vb, reset, vb_name=self.name)
def acquire_function_generator(self, reset=False):
reset = strict_discrete_set(reset, [True, False])
self.fgen = self.FunctionGenerator(self.vb, reset, vb_name=self.name)
def acquire_mixed_signal_oscilloscope(self, reset=False):
reset = strict_discrete_set(reset, [True, False])
self.mso = self.MixedSignalOscilloscope(self.vb, reset,
vb_name=self.name)
def acquire_digital_multimeter(self, reset=False):
reset = strict_discrete_set(reset, [True, False])
self.dmm = self.DigitalMultimeter(self.vb, reset=reset,
vb_name=self.name)
class VirtualBenchInstrument():
def __init__(self, acquire_instr, reset,
instr_identifier, vb_name=''):
self._vb_handle = acquire_instr.__self__
self._device_name = self._vb_handle.device_name
self.name = (vb_name + " " + instr_identifier.upper()).strip()
log.info("Initializing %s." % self.name)
self._instrument_handle = acquire_instr(self._device_name, reset)
self.isShutdown = False
def __del__(self):
if self.isShutdown is not True:
self._instrument_handle.release()
def shutdown(self):
log.info("Shutting down %s" % self.name)
self._instrument_handle.release()
self.isShutdown = True
class DigitalInputOutput(VirtualBenchInstrument):
def __init__(self, virtualbench, lines, reset, vb_name=''):
self._device_name = virtualbench.device_name
self._vb_handle = virtualbench
self.name = vb_name + " DIO"
(self._line_names, self._line_numbers) = self.validate_lines(
lines, return_single_lines=True, validate_init=False)
log.info("Initializing %s." % self.name)
self.dio = self._vb_handle.acquire_digital_input_output(
self._line_names, reset)
self._instrument_handle = self.dio
self.isShutdown = False
def validate_lines(self, lines, return_single_lines=False,
validate_init=False):
def error(lines=lines):
raise ValueError(
"Line specification {0} is not valid!".format(lines))
lines = self._vb_handle.expand_channel_string(lines)[0]
lines = lines.split(', ')
return_lines = []
single_lines = []
for line in lines:
if line == 'trig':
device = self._device_name
else:
try:
(device, line) = re.match(
r'(.*)(?:/)(.+)',
line).groups()
except IndexError:
error()
if (line == 'trig') and (device == self._device_name):
single_lines.append('trig')
return_lines.append(self._device_name + '/' + line)
elif int(line) in range(0, 8):
line = int(line)
single_lines.append(line)
if device == 'dig':
pass
else:
try:
device = re.match(
r'(VB[0-9]{4}-[0-9a-zA-Z]{7})(?:/dig)',
device).groups()[0]
except (IndexError, KeyError):
error()
if not device == self._device_name:
error()
return_lines.append((self._device_name + '/dig/%d') % line)
else:
error()
if validate_init is True:
if line not in self._line_numbers:
raise ValueError(
"Digital Line {} is not initialized".format(line))
return_lines = ', '.join(return_lines)
return_lines = self._vb_handle.collapse_channel_string(
return_lines)[0]
if return_single_lines is True:
return return_lines, single_lines
else:
return return_lines
def tristate_lines(self, lines):
lines = self.validate_lines(lines, validate_init=True)
self.dio.tristate_lines(lines)
def export_signal(self, line, digitalSignalSource):
line = self.validate_lines(line, validate_init=True)
digitalSignalSource_values = {"FGEN START": 0, "MSO TRIGGER": 1}
digitalSignalSource = strict_discrete_set(
digitalSignalSource.upper(), digitalSignalSource_values)
digitalSignalSource = digitalSignalSource_values[
digitalSignalSource.upper()]
self.dio.export_signal(line, digitalSignalSource)
def query_line_configuration(self):
return self.dio.query_line_configuration()
def query_export_signal(self, line):
line = self.validate_lines(line, validate_init=True)
return self.dio.query_export_signal(line)
def write(self, lines, data):
lines = self.validate_lines(lines, validate_init=True)
try:
for value in data:
strict_discrete_set(value, [True, False])
except Exception:
raise ValueError(
"Data {} is not iterable (list or tuple).".format(data))
log.debug("{}: {} output {}.".format(self.name, lines, data))
self.dio.write(lines, data)
def read(self, lines):
lines = self.validate_lines(lines, validate_init=False)
return self.dio.read(lines)
def reset_instrument(self):
self.dio.reset_instrument()
class DigitalMultimeter(VirtualBenchInstrument):
def __init__(self, virtualbench, reset, vb_name=''):
super().__init__(
virtualbench.acquire_digital_multimeter,
reset, 'dmm', vb_name)
self.dmm = self._instrument_handle
@staticmethod
def validate_range(dmm_function, range):
ref_ranges = {
0: [0.1, 1, 10, 100, 300],
1: [0.1, 1, 10, 100, 265],
2: [0.01, 0.1, 1, 10],
3: [0.005, 0.05, 0.5, 5],
4: [100, 1000, 10000, 100000, 1000000,
10000000, 100000000],
}
range = truncated_discrete_set(range, ref_ranges[dmm_function])
return range
|
MIT License
|
evankepner/mutatest
|
mutatest/cache.py
|
get_cache_file_loc
|
python
|
def get_cache_file_loc(src_file: Union[str, Path]) -> Path:
if not src_file:
raise ValueError("src_file cannot be an empty string.")
cache_file = importlib.util.cache_from_source(str(src_file))
if os.path.islink(cache_file):
msg = (
"{} is a symlink and will be changed into a regular file if "
"import writes a byte-compiled file to it"
)
raise FileExistsError(msg.format(cache_file))
elif os.path.exists(cache_file) and not os.path.isfile(cache_file):
msg = (
"{} is a non-regular file and will be changed into a regular "
"one if import writes a byte-compiled file to it"
)
raise FileExistsError(msg.format(cache_file))
return Path(cache_file)
|
Use importlib to determine the cache file location for the source file.
Reference: https://github.com/python/cpython/blob/master/Lib/py_compile.py#L130
Args:
src_file: source file to determine cache file
Returns:
Path to the cache file
Raises:
FileExistsError: if the cache-file path is symlink or irregular file
|
https://github.com/evankepner/mutatest/blob/d1a2aa15e9da99b252ee771f0ce0170e8c4a3bd7/mutatest/cache.py#L48-L81
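A quick illustration of the importlib helper this function builds on: it maps a source path to its would-be __pycache__ location (the path below is made up and does not need to exist):
import importlib.util
from pathlib import Path

src = "pkg/module.py"  # hypothetical source file
cache_file = Path(importlib.util.cache_from_source(src))
print(cache_file)      # e.g. pkg/__pycache__/module.cpython-311.pyc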
|
import importlib
import logging
import os
from pathlib import Path
from py_compile import PycInvalidationMode
from typing import Union
LOGGER = logging.getLogger(__name__)
def check_cache_invalidation_mode() -> PycInvalidationMode:
if os.environ.get("SOURCE_DATE_EPOCH"):
raise EnvironmentError(
"SOURCE_DATE_EPOCH set, but only TIMESTAMP cache invalidation is supported. "
"Clear this environment variable so that timestamp invalidation of the Python "
"cache can be used to trigger mutations for the testing suite."
)
return PycInvalidationMode.TIMESTAMP
|
MIT License
|
centerforopenscience/osf.io
|
addons/gitlab/api.py
|
GitLabClient.branches
|
python
|
def branches(self, repo_id, branch=None):
if branch:
return self.gitlab.projects.get(repo_id).branches.get(branch)
return self.gitlab.projects.get(repo_id).branches.list()
|
List a repo's branches or get a single branch.
https://docs.gitlab.com/ce/api/branches.html#list-repository-branches
:param repo_id: GitLab project ID
:param str branch: Branch name if getting a single branch
:return: List of branch objects, or a single branch object if ``branch`` is given
|
https://github.com/centerforopenscience/osf.io/blob/6552a01fe250997cd3eb67cf72fc7157d9bc5af6/addons/gitlab/api.py#L70-L83
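Hypothetical usage, inferred from the class as shown in the context below; the token, host and project id are placeholders and the OSF addon package must be importable:
from addons.gitlab.api import GitLabClient

client = GitLabClient(access_token="<token>", host="https://gitlab.com")
all_branches = client.branches(repo_id=123)          # list of branch objects
main = client.branches(repo_id=123, branch="main")   # single branch object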
|
from future.moves.urllib.parse import urlencode
import requests
import gitlab
import cachecontrol
from requests.adapters import HTTPAdapter
from rest_framework import status as http_status
from framework.exceptions import HTTPError
from addons.gitlab.exceptions import NotFoundError, AuthError
from addons.gitlab.settings import DEFAULT_HOSTS
https_cache = cachecontrol.CacheControlAdapter()
default_adapter = HTTPAdapter()
class GitLabClient(object):
def __init__(self, external_account=None, access_token=None, host=None):
self.access_token = getattr(external_account, 'oauth_key', None) or access_token
self.host = getattr(external_account, 'oauth_secret', None) or host or DEFAULT_HOSTS[0]
if self.access_token:
self.gitlab = gitlab.Gitlab(self.host, private_token=self.access_token)
else:
self.gitlab = gitlab.Gitlab(self.host)
def user(self, user=None):
try:
self.gitlab.auth()
except gitlab.GitlabGetError as exc:
raise AuthError(exc.error_message)
return self.gitlab.users.get(self.gitlab.user.id)
def repo(self, repo_id):
try:
return self.gitlab.projects.get(repo_id)
except gitlab.GitlabGetError as exc:
if exc.response_code == 404:
raise NotFoundError(exc.error_message)
else:
raise exc
except gitlab.GitlabAuthenticationError as exc:
raise AuthError(exc.error_message)
def repos(self, all=False):
try:
return self.gitlab.projects.list(membership=True, all=all)
except gitlab.GitlabAuthenticationError:
raise HTTPError(http_status.HTTP_403_FORBIDDEN, data={
'message_long': 'Your Gitlab token is deleted or invalid you may disconnect your Gitlab account and '
'reconnect with a valid token <a href="/settings/addons/">here</a>.'
})
|
Apache License 2.0
|
asuiu/pyxtension
|
py3/pyxtension/streams.py
|
_IStream.head
|
python
|
def head(self, n: int) -> 'stream[_K]':
return stream(itertools.islice(self, n))
|
Return a stream over the first n items
|
https://github.com/asuiu/pyxtension/blob/feb4abcc26dcc4d0d13c6e8be6899e3514ee0358/py3/pyxtension/streams.py#L773-L775
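Hypothetical quick usage, based on the stream API shown in the context below; head() is lazy, so only the first n elements of the source are consumed:
from pyxtension.streams import stream

first_three = stream(range(10)).head(3).toList()
print(first_three)  # [0, 1, 2]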
|
import collections
import io
import itertools
import math
import numbers
import pickle
import struct
import sys
import threading
from abc import ABC
from collections import abc, defaultdict
from functools import partial, reduce
from itertools import groupby
from multiprocessing import cpu_count, Pool
from operator import itemgetter
from queue import Queue
from random import shuffle
from types import GeneratorType
from typing import AbstractSet, Any, BinaryIO, Callable, Dict, Generator, Iterable, Iterator, List, Mapping, MutableSet, NamedTuple, Optional, overload, Set, Tuple, TypeVar, Union
from tblib import pickling_support
from pyxtension import validate, PydanticValidated
ifilter = filter
imap = map
izip = zip
xrange = range
from pyxtension.fileutils import openByExtension
from tqdm import tqdm
__author__ = 'ASU'
_K = TypeVar('_K')
_V = TypeVar('_V')
_T = TypeVar('_T')
_T_co = TypeVar('_T_co', covariant=True)
_IDENTITY_FUNC: Callable[[_T], _T] = lambda _: _
class ItrFromFunc(Iterable[_K]):
def __init__(self, f: Callable[[], Iterable[_K]]):
if callable(f):
self._f = f
else:
raise TypeError(
"Argument f to %s should be callable, but f.__class__=%s" % (str(self.__class__), str(f.__class__)))
def __iter__(self) -> Iterator[_T_co]:
return iter(self._f())
class EndQueue:
pass
class MapException:
def __init__(self, exc_info):
self.exc_info = exc_info
class TqdmMapper:
def __init__(self, *args, **kwargs) -> None:
self._tqdm = tqdm(*args, **kwargs)
def __call__(self, el: _K) -> _K:
self._tqdm.update()
return el
class _QElement(NamedTuple):
i: int
el: Any
class _IStream(Iterable[_K], ABC):
@staticmethod
def _init_itr(itr: Optional[Union[Iterator[_K], Callable[[], Iterable[_K]]]] = None) -> Tuple[
Optional[Iterable[_K]],
Optional[Callable[[], Iterable[_K]]]
]:
_f = None
if itr is None:
_itr = []
elif isinstance(itr, (abc.Iterable, abc.Iterator)) or hasattr(itr, '__iter__') or hasattr(itr, '__getitem__'):
_itr = itr
elif callable(itr):
_f = itr
_itr = None
else:
raise TypeError(
"Argument f to _IStream should be callable or iterable, but itr.__class__=%s" % (
str(itr.__class__)))
return _itr, _f
@staticmethod
def __fastmap_thread(f, qin, qout):
while True:
el = qin.get()
if isinstance(el, EndQueue):
qin.put(el)
return
try:
newEl = f(el)
qout.put(newEl)
except:
qout.put(MapException(sys.exc_info()))
@staticmethod
def __mtmap_thread(f, qin, qout):
while True:
q_el = qin.get()
if isinstance(q_el, EndQueue):
qin.put(q_el)
return
try:
newEl = f(q_el.el)
qout.put(_QElement(q_el.i, newEl))
except:
qout.put(MapException(sys.exc_info()))
@staticmethod
def __fastFlatMap_thread(f, qin, qout):
while True:
itr = qin.get()
if isinstance(itr, EndQueue):
qin.put(itr)
qout.put(EndQueue())
return
try:
newItr = f(itr)
for el in newItr:
qout.put(el)
except:
qout.put(MapException(sys.exc_info()))
def __fastmap_generator(self, f: Callable[[_K], _V], poolSize: int, bufferSize: int):
qin = Queue(bufferSize)
qout = Queue(max(bufferSize, poolSize + 1))
threadPool = [threading.Thread(target=_IStream.__fastmap_thread, args=(f, qin, qout)) for _ in range(poolSize)]
for t in threadPool:
t.start()
i = 0
itr = iter(self)
hasNext = True
while i < bufferSize and hasNext:
try:
el = next(itr)
i += 1
qin.put(el)
except StopIteration:
hasNext = False
try:
while 1:
try:
el = next(itr)
except StopIteration:
qin.put(EndQueue())
for t in threadPool:
t.join()
while not qout.empty():
newEl = qout.get()
if isinstance(newEl, MapException):
raise newEl.exc_info[0](newEl.exc_info[1]).with_traceback(newEl.exc_info[2])
yield newEl
break
else:
qin.put(el)
newEl = qout.get()
if isinstance(newEl, MapException):
raise newEl.exc_info[0](newEl.exc_info[1]).with_traceback(newEl.exc_info[2])
yield newEl
finally:
while not qin.empty():
qin.get()
qin.put(EndQueue())
while not qout.empty() or not qout.empty():
qout.get()
for t in threadPool:
t.join()
def __mtmap_generator(self, f: Callable[[_K], _V], poolSize: int, bufferSize: int):
qin = Queue(bufferSize)
qout = Queue(max(bufferSize, poolSize + 1))
threadPool = [threading.Thread(target=_IStream.__mtmap_thread, args=(f, qin, qout)) for _ in range(poolSize)]
for t in threadPool:
t.start()
in_i = 0
itr = iter(self)
hasNext = True
while in_i < bufferSize and hasNext:
try:
el = next(itr)
except StopIteration:
hasNext = False
else:
in_i += 1
qin.put(_QElement(in_i, el))
cache = {}
out_i = 1
def extract_all_from_cache():
nonlocal out_i
nonlocal in_i
nonlocal cache
while out_i in cache:
yield cache[out_i]
out_i += 1
def wait_for_all():
nonlocal out_i
nonlocal in_i
nonlocal cache
while not qout.empty():
q_el = qout.get()
if isinstance(q_el, MapException):
raise q_el.exc_info[0](q_el.exc_info[1]).with_traceback(q_el.exc_info[2])
cache[q_el.i] = q_el.el
for el in extract_all_from_cache():
yield el
validate(out_i == in_i + 1, "__mtmap_generator Expecting for all elements to be in cache")
try:
while 1:
try:
el = next(itr)
except StopIteration:
qin.put(EndQueue())
for t in threadPool:
t.join()
for el in wait_for_all():
yield el
break
else:
in_i += 1
qin.put(_QElement(in_i, el))
q_el = qout.get()
if isinstance(q_el, MapException):
raise q_el.exc_info[0](q_el.exc_info[1]).with_traceback(q_el.exc_info[2])
cache[q_el.i] = q_el.el
for el in extract_all_from_cache():
yield el
finally:
while not qin.empty():
qin.get()
qin.put(EndQueue())
while not qout.empty() or not qout.empty():
qout.get()
for t in threadPool:
t.join()
@staticmethod
def __fastFlatMap_input_thread(itr: Iterator[_K], qin: Queue):
while 1:
try:
el = next(itr)
except StopIteration:
qin.put(EndQueue())
return
else:
qin.put(el)
def __fastFlatMap_generator(self, predicate, poolSize: int, bufferSize: int):
qin = Queue(bufferSize)
qout = Queue(bufferSize * 2)
threadPool = [threading.Thread(target=_IStream.__fastFlatMap_thread, args=(predicate, qin, qout)) for i in
range(poolSize)]
for t in threadPool:
t.start()
i = 0
itr = iter(self)
hasNext = True
while i < bufferSize and hasNext:
try:
el = next(itr)
i += 1
qin.put(el)
except StopIteration:
hasNext = False
inputThread = threading.Thread(target=_IStream.__fastFlatMap_input_thread, args=(itr, qin))
inputThread.start()
qout_counter = 0
while qout_counter < len(threadPool):
newEl = qout.get()
if isinstance(newEl, MapException):
raise newEl.exc_info[0](newEl.exc_info[1]).with_traceback(newEl.exc_info[2])
if isinstance(newEl, EndQueue):
qout_counter += 1
if qout_counter >= len(threadPool):
inputThread.join()
for t in threadPool:
t.join()
while not qout.empty():
newEl = qout.get()
if isinstance(newEl, MapException):
raise newEl.exc_info[0](newEl.exc_info[1]).with_traceback(newEl.exc_info[2])
yield newEl
else:
yield newEl
@staticmethod
def exc_info_decorator(f: Callable[[_K], _V], el: _K) -> Union[MapException, _V]:
try:
return f(el)
except Exception as e:
pickling_support.install(e)
return MapException(sys.exc_info())
def __mp_pool_generator(self, f: Callable[[_K], _V], poolSize: int, bufferSize: int) -> Generator[_V, None, None]:
p = Pool(poolSize)
decorated_f_with_exc_passing = partial(self.exc_info_decorator, f)
for el in p.imap(decorated_f_with_exc_passing, self, chunksize=bufferSize):
if isinstance(el, MapException):
raise el.exc_info[0](el.exc_info[1]).with_traceback(el.exc_info[2])
yield el
p.close()
p.join()
def __mp_fast_pool_generator(self, f: Callable[[_K], _V], poolSize: int, bufferSize: int
) -> Generator[_V, None, None]:
p = Pool(poolSize)
try:
decorated_f_with_exc_passing = partial(self.exc_info_decorator, f)
for el in p.imap_unordered(decorated_f_with_exc_passing, iter(self), chunksize=bufferSize):
if isinstance(el, MapException):
raise el.exc_info[0](el.exc_info[1]).with_traceback(el.exc_info[2])
yield el
except GeneratorExit:
p.terminate()
finally:
p.close()
p.join()
@staticmethod
def __unique_generator(itr, f):
st = set()
for el in itr:
m_el = f(el)
if m_el not in st:
st.add(m_el)
yield el
def map(self, f: Callable[[_K], _V]) -> 'stream[_V]':
return stream(partial(map, f, self))
def starmap(self, f: Callable[[_K], _V]) -> 'stream[_V]':
return stream(partial(itertools.starmap, f, self))
def mpmap(self, f: Callable[[_K], _V], poolSize: int = cpu_count(),
bufferSize: Optional[int] = 1) -> 'stream[_V]':
if not isinstance(poolSize, int) or poolSize <= 0 or poolSize > 2 ** 12:
raise ValueError("poolSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
elif poolSize == 1:
return self.map(f)
if bufferSize is None:
bufferSize = poolSize * 2
if not isinstance(bufferSize, int) or bufferSize <= 0 or bufferSize > 2 ** 12:
raise ValueError("bufferSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
return stream(self.__mp_pool_generator(f, poolSize, bufferSize))
def mpfastmap(self, f: Callable[[_K], _V], poolSize: int = cpu_count(),
bufferSize: Optional[int] = 1) -> 'stream[_V]':
if not isinstance(poolSize, int) or poolSize <= 0 or poolSize > 2 ** 12:
raise ValueError("poolSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
elif poolSize == 1:
return self.map(f)
if bufferSize is None:
bufferSize = poolSize * 2
if not isinstance(bufferSize, int) or bufferSize <= 0 or bufferSize > 2 ** 12:
raise ValueError("bufferSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
return stream(self.__mp_fast_pool_generator(f, poolSize, bufferSize))
def fastmap(self, f: Callable[[_K], _V], poolSize: int = cpu_count(), bufferSize: Optional[int] = None) -> 'stream[_V]':
if not isinstance(poolSize, int) or poolSize <= 0 or poolSize > 2 ** 12:
raise ValueError("poolSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
elif poolSize == 1:
return self.map(f)
if bufferSize is None:
bufferSize = poolSize
if not isinstance(bufferSize, int) or bufferSize <= 0 or bufferSize > 2 ** 12:
raise ValueError("bufferSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
return stream(ItrFromFunc(lambda: self.__fastmap_generator(f, poolSize, bufferSize)))
def mtmap(self, f: Callable[[_K], _V], poolSize: int = cpu_count(),
bufferSize: Optional[int] = None) -> 'stream[_V]':
if not isinstance(poolSize, int) or poolSize <= 0 or poolSize > 2 ** 12:
raise ValueError("poolSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
elif poolSize == 1:
return self.map(f)
if bufferSize is None:
bufferSize = poolSize
if not isinstance(bufferSize, int) or bufferSize <= 0 or bufferSize > 2 ** 12:
raise ValueError("bufferSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
return stream(ItrFromFunc(lambda: self.__mtmap_generator(f, poolSize, bufferSize)))
def fastFlatMap(self, predicate: Callable[[_K], Iterable[_V]] = _IDENTITY_FUNC, poolSize: int = cpu_count(),
bufferSize: Optional[int] = None) -> 'stream[_V]':
if not isinstance(poolSize, int) or poolSize <= 0 or poolSize > 2 ** 12:
raise ValueError("poolSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
elif poolSize == 1:
return self.flatMap(predicate)
if bufferSize is None:
bufferSize = poolSize
if not isinstance(bufferSize, int) or bufferSize <= 0 or bufferSize > 2 ** 12:
raise ValueError("bufferSize should be an integer between 1 and 2^12. Received: %s" % str(poolSize))
return stream(lambda: self.__fastFlatMap_generator(predicate, poolSize, bufferSize))
def enumerate(self) -> 'stream[Tuple[int,_K]]':
return stream(zip(range(0, sys.maxsize), self))
def flatMap(self, predicate: Callable[[_K], Iterable[_V]] = _IDENTITY_FUNC) -> 'stream[_V]':
if id(predicate) == id(_IDENTITY_FUNC):
return stream(ItrFromFunc(lambda: itertools.chain.from_iterable(self)))
return stream(ItrFromFunc(lambda: itertools.chain.from_iterable(self.map(predicate))))
def filter(self, predicate: Optional[Callable[[_K], bool]] = None) -> 'stream[_K]':
return stream(ItrFromFunc(lambda: filter(predicate, self)))
def reversed(self) -> 'stream[_K]':
try:
return stream(self.__reversed__())
except AttributeError:
return stream(lambda: reversed(self.toList()))
def exists(self, f: Callable[[_K], bool]) -> bool:
for e in self:
if f(e):
return True
return False
def keyBy(self, keyfunc: Callable[[_K], _V] = _IDENTITY_FUNC) -> 'stream[Tuple[_K, _V]]':
return self.map(lambda h: (keyfunc(h), h))
def keystream(self: 'stream[Tuple[_T,_V]]') -> 'stream[_T]':
return self.map(itemgetter(0))
def values(self: 'stream[Tuple[_T,_V]]') -> 'stream[_V]':
return self.map(itemgetter(1))
def groupBy(self, keyfunc: Callable[[_K], _T] = _IDENTITY_FUNC) -> 'slist[Tuple[_T, slist[_K]]]':
h = defaultdict(slist)
for v in self:
h[keyfunc(v)].append(v)
return slist(h.items())
@staticmethod
def __stream_on_second_el(t: Tuple[_K, Iterable[_T]]) -> 'Tuple[_K, stream[_T]]':
return t[0], stream(t[1])
@staticmethod
def __slist_on_second_el(t: Tuple[_K, Iterable[_T]]) -> 'Tuple[_K, slist[_T]]':
return t[0], slist(t[1])
def groupBySorted(self, keyfunc: Optional[Callable[[_K], _T]] = None) -> 'stream[Tuple[_T, stream[_K]]]':
return stream(partial(groupby, iterable=self, key=keyfunc)).map(self.__stream_on_second_el)
def groupBySortedToList(self, keyfunc: Callable[[_K], _T] = _IDENTITY_FUNC) -> 'stream[Tuple[_T, slist[_K]]]':
return stream(partial(groupby, iterable=self, key=keyfunc)).map(self.__slist_on_second_el)
def countByValue(self) -> 'sdict[_K,int]':
return sdict(collections.Counter(self))
def distinct(self) -> 'stream[_K]':
return self.unique()
@overload
def reduce(self, f: Callable[[_K, _K], _K], init: Optional[_K] = None) -> _K:
...
@overload
def reduce(self, f: Callable[[_T, _K], _T], init: _T = None) -> _T:
...
@overload
def reduce(self, f: Callable[[Union[_K, _T], _K], _T], init: Optional[_T] = None) -> _T:
...
@overload
def reduce(self, f: Callable[[Union[_K, _T], _K], _T], init: Optional[_K] = None) -> _T:
...
@overload
def reduce(self, f: Callable[[_T, _K], _T], init: _T = None) -> _T:
...
def reduce(self, f, init=None):
if init is None:
return reduce(f, self)
else:
return reduce(f, self, init)
def transform(self, f: Callable[[Iterable[_K]], Iterable[_V]]) ->'stream[_V]':
return stream(partial(f, self))
def shuffle(self, random=None) -> 'slist[_K]':
lst = self.toList()
shuffle(lst, random=random)
return lst
def toSet(self) -> 'sset[_K]':
return sset(self)
def toList(self) -> 'slist[_K]':
return slist(self)
def sorted(self, key=None, reverse=False):
return slist(sorted(self, key=key, reverse=reverse))
def toMap(self: 'stream[Tuple[_T,_V]]') -> 'sdict[_T,_V]':
return sdict(self)
def toSumCounter(self: 'stream[Tuple[_T,_V]]') -> 'sdict[_T,_V]':
res = sdict()
for k, v in self:
if k in res:
res[k] += v
else:
res[k] = v
return res
def toJson(self) -> 'JsonList':
from pyxtension.Json import JsonList
return JsonList(self)
@overload
def __getitem__(self, i: slice) -> 'stream[_K]':
...
@overload
def __getitem__(self, i: int) -> _K:
...
def __getitem__(self, i: Union[slice, int]):
if isinstance(i, slice):
return self.__getslice(i.start, i.stop, i.step)
else:
tk = 0
while tk < i:
self.next()
tk += 1
return self.next()
def __getslice(self, start: Optional[int] = None,
stop: Optional[int] = None,
step: Optional[int] = None) -> 'stream[_K]':
return stream(lambda: itertools.islice(self, start, stop, step))
def __add__(self, other) -> 'stream[_K]':
if not isinstance(other, (ItrFromFunc, stream)):
othItr = stream(lambda: other)
else:
othItr = other
if isinstance(self._itr, (ItrFromFunc, stream)):
i = self._itr
elif self._itr is None:
i = ItrFromFunc(self._f)
else:
i = ItrFromFunc(lambda: self._itr)
return stream(partial(itertools.chain.from_iterable, (i, othItr)))
def __iadd__(self, other) -> 'stream[_K]':
if not isinstance(other, (ItrFromFunc, stream)):
othItr = stream(lambda: other)
else:
othItr = other
if isinstance(self._itr, ItrFromFunc):
i = self._itr
elif self._itr is None:
i = ItrFromFunc(self._f)
else:
j = self._itr
i = ItrFromFunc(lambda: j)
self._itr, self._f = self._init_itr(partial(itertools.chain.from_iterable, (i, othItr)))
return self
def size(self) -> int:
try:
return len(self)
except TypeError:
pass
return sum(1 for _ in iter(self))
def join(self, f: Callable[[_K], _V] = None) -> Union[_K, str]:
if f is None:
return ''.join(self)
elif isinstance(f, str):
return f.join(self)
else:
itr = iter(self)
r = next(itr)
last = r
while True:
try:
n = next(itr)
r += f(last)
last = n
r += n
except StopIteration:
break
return r
def mkString(self, c) -> str:
return self.join(c)
def batch(self, size: int) -> 'stream[_K]':
def batch_gen(itr):
while True:
batch = slist(itertools.islice(itr, 0, size))
if not batch:
break
yield batch
return stream(lambda: stream(batch_gen(iter(self))))
def take(self, n: int) -> 'stream[_K]':
def gen(other_gen: GeneratorType, n):
count = 0
while count < n:
if count < n:
try:
el = next(other_gen)
count += 1
yield el
except StopIteration:
break
other_gen.close()
if isinstance(self._itr, GeneratorType):
return stream(gen(self._itr, n))
else:
return self[:n]
def takeWhile(self, predicate: Callable[[_K], bool]) -> 'stream[_K]':
def gen(other_gen: Union[GeneratorType, Iterable[_K]], pred: Callable[[_K], bool]):
isGen = True
if not isinstance(other_gen, GeneratorType):
isGen = False
other_gen = iter(other_gen)
while True:
try:
el = next(other_gen)
if pred(el):
yield el
else:
break
except StopIteration:
break
if isGen: other_gen.close()
return stream(gen(self, predicate))
def dropWhile(self, predicate: Callable[[_K], bool]):
return stream(partial(itertools.dropwhile, predicate, self))
def next(self) -> _K:
if self._itr is not None:
try:
n = next(self._itr)
return n
except TypeError:
self._itr = iter(self._itr)
return next(self._itr)
else:
self._itr = iter(self)
self._f = None
return next(self._itr)
|
MIT License
|
rwl/muntjac
|
muntjac/ui/form.py
|
Form.addField
|
python
|
def addField(self, propertyId, field):
self.registerField(propertyId, field)
self.attachField(propertyId, field)
self.requestRepaint()
|
Registers the field with the form and adds the field to the
form layout.
The property id must not already be used in the form.
This field is added to the layout using the L{attachField} method.
@param propertyId:
the property id of the field.
@param field:
the field which should be added to the form.
|
https://github.com/rwl/muntjac/blob/8db97712edd81b4d25deaaa48587d2a08010f2c8/muntjac/ui/form.py#L374-L389
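Not Muntjac itself -- a stripped-down, self-contained sketch of the same register-then-attach-then-repaint division of labour that addField() delegates to:
class TinyForm:
    def __init__(self):
        self._fields = {}   # propertyId -> field   (registerField)
        self._layout = []   # render order          (attachField)

    def add_field(self, property_id, field):
        if property_id in self._fields:
            raise ValueError("property id already used in the form")
        self._fields[property_id] = field      # register
        self._layout.append(property_id)       # attach to the layout
        self.request_repaint()                 # schedule a repaint

    def request_repaint(self):
        print("repaint requested; layout:", self._layout)

form = TinyForm()
form.add_field("name", object())
form.add_field("email", object())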
|
from warnings import warn
from muntjac.data.item import IEditor, IItem
from muntjac.data.buffered import IBuffered, SourceException
from muntjac.data.validatable import IValidatable
from muntjac.data.validator import InvalidValueException
from muntjac.data.property import IValueChangeListener
from muntjac.event.action_manager import ActionManager
from muntjac.ui.abstract_field import AbstractField
from muntjac.ui.default_field_factory import DefaultFieldFactory
from muntjac.ui.abstract_component import AbstractComponent
from muntjac.ui.field import IField
from muntjac.ui.custom_layout import CustomLayout
from muntjac.ui.component_container import IComponentContainer
from muntjac.ui.grid_layout import GridLayout
from muntjac.ui.field_factory import IFieldFactory
from muntjac.ui.form_layout import FormLayout
from muntjac.ui.horizontal_layout import HorizontalLayout
from muntjac.ui.select import Select
from muntjac.event.action import INotifier
from muntjac.terminal.composite_error_message import CompositeErrorMessage
class Form(AbstractField, IEditor, IBuffered, IItem, IValidatable, INotifier):
CLIENT_WIDGET = None
def __init__(self, formLayout=None, fieldFactory=None):
self._propertyValue = None
self._layout = None
self._itemDatasource = None
self._propertyIds = list()
self._currentBufferedSourceException = None
self._writeThrough = True
self._readThrough = True
self._fields = dict()
self._ownProperties = dict()
self._fieldFactory = None
self._visibleItemProperties = None
self._fieldValueChangeListener = FieldValueChangeListener(self)
self._formFooter = None
self._validationVisibleOnCommit = True
self._gridlayoutCursorX = -1
self._gridlayoutCursorY = -1
self._ownActionManager = ActionManager(self)
if fieldFactory is None:
fieldFactory = DefaultFieldFactory.get()
super(Form, self).__init__()
self.setLayout(formLayout)
self.setFormFieldFactory(fieldFactory)
self.setValidationVisible(False)
self.setWidth(100, self.UNITS_PERCENTAGE)
def paintContent(self, target):
super(Form, self).paintContent(target)
self._layout.paint(target)
if self._formFooter is not None:
self._formFooter.paint(target)
if self._ownActionManager is not None:
self._ownActionManager.paintActions(None, target)
def changeVariables(self, source, variables):
super(Form, self).changeVariables(source, variables)
if self._ownActionManager is not None:
self._ownActionManager.handleActions(variables, self)
def getErrorMessage(self):
validationError = None
if self.isValidationVisible():
for i in self._propertyIds:
f = self._fields.get(i)
if isinstance(f, AbstractComponent):
field = f
validationError = field.getErrorMessage()
if validationError is not None:
if '' == str(validationError):
e = InvalidValueException(field.getCaption())
validationError = e
break
elif isinstance(f, IField) and not f.isValid():
e = InvalidValueException(field.getCaption())
validationError = e
break
if (self.getComponentError() is None and validationError is None
and self._currentBufferedSourceException is None):
return None
return CompositeErrorMessage([self.getComponentError(),
validationError, self._currentBufferedSourceException])
def setValidationVisibleOnCommit(self, makeVisible):
self._validationVisibleOnCommit = makeVisible
def isValidationVisibleOnCommit(self):
return self._validationVisibleOnCommit
def commit(self):
problems = None
if not self.isInvalidCommitted() and not self.isValid():
if self._validationVisibleOnCommit:
self.setValidationVisible(True)
self.validate()
for i in self._propertyIds:
try:
f = self._fields.get(i)
if not f.isReadOnly():
f.commit()
except SourceException, e:
if problems is None:
problems = list()
problems.append(e)
if problems is None:
if self._currentBufferedSourceException is not None:
self._currentBufferedSourceException = None
self.requestRepaint()
return
causes = [None] * len(problems)
index = 0
for i in problems:
causes[index] = i
index += 1
e = SourceException(self, causes)
self._currentBufferedSourceException = e
self.requestRepaint()
raise e
def discard(self):
problems = None
for i in self._propertyIds:
try:
self._fields.get(i).discard()
except SourceException, e:
if problems is None:
problems = list()
problems.append(e)
if problems is None:
if self._currentBufferedSourceException is not None:
self._currentBufferedSourceException = None
self.requestRepaint()
return
causes = [None] * len(problems)
index = 0
for i in problems:
causes[index] = i
index += 1
e = SourceException(self, causes)
self._currentBufferedSourceException = e
self.requestRepaint()
raise e
def isModified(self):
for i in self._propertyIds:
f = self._fields.get(i)
if f is not None and f.isModified():
return True
return False
def isReadThrough(self):
return self._readThrough
def isWriteThrough(self):
return self._writeThrough
def setReadThrough(self, readThrough):
if readThrough != self._readThrough:
self._readThrough = readThrough
for i in self._propertyIds:
self._fields.get(i).setReadThrough(readThrough)
def setWriteThrough(self, writeThrough):
if writeThrough != self._writeThrough:
self._writeThrough = writeThrough
for i in self._propertyIds:
self._fields.get(i).setWriteThrough(writeThrough)
def addItemProperty(self, idd, prop):
if (idd is None) or (prop is None):
raise ValueError, 'Id and property must be non-null'
if self._propertyIds.contains(idd):
return False
self._propertyIds.add(idd)
self._ownProperties[idd] = prop
field = self._fieldFactory.createField(self, idd, self)
if field is None:
return False
field.setPropertyDataSource(prop)
self.addField(idd, field)
return True
|
Apache License 2.0
|