repository_name | function_path | function_identifier | language | function | docstring | function_url | context | license
---|---|---|---|---|---|---|---|---|
qarnot/qarnot-sdk-python
|
qarnot/bucket.py
|
Bucket.sync_files
|
python
|
def sync_files(self, files, verbose=False, remote=None):
class Comparable(object):
def __init__(self, name_, e_tag, filepath_):
self.name = name_
self.e_tag = e_tag
self.filepath = filepath_
def __repr__(self):
return "Name {0}, ETag {1}".format(self.name, self.e_tag)
def __eq__(self, other):
return self.name == other.name and self.e_tag == other.e_tag
def __hash__(self):
return hash(self.name) ^ hash(self.e_tag)
def aws_md5sum(sourcepath):
if os.stat(sourcepath).st_size < AWS_UPLOAD_MAX_SIZE:
hash_md5 = hashlib.md5()
with open(sourcepath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return "\"{0}\"".format(hash_md5.hexdigest())
else:
md5s = []
with open(sourcepath, 'rb') as fp:
while True:
data = fp.read(AWS_UPLOAD_PART_SIZE)
if not data:
break
md5s.append(hashlib.md5(data))
digests = b"".join(m.digest() for m in md5s)
new_md5 = hashlib.md5(digests)
return "\"{0}-{1}\"".format(new_md5.hexdigest(), len(md5s))
def localtocomparable(name_, filepath_, remote):
if remote is not None:
name_ = os.path.join(remote, name_.lstrip('/'))
return Comparable(name_.replace(os.sep, '/'), aws_md5sum(filepath_), filepath_)
def objectsummarytocomparable(object_):
return Comparable(object_.key, object_.e_tag, None)
localfiles = set()
for name, filepath in files.items():
localfiles.add(localtocomparable(name.replace(os.path.sep, '/'), filepath, remote))
remotefiles = set(map(objectsummarytocomparable, self.list_files()))
adds = localfiles - remotefiles
removes = remotefiles - localfiles
for file_ in removes:
if remote is not None and not file_.name.startswith(remote):
continue
renames = (x for x in adds if x.e_tag == file_.e_tag and all(rem.name != x.name for rem in remotefiles))
for dup in renames:
if verbose:
print("Copy", file_.name, "to", dup.name)
self.copy_file(file_.name, dup.name)
if verbose:
print("Remove:", file_.name)
self.delete_file(file_.name)
remotefiles = set(map(objectsummarytocomparable, self.list_files()))
sadds = sorted(adds, key=lambda x: x.e_tag)
groupedadds = (list(g) for _, g in itertools.groupby(sadds, lambda x: x.e_tag))
for entry in groupedadds:
try:
rem = next(x for x in remotefiles if x.e_tag == entry[0].e_tag)
if rem.name == entry[0].name:
continue
if verbose:
print("Copy", rem.name, "to", entry[0].name)
self.copy_file(rem.name, entry[0].name)
except StopIteration:
if verbose:
print("Upload:", entry[0].filepath, '->', entry[0].name)
self.add_file(entry[0].filepath, entry[0].name)
for link in entry[1:]:
if verbose:
print("Copy", entry[0].name, "to", link.name)
self.copy_file(entry[0].name, link.name)
|
Synchronize files with the remote bucket.
:param dict files: Dictionary of files to synchronize
:param bool verbose: Print information about synchronization operations
:param str remote: path of the directory on the remote node (defaults to *local*)
Dictionary keys are the remote file paths; values are the local file paths.
.. warning::
Local changes are reflected on the server: a file present in the
bucket but not in the local directory will be deleted from the bucket,
while a file present in the directory but not in the bucket will be uploaded.
.. note::
The following attributes are compared to decide whether a file needs
to be synchronized:
* name
* e_tag (an MD5-based checksum, computed the same way S3 does for multipart uploads)
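For orientation, a minimal usage sketch. Everything here is a placeholder (token, bucket name, file paths), and it assumes the SDK's Connection accepts a client_token keyword; it is not a definitive recipe.
    from qarnot.connection import Connection
    from qarnot.bucket import Bucket

    conn = Connection(client_token="YOUR_TOKEN")   # placeholder credentials (assumed kwarg)
    bucket = Bucket(conn, "my-input-bucket")       # created if it does not exist (create=True)
    files = {
        "inputs/data.csv": "/tmp/data.csv",        # remote path -> local path
        "inputs/config.json": "/tmp/config.json",
    }
    bucket.sync_files(files, verbose=True, remote="job-1")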
|
https://github.com/qarnot/qarnot-sdk-python/blob/733b8b31fd016c7dba50cdd0e39c7434ea1bd4e9/qarnot/bucket.py#L231-L344
|
from __future__ import print_function
import hashlib
import sys
import os
import posixpath
import shutil
import itertools
from typing import Optional
from boto3.s3.transfer import TransferConfig
from itertools import groupby
from operator import attrgetter
from . import _util
from .exceptions import BucketStorageUnavailableException
from .storage import Storage
from .advanced_bucket import Filtering, ResourcesTransformation
AWS_UPLOAD_MAX_SIZE = 8 * 1024 * 1024
AWS_UPLOAD_PART_SIZE = 8 * 1024 * 1024
s3_multipart_config = TransferConfig(
multipart_threshold=AWS_UPLOAD_MAX_SIZE,
multipart_chunksize=AWS_UPLOAD_PART_SIZE,
max_concurrency=10,
num_download_attempts=10,
)
class Bucket(Storage):
def __init__(self, connection, name, create=True, filtering: Filtering = None, resources_transformation: ResourcesTransformation = None):
if connection.s3client is None:
raise BucketStorageUnavailableException()
self._connection = connection
self._uuid = name
self._filtering: Optional[Filtering] = filtering or Filtering()
self._resources_transformation: Optional[ResourcesTransformation] = resources_transformation or ResourcesTransformation()
if create:
self._connection.s3client.create_bucket(Bucket=name)
def to_json(self):
as_json_dict = dict()
as_json_dict['bucketName'] = self._uuid
as_json_dict['filtering'] = None if self._filtering is None else self._filtering.to_json()
as_json_dict['resourcesTransformation'] = (None if self._resources_transformation is None
else self._resources_transformation.to_json())
return as_json_dict
@classmethod
def from_json(cls, connection, json_bucket):
filtering = Filtering.from_json(json_bucket['filtering'])
resource_transformation = ResourcesTransformation.from_json(json_bucket['resourcesTransformation'])
bucket = Bucket(connection, json_bucket['bucketName'], create=False, filtering=filtering, resources_transformation=resource_transformation)
return bucket
def with_filtering(self, filtering):
bucket_copy = Bucket(self._connection, self._uuid,
create=False, filtering=Filtering(), resources_transformation=self._resources_transformation)
bucket_copy._filtering.append(filtering)
return bucket_copy
def with_resource_transformation(self, resource):
bucket_copy = Bucket(self._connection, self._uuid,
create=False, filtering=self._filtering, resources_transformation=ResourcesTransformation())
bucket_copy._resources_transformation.append(resource)
return bucket_copy
@classmethod
def _retrieve(cls, connection, bucket_uuid):
return connection.retrieve_bucket(uuid=bucket_uuid)
def delete(self):
n = 1000
bucket = self._connection.s3resource.Bucket(self._uuid)
versioned_bucket = self._connection.s3resource.BucketVersioning(self._uuid)
if versioned_bucket.status == 'None':
objectlist = list(bucket.objects.all())
if sys.version_info >= (3, 0):
listofobjectlist = [[{'Key': x.key} for x in objectlist[i:i + n]] for i in range(0, len(objectlist), n)]
else:
listofobjectlist = [[{'Key': x.key} for x in objectlist[i:i + n]] for i in xrange(0, len(objectlist), n)]
else:
objectlist = list(bucket.object_versions.all())
if sys.version_info >= (3, 0):
listofobjectlist = [[{'Key': x.key, 'VersionId': x.id} for x in objectlist[i:i + n]] for i in range(0, len(objectlist), n)]
else:
listofobjectlist = [[{'Key': x.key, 'VersionId': x.id} for x in objectlist[i:i + n]] for i in xrange(0, len(objectlist), n)]
for item in listofobjectlist:
bucket.delete_objects(
Delete={
'Objects': item
}
)
self._connection.s3client.delete_bucket(Bucket=self._uuid)
def list_files(self):
bucket = self._connection.s3resource.Bucket(self._uuid)
return bucket.objects.all()
def directory(self, directory=''):
bucket = self._connection.s3resource.Bucket(self._uuid)
return bucket.objects.filter(Prefix=directory)
def sync_directory(self, directory, verbose=False, remote=None):
if not directory.endswith(os.sep):
directory += os.sep
filesdict = {}
for root, _, files in os.walk(directory):
root = _util.decode(root)
files = map(_util.decode, files)
for file_ in files:
filepath = os.path.join(root, file_)
name = filepath[len(directory):]
name = name.replace(os.sep, '/')
filesdict[name] = filepath
self.sync_files(filesdict, verbose, remote)
|
Apache License 2.0
|
michaelhly/solana-py
|
src/spl/token/instructions.py
|
get_associated_token_address
|
python
|
def get_associated_token_address(owner: PublicKey, mint: PublicKey) -> PublicKey:
key, _ = PublicKey.find_program_address(
seeds=[bytes(owner), bytes(TOKEN_PROGRAM_ID), bytes(mint)], program_id=ASSOCIATED_TOKEN_PROGRAM_ID
)
return key
|
Derives the associated token address for the given wallet address and token mint.
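A short illustrative sketch: the derivation is purely local (no RPC call), and the two addresses below are just well-known placeholder keys, not recommendations.
    from solana.publickey import PublicKey
    from spl.token.instructions import get_associated_token_address

    owner = PublicKey("11111111111111111111111111111112")            # placeholder wallet address
    mint = PublicKey("So11111111111111111111111111111111111111112")  # wrapped-SOL mint
    ata = get_associated_token_address(owner, mint)
    print(ata)  # deterministic program-derived address for this wallet/mint pair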
|
https://github.com/michaelhly/solana-py/blob/b3855f39f25bd632f78c9f33c4bcd15a574b3320/src/spl/token/instructions.py#L925-L930
|
from enum import IntEnum
from typing import Any, List, NamedTuple, Optional, Union
from solana.publickey import PublicKey
from solana.system_program import SYS_PROGRAM_ID
from solana.sysvar import SYSVAR_RENT_PUBKEY
from solana.transaction import AccountMeta, TransactionInstruction
from solana.utils.validate import validate_instruction_keys, validate_instruction_type
from spl.token._layouts import INSTRUCTIONS_LAYOUT, InstructionType
from spl.token.constants import ASSOCIATED_TOKEN_PROGRAM_ID, TOKEN_PROGRAM_ID
class AuthorityType(IntEnum):
MINT_TOKENS = 0
FREEZE_ACCOUNT = 1
ACCOUNT_OWNER = 2
CLOSE_ACCOUNT = 3
class InitializeMintParams(NamedTuple):
decimals: int
program_id: PublicKey
mint: PublicKey
mint_authority: PublicKey
freeze_authority: Optional[PublicKey] = None
class InitializeAccountParams(NamedTuple):
program_id: PublicKey
account: PublicKey
mint: PublicKey
owner: PublicKey
class InitializeMultisigParams(NamedTuple):
program_id: PublicKey
multisig: PublicKey
m: int
signers: List[PublicKey] = []
class TransferParams(NamedTuple):
program_id: PublicKey
source: PublicKey
dest: PublicKey
owner: PublicKey
amount: int
signers: List[PublicKey] = []
class ApproveParams(NamedTuple):
program_id: PublicKey
source: PublicKey
delegate: PublicKey
owner: PublicKey
amount: int
signers: List[PublicKey] = []
class RevokeParams(NamedTuple):
program_id: PublicKey
account: PublicKey
owner: PublicKey
signers: List[PublicKey] = []
class SetAuthorityParams(NamedTuple):
program_id: PublicKey
account: PublicKey
authority: AuthorityType
current_authority: PublicKey
signers: List[PublicKey] = []
new_authority: Optional[PublicKey] = None
class MintToParams(NamedTuple):
program_id: PublicKey
mint: PublicKey
dest: PublicKey
mint_authority: PublicKey
amount: int
signers: List[PublicKey] = []
class BurnParams(NamedTuple):
program_id: PublicKey
account: PublicKey
mint: PublicKey
owner: PublicKey
amount: int
signers: List[PublicKey] = []
class CloseAccountParams(NamedTuple):
program_id: PublicKey
account: PublicKey
dest: PublicKey
owner: PublicKey
signers: List[PublicKey] = []
class FreezeAccountParams(NamedTuple):
program_id: PublicKey
account: PublicKey
mint: PublicKey
authority: PublicKey
multi_signers: List[PublicKey] = []
class ThawAccountParams(NamedTuple):
program_id: PublicKey
account: PublicKey
mint: PublicKey
authority: PublicKey
multi_signers: List[PublicKey] = []
class TransferCheckedParams(NamedTuple):
program_id: PublicKey
source: PublicKey
mint: PublicKey
dest: PublicKey
owner: PublicKey
amount: int
decimals: int
signers: List[PublicKey] = []
class ApproveCheckedParams(NamedTuple):
program_id: PublicKey
source: PublicKey
mint: PublicKey
delegate: PublicKey
owner: PublicKey
amount: int
decimals: int
signers: List[PublicKey] = []
class MintToCheckedParams(NamedTuple):
program_id: PublicKey
mint: PublicKey
dest: PublicKey
mint_authority: PublicKey
amount: int
decimals: int
signers: List[PublicKey] = []
class BurnCheckedParams(NamedTuple):
program_id: PublicKey
mint: PublicKey
account: PublicKey
owner: PublicKey
amount: int
decimals: int
signers: List[PublicKey] = []
def __parse_and_validate_instruction(
instruction: TransactionInstruction,
expected_keys: int,
expected_type: InstructionType,
) -> Any:
validate_instruction_keys(instruction, expected_keys)
data = INSTRUCTIONS_LAYOUT.parse(instruction.data)
validate_instruction_type(data, expected_type)
return data
def decode_initialize_mint(instruction: TransactionInstruction) -> InitializeMintParams:
parsed_data = __parse_and_validate_instruction(instruction, 2, InstructionType.INITIALIZE_MINT)
return InitializeMintParams(
decimals=parsed_data.args.decimals,
program_id=instruction.program_id,
mint=instruction.keys[0].pubkey,
mint_authority=PublicKey(parsed_data.args.mint_authority),
freeze_authority=PublicKey(parsed_data.args.freeze_authority)
if parsed_data.args.freeze_authority_option
else None,
)
def decode_initialize_account(instruction: TransactionInstruction) -> InitializeAccountParams:
_ = __parse_and_validate_instruction(instruction, 4, InstructionType.INITIALIZE_ACCOUNT)
return InitializeAccountParams(
program_id=instruction.program_id,
account=instruction.keys[0].pubkey,
mint=instruction.keys[1].pubkey,
owner=instruction.keys[2].pubkey,
)
def decode_initialize_multisig(instruction: TransactionInstruction) -> InitializeMultisigParams:
parsed_data = __parse_and_validate_instruction(instruction, 2, InstructionType.INITIALIZE_MULTISIG)
num_signers = parsed_data.args.m
validate_instruction_keys(instruction, 2 + num_signers)
return InitializeMultisigParams(
program_id=instruction.program_id,
multisig=instruction.keys[0].pubkey,
signers=[signer.pubkey for signer in instruction.keys[-num_signers:]],
m=num_signers,
)
def decode_transfer(instruction: TransactionInstruction) -> TransferParams:
parsed_data = __parse_and_validate_instruction(instruction, 3, InstructionType.TRANSFER)
return TransferParams(
program_id=instruction.program_id,
source=instruction.keys[0].pubkey,
dest=instruction.keys[1].pubkey,
owner=instruction.keys[2].pubkey,
signers=[signer.pubkey for signer in instruction.keys[3:]],
amount=parsed_data.args.amount,
)
def decode_approve(instruction: TransactionInstruction) -> ApproveParams:
parsed_data = __parse_and_validate_instruction(instruction, 3, InstructionType.APPROVE)
return ApproveParams(
program_id=instruction.program_id,
source=instruction.keys[0].pubkey,
delegate=instruction.keys[1].pubkey,
owner=instruction.keys[2].pubkey,
signers=[signer.pubkey for signer in instruction.keys[3:]],
amount=parsed_data.args.amount,
)
def decode_revoke(instruction: TransactionInstruction) -> RevokeParams:
_ = __parse_and_validate_instruction(instruction, 2, InstructionType.REVOKE)
return RevokeParams(
program_id=instruction.program_id,
account=instruction.keys[0].pubkey,
owner=instruction.keys[1].pubkey,
signers=[signer.pubkey for signer in instruction.keys[2:]],
)
def decode_set_authority(instruction: TransactionInstruction) -> SetAuthorityParams:
parsed_data = __parse_and_validate_instruction(instruction, 2, InstructionType.SET_AUTHORITY)
return SetAuthorityParams(
program_id=instruction.program_id,
account=instruction.keys[0].pubkey,
authority=AuthorityType(parsed_data.args.authority_type),
new_authority=PublicKey(parsed_data.args.new_authority) if parsed_data.args.new_authority_option else None,
current_authority=instruction.keys[1].pubkey,
signers=[signer.pubkey for signer in instruction.keys[2:]],
)
def decode_mint_to(instruction: TransactionInstruction) -> MintToParams:
parsed_data = __parse_and_validate_instruction(instruction, 3, InstructionType.MINT_TO)
return MintToParams(
program_id=instruction.program_id,
amount=parsed_data.args.amount,
mint=instruction.keys[0].pubkey,
dest=instruction.keys[1].pubkey,
mint_authority=instruction.keys[2].pubkey,
signers=[signer.pubkey for signer in instruction.keys[3:]],
)
def decode_burn(instruction: TransactionInstruction) -> BurnParams:
parsed_data = __parse_and_validate_instruction(instruction, 3, InstructionType.BURN)
return BurnParams(
program_id=instruction.program_id,
amount=parsed_data.args.amount,
account=instruction.keys[0].pubkey,
mint=instruction.keys[1].pubkey,
owner=instruction.keys[2].pubkey,
signers=[signer.pubkey for signer in instruction.keys[3:]],
)
def decode_close_account(instruction: TransactionInstruction) -> CloseAccountParams:
_ = __parse_and_validate_instruction(instruction, 3, InstructionType.CLOSE_ACCOUNT)
return CloseAccountParams(
program_id=instruction.program_id,
account=instruction.keys[0].pubkey,
dest=instruction.keys[1].pubkey,
owner=instruction.keys[2].pubkey,
signers=[signer.pubkey for signer in instruction.keys[3:]],
)
def decode_freeze_account(instruction: TransactionInstruction) -> FreezeAccountParams:
_ = __parse_and_validate_instruction(instruction, 3, InstructionType.FREEZE_ACCOUNT)
return FreezeAccountParams(
program_id=instruction.program_id,
account=instruction.keys[0].pubkey,
mint=instruction.keys[1].pubkey,
authority=instruction.keys[2].pubkey,
multi_signers=[signer.pubkey for signer in instruction.keys[3:]],
)
def decode_thaw_account(instruction: TransactionInstruction) -> ThawAccountParams:
_ = __parse_and_validate_instruction(instruction, 3, InstructionType.THAW_ACCOUNT)
return ThawAccountParams(
program_id=instruction.program_id,
account=instruction.keys[0].pubkey,
mint=instruction.keys[1].pubkey,
authority=instruction.keys[2].pubkey,
multi_signers=[signer.pubkey for signer in instruction.keys[3:]],
)
def decode_transfer_checked(instruction: TransactionInstruction) -> TransferCheckedParams:
parsed_data = __parse_and_validate_instruction(instruction, 4, InstructionType.TRANSFER2)
return TransferCheckedParams(
program_id=instruction.program_id,
amount=parsed_data.args.amount,
decimals=parsed_data.args.decimals,
source=instruction.keys[0].pubkey,
mint=instruction.keys[1].pubkey,
dest=instruction.keys[2].pubkey,
owner=instruction.keys[3].pubkey,
signers=[signer.pubkey for signer in instruction.keys[4:]],
)
def decode_approve_checked(instruction: TransactionInstruction) -> ApproveCheckedParams:
parsed_data = __parse_and_validate_instruction(instruction, 4, InstructionType.APPROVE2)
return ApproveCheckedParams(
program_id=instruction.program_id,
amount=parsed_data.args.amount,
decimals=parsed_data.args.decimals,
source=instruction.keys[0].pubkey,
mint=instruction.keys[1].pubkey,
delegate=instruction.keys[2].pubkey,
owner=instruction.keys[3].pubkey,
signers=[signer.pubkey for signer in instruction.keys[4:]],
)
def decode_mint_to_checked(instruction: TransactionInstruction) -> MintToCheckedParams:
parsed_data = __parse_and_validate_instruction(instruction, 3, InstructionType.MINT_TO2)
return MintToCheckedParams(
program_id=instruction.program_id,
amount=parsed_data.args.amount,
decimals=parsed_data.args.decimals,
mint=instruction.keys[0].pubkey,
dest=instruction.keys[1].pubkey,
mint_authority=instruction.keys[2].pubkey,
signers=[signer.pubkey for signer in instruction.keys[3:]],
)
def decode_burn_checked(instruction: TransactionInstruction) -> BurnCheckedParams:
parsed_data = __parse_and_validate_instruction(instruction, 3, InstructionType.BURN2)
return BurnCheckedParams(
program_id=instruction.program_id,
amount=parsed_data.args.amount,
decimals=parsed_data.args.decimals,
account=instruction.keys[0].pubkey,
mint=instruction.keys[1].pubkey,
owner=instruction.keys[2].pubkey,
signers=[signer.pubkey for signer in instruction.keys[3:]],
)
def __add_signers(keys: List[AccountMeta], owner: PublicKey, signers: List[PublicKey]) -> None:
if signers:
keys.append(AccountMeta(pubkey=owner, is_signer=False, is_writable=False))
for signer in signers:
keys.append(AccountMeta(pubkey=signer, is_signer=True, is_writable=False))
else:
keys.append(AccountMeta(pubkey=owner, is_signer=True, is_writable=False))
def __burn_instruction(params: Union[BurnParams, BurnCheckedParams], data: Any) -> TransactionInstruction:
keys = [
AccountMeta(pubkey=params.account, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.mint, is_signer=False, is_writable=True),
]
__add_signers(keys, params.owner, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def __freeze_or_thaw_instruction(
params: Union[FreezeAccountParams, ThawAccountParams], instruction_type: InstructionType
) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=instruction_type, args=None))
keys = [
AccountMeta(pubkey=params.account, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.mint, is_signer=False, is_writable=False),
]
__add_signers(keys, params.authority, params.multi_signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def __mint_to_instruction(params: Union[MintToParams, MintToCheckedParams], data: Any) -> TransactionInstruction:
keys = [
AccountMeta(pubkey=params.mint, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.dest, is_signer=False, is_writable=True),
]
__add_signers(keys, params.mint_authority, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def initialize_mint(params: InitializeMintParams) -> TransactionInstruction:
freeze_authority, opt = (params.freeze_authority, 1) if params.freeze_authority else (PublicKey(0), 0)
data = INSTRUCTIONS_LAYOUT.build(
dict(
instruction_type=InstructionType.INITIALIZE_MINT,
args=dict(
decimals=params.decimals,
mint_authority=bytes(params.mint_authority),
freeze_authority_option=opt,
freeze_authority=bytes(freeze_authority),
),
)
)
return TransactionInstruction(
keys=[
AccountMeta(pubkey=params.mint, is_signer=False, is_writable=True),
AccountMeta(pubkey=SYSVAR_RENT_PUBKEY, is_signer=False, is_writable=False),
],
program_id=params.program_id,
data=data,
)
def initialize_account(params: InitializeAccountParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.INITIALIZE_ACCOUNT, args=None))
return TransactionInstruction(
keys=[
AccountMeta(pubkey=params.account, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.mint, is_signer=False, is_writable=False),
AccountMeta(pubkey=params.owner, is_signer=False, is_writable=False),
AccountMeta(pubkey=SYSVAR_RENT_PUBKEY, is_signer=False, is_writable=False),
],
program_id=params.program_id,
data=data,
)
def initialize_multisig(params: InitializeMultisigParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.INITIALIZE_MULTISIG, args=dict(m=params.m)))
keys = [
AccountMeta(pubkey=params.multisig, is_signer=False, is_writable=True),
AccountMeta(pubkey=SYSVAR_RENT_PUBKEY, is_signer=False, is_writable=False),
]
for signer in params.signers:
keys.append(AccountMeta(pubkey=signer, is_signer=False, is_writable=False))
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def transfer(params: TransferParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.TRANSFER, args=dict(amount=params.amount)))
keys = [
AccountMeta(pubkey=params.source, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.dest, is_signer=False, is_writable=True),
]
__add_signers(keys, params.owner, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def approve(params: ApproveParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.APPROVE, args=dict(amount=params.amount)))
keys = [
AccountMeta(pubkey=params.source, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.delegate, is_signer=False, is_writable=False),
]
__add_signers(keys, params.owner, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def revoke(params: RevokeParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.REVOKE, args=None))
keys = [AccountMeta(pubkey=params.account, is_signer=False, is_writable=True)]
__add_signers(keys, params.owner, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def set_authority(params: SetAuthorityParams) -> TransactionInstruction:
new_authority, opt = (params.new_authority, 1) if params.new_authority else (PublicKey(0), 0)
data = INSTRUCTIONS_LAYOUT.build(
dict(
instruction_type=InstructionType.SET_AUTHORITY,
args=dict(authority_type=params.authority, new_authority_option=opt, new_authority=bytes(new_authority)),
)
)
keys = [AccountMeta(pubkey=params.account, is_signer=False, is_writable=True)]
__add_signers(keys, params.current_authority, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def mint_to(params: MintToParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.MINT_TO, args=dict(amount=params.amount)))
return __mint_to_instruction(params, data)
def burn(params: BurnParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.BURN, args=dict(amount=params.amount)))
return __burn_instruction(params, data)
def close_account(params: CloseAccountParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.CLOSE_ACCOUNT, args=None))
keys = [
AccountMeta(pubkey=params.account, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.dest, is_signer=False, is_writable=True),
]
__add_signers(keys, params.owner, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def freeze_account(params: FreezeAccountParams) -> TransactionInstruction:
return __freeze_or_thaw_instruction(params, InstructionType.FREEZE_ACCOUNT)
def thaw_account(params: ThawAccountParams) -> TransactionInstruction:
return __freeze_or_thaw_instruction(params, InstructionType.THAW_ACCOUNT)
def transfer_checked(params: TransferCheckedParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(
dict(instruction_type=InstructionType.TRANSFER2, args=dict(amount=params.amount, decimals=params.decimals))
)
keys = [
AccountMeta(pubkey=params.source, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.mint, is_signer=False, is_writable=False),
AccountMeta(pubkey=params.dest, is_signer=False, is_writable=True),
]
__add_signers(keys, params.owner, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def approve_checked(params: ApproveCheckedParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(
dict(instruction_type=InstructionType.APPROVE2, args=dict(amount=params.amount, decimals=params.decimals))
)
keys = [
AccountMeta(pubkey=params.source, is_signer=False, is_writable=True),
AccountMeta(pubkey=params.mint, is_signer=False, is_writable=False),
AccountMeta(pubkey=params.delegate, is_signer=False, is_writable=False),
]
__add_signers(keys, params.owner, params.signers)
return TransactionInstruction(keys=keys, program_id=params.program_id, data=data)
def mint_to_checked(params: MintToCheckedParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(
dict(instruction_type=InstructionType.MINT_TO2, args=dict(amount=params.amount, decimals=params.decimals))
)
return __mint_to_instruction(params, data)
def burn_checked(params: BurnCheckedParams) -> TransactionInstruction:
data = INSTRUCTIONS_LAYOUT.build(
dict(instruction_type=InstructionType.BURN2, args=dict(amount=params.amount, decimals=params.decimals))
)
return __burn_instruction(params, data)
|
MIT License
|
zmk5/ros2_robotarium
|
robotarium_node/robotarium_node/robotarium_abc.py
|
RobotariumABC.set_velocities
|
python
|
def set_velocities(self, ids, velocities) -> None:
idxs = np.where(np.abs(velocities[0, :]) > self.max_linear_velocity)
velocities[0, idxs] = self.max_linear_velocity * np.sign(velocities[0, idxs])
idxs = np.where(np.abs(velocities[1, :]) > self.max_angular_velocity)
velocities[1, idxs] = self.max_angular_velocity * np.sign(velocities[1, idxs])
self.velocities = velocities
|
Set the velocities of the robots.
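The method saturates each command at the robot limits while preserving sign. A standalone sketch of that clipping, using the limit values set in __init__ above (max_angular_velocity works out to roughly 3.64 rad/s for the listed wheel radius and robot diameter):
    import numpy as np

    max_linear_velocity = 0.2       # m/s, as in __init__
    max_angular_velocity = 3.64     # rad/s, approximate value of the formula above

    velocities = np.array([[0.5, -0.1],   # row 0: linear commands
                           [5.0,  1.0]])  # row 1: angular commands
    idxs = np.where(np.abs(velocities[0, :]) > max_linear_velocity)
    velocities[0, idxs] = max_linear_velocity * np.sign(velocities[0, idxs])
    idxs = np.where(np.abs(velocities[1, :]) > max_angular_velocity)
    velocities[1, idxs] = max_angular_velocity * np.sign(velocities[1, idxs])
    print(velocities)  # [[0.2, -0.1], [3.64, 1.0]]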
|
https://github.com/zmk5/ros2_robotarium/blob/6fc31bb6d3cdd89cf0b0b063d674897287ec1f8d/robotarium_node/robotarium_node/robotarium_abc.py#L149-L158
|
from typing import Dict
from abc import ABC, abstractmethod
import numpy as np
import robotarium_node.utilities.misc as misc
class RobotariumABC(ABC):
def __init__(
self,
number_of_robots: int = -1,
show_figure: bool = True,
sim_in_real_time: bool = True,
initial_conditions: np.ndarray = np.array([])) -> None:
assert isinstance(number_of_robots, int), f'The number of robots used argument (number_of_robots) provided to create the Robotarium object must be an integer type. Received type {type(number_of_robots).__name__}.'
assert isinstance(initial_conditions, np.ndarray), f'The initial conditions array argument (initial_conditions) provided to create the Robotarium object must be a numpy ndarray. Received type {type(initial_conditions).__name__}.'
assert isinstance(show_figure, bool), f'The display figure window argument (show_figure) provided to create the Robotarium object must be boolean type. Received type {type(show_figure).__name__}.'
assert isinstance(sim_in_real_time, bool), f'The simulation running at 0.033s per loop argument (sim_in_real_time) provided to create the Robotarium object must be boolean type. Received type {type(sim_in_real_time).__name__}.'
assert (0 <= number_of_robots <= 50), f'Requested {number_of_robots} robots to be used when creating the Robotarium object. The deployed number of robots must be between 0 and 50.'
if (initial_conditions.size > 0):
assert initial_conditions.shape == (3, number_of_robots), f'Initial conditions provided when creating the Robotarium object must be of size 3xN, where N is the number of robots used. Expected a 3 x {number_of_robots} array but received a {initial_conditions.shape[0]} x {initial_conditions.shape[1]} array.'
self.number_of_robots = number_of_robots
self.show_figure = show_figure
self.initial_conditions = initial_conditions
self.boundaries = [-1.6, -1, 3.2, 2]
self.file_path = None
self.current_file_size = 0
self.time_step = 0.033
self.robot_diameter = 0.11
self.wheel_radius = 0.016
self.base_length = 0.105
self.max_linear_velocity = 0.2
self.max_angular_velocity = 2 * (
(self.wheel_radius / self.robot_diameter) *
(self.max_linear_velocity / self.wheel_radius))
self.max_wheel_velocity = self.max_linear_velocity / self.wheel_radius
self.robot_radius = self.robot_diameter/2
self.velocities = np.zeros((2, number_of_robots))
self.poses = self.initial_conditions
if self.initial_conditions.size == 0:
self.poses = misc.generate_initial_conditions(
self.number_of_robots, spacing=0.2, width=2.5, height=1.5)
self.left_led_commands = []
self.right_led_commands = []
""" # Visualization
self.figure = []
self.axes = []
self.left_led_patches = []
self.right_led_patches = []
self.chassis_patches = []
self.right_wheel_patches = []
self.left_wheel_patches = []
if self.show_figure:
self.figure, self.axes = plt.subplots()
self.axes.set_axis_off()
for i in range(number_of_robots):
p = patches.RegularPolygon(
self.poses[:2, i], 4, math.sqrt(2) * self.robot_radius,
self.poses[2, i] + math.pi / 4,
facecolor='#FFD700', edgecolor='k')
rled = patches.Circle(
self.poses[:2, i] + 0.75 * self.robot_radius *
np.array((np.cos(self.poses[2, i]), np.sin(self.poses[2, i])) +
0.04 * np.array((-np.sin(self.poses[2, i] + math.pi / 2), np.cos(self.poses[2, i]+math.pi/2)))),
self.robot_radius / 5, fill=False)
lled = patches.Circle(
self.poses[:2, i] + 0.75 * self.robot_radius *
np.array((np.cos(self.poses[2, i]), np.sin(self.poses[2, i])) + 0.015 *
np.array((-np.sin(self.poses[2, i] + math.pi / 2), np.cos(self.poses[2, i] + math.pi / 2)))),
self.robot_radius / 5, fill=False)
rw = patches.Circle(
self.poses[:2, i] + self.robot_radius * np.array((np.cos(self.poses[2, i] + math.pi / 2), np.sin(self.poses[2, i] + math.pi / 2))) +
0.04 * np.array((-np.sin(self.poses[2, i] + math.pi / 2), np.cos(self.poses[2, i] + math.pi / 2))),
0.02, facecolor='k')
lw = patches.Circle(
self.poses[:2, i]+self.robot_radius*np.array((np.cos(self.poses[2, i] - math.pi / 2), np.sin(self.poses[2, i] - math.pi / 2))) +
0.04 * np.array((-np.sin(self.poses[2, i] + math.pi / 2))),
0.02, facecolor='k')
#lw = patches.RegularPolygon(self.poses[:2, i]+self.robot_radius*np.array((np.cos(self.poses[2, i]-math.pi/2), np.sin(self.poses[2, i]-math.pi/2)))+\
# 0.035*np.array((-np.sin(self.poses[2, i]+math.pi/2), np.cos(self.poses[2, i]+math.pi/2))),\
# 4, math.sqrt(2)*0.02, self.poses[2,i]+math.pi/4, facecolor='k')
self.chassis_patches.append(p)
self.left_led_patches.append(lled)
self.right_led_patches.append(rled)
self.right_wheel_patches.append(rw)
self.left_wheel_patches.append(lw)
self.axes.add_patch(rw)
self.axes.add_patch(lw)
self.axes.add_patch(p)
self.axes.add_patch(lled)
self.axes.add_patch(rled)
# Draw arena
self.boundary_patch = self.axes.add_patch(
patches.Rectangle(
self.boundaries[:2],
self.boundaries[2],
self.boundaries[3],
fill=False))
self.axes.set_xlim(
self.boundaries[0] - 0.1,
self.boundaries[0] + self.boundaries[2] + 0.1)
self.axes.set_ylim(
self.boundaries[1] - 0.1,
self.boundaries[1] + self.boundaries[3] + 0.1)
plt.ion()
plt.show()
plt.subplots_adjust(
left=-0.03, right=1.03, bottom=-0.03, top=1.03, wspace=0,
hspace=0) """
|
MIT License
|
luhsra/chash
|
experiments/lib.py
|
read_hash_directory
|
python
|
def read_hash_directory(hash_dir, remove_keys = []):
ret = []
for root, dirnames, filenames in os.walk(hash_dir):
for filename in fnmatch.filter(filenames, '*.info'):
with open(os.path.join(root, filename)) as fd:
data = "[%s]" % (",".join(fd.readlines()))
data = eval(data)
for record in data:
for key in remove_keys:
del record[key]
ret.extend(data)
print(len(ret))
return ret
|
Read in all records from a hash dir
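A hypothetical call for illustration only; the import path, directory name, and key name are made up and depend on how the experiments package is laid out.
    from lib import read_hash_directory   # assuming experiments/ is on sys.path

    # Collect every record from *.info files under ./hash-results, dropping one bulky field.
    records = read_hash_directory("./hash-results", remove_keys=["compile-time"])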
|
https://github.com/luhsra/chash/blob/12b1afdc17533ac48285474d9daaf92eb122e426/experiments/lib.py#L8-L22
|
import os
import fnmatch
import time
from versuchung.execute import shell, CommandFailed, shell_failok
import logging
import tempfile
|
Apache License 2.0
|
workshoft/capablanca-api
|
api/models.py
|
Board.from_fen
|
python
|
def from_fen(cls, fen):
board = chess.Board(fen)
board_data = {
"fen": fen,
"turn": board.turn,
"castling_xfen": board.castling_xfen(),
"castling_rights": board.castling_rights,
"ep_square": board.ep_square,
"fullmove_number": board.fullmove_number,
"halfmove_clock": board.halfmove_clock,
}
return cls(**board_data)
|
A FEN string contains the position part board_fen(), the turn, the castling part (castling_rights),
the en passant square (ep_square), the halfmove_clock and the fullmove_number.
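A quick sketch, assuming it runs inside a Django project with this app configured; the Board instance is built in memory and not saved.
    # Standard starting position.
    start_fen = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
    board = Board.from_fen(start_fen)
    print(board.turn, board.fullmove_number, board.halfmove_clock)  # True 1 0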
|
https://github.com/workshoft/capablanca-api/blob/28b81b164dee79d13299d7569c5382e1af4e2c91/api/models.py#L158-L176
|
import uuid
import chess
from annoying.fields import AutoOneToOneField
from django.conf import settings
from django.db.models import (
CASCADE,
BooleanField,
CharField,
DateTimeField,
ForeignKey,
IntegerField,
Model,
OneToOneField,
TextField,
UUIDField,
)
class Elo(Model):
rating = IntegerField(default=1200)
previous_rating = IntegerField(default=1200)
wins = IntegerField(default=0)
losses = IntegerField(default=0)
draws = IntegerField(default=0)
updated_at = DateTimeField(auto_now=True)
uuid = UUIDField(default=uuid.uuid4)
player = AutoOneToOneField(
settings.AUTH_USER_MODEL, on_delete=CASCADE, null=True, related_name="elo"
)
def update_rating(self, new_rating):
self.previous_rating = self.rating
self.rating = new_rating
self.save()
class Result(Model):
WHITE_WINS = "White wins"
BLACK_WINS = "Black wins"
DRAW = "Draw"
IN_PROGRESS = "In progress"
ABANDONED = "Abandoned"
ADJUDICATION = "Adjudication"
DEATH = "Death"
EMERGENCY = "Emergency"
NORMAL = "Normal"
RULES_INFRACTION = "Rules infraction"
TIME_FORFEIT = "Time forfeit"
UNTERMINATED = "Unterminated"
RESULT_CHOICES = [
(WHITE_WINS, "White wins"),
(BLACK_WINS, "Black wins"),
(DRAW, "Drawn game"),
(
IN_PROGRESS,
"Game still in progress, game abandoned, or result otherwise unknown",
),
]
TERMINATION_CHOICES = [
(ABANDONED, "Abandoned game."),
(ADJUDICATION, "Result due to third party adjudication process."),
(DEATH, "One or both players died during the course of this game."),
(EMERGENCY, "Game concluded due to unforeseen circumstances."),
(NORMAL, "Game terminated in a normal fashion."),
(
RULES_INFRACTION,
"Administrative forfeit due to losing player's failure to observe either the Laws of Chess or the event regulations.",
),
(
TIME_FORFEIT,
"Loss due to losing player's failure to meet time control requirements.",
),
(UNTERMINATED, "Game not terminated."),
]
result = TextField(choices=RESULT_CHOICES, default=IN_PROGRESS,)
termination = TextField(choices=TERMINATION_CHOICES, default=UNTERMINATED,)
def __str__(self):
return self.result
class Board(Model):
BLACK_PIECES = ["q", "k", "b", "n", "r", "p"]
WHITE_PIECES = [p.upper() for p in BLACK_PIECES]
fen = TextField(default="rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
board_fen = TextField(default="rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR")
board_fen_flipped = TextField(default="RNBKQBNR/PPPPPPPP/8/8/8/8/pppppppp/rnbkqbnr")
ep_square = IntegerField(null=True)
castling_xfen = TextField(null=True)
castling_rights = TextField(null=True)
turn = BooleanField(default=True)
fullmove_number = IntegerField(default=1)
halfmove_clock = IntegerField(default=0)
updated_at = DateTimeField(auto_now=True)
game_uuid = UUIDField(default=uuid.uuid4)
def update(self, chess_board, *args, **kwargs):
attributes = [
"ep_square",
"turn",
"fullmove_number",
"halfmove_clock",
]
for i in attributes:
setattr(self, i, getattr(chess_board, i))
chess_board_flip_vert = chess_board.transform(chess.flip_vertical)
chess_board_rotated = chess_board_flip_vert.transform(chess.flip_horizontal)
self.castling_rights = str(chess_board.castling_rights)
self.fen = chess_board.fen()
self.board_fen = chess_board.board_fen()
self.board_fen_flipped = chess_board_rotated.board_fen()
self.castling_xfen = chess_board.castling_xfen()
self.save()
@property
def move_stack(self):
if self.move_set:
moves = self.move_set.all()
move_ucis = [m.uci() for m in moves]
return move_ucis
return []
@classmethod
|
Apache License 2.0
|
jbusecke/xarrayutils
|
xarrayutils/xgcm_utils.py
|
dll_dist
|
python
|
def dll_dist(dlon, dlat, lon, lat):
distance_1deg_equator = 111000.0
dx = dlon * xr.ufuncs.cos(xr.ufuncs.deg2rad(lat)) * distance_1deg_equator
dy = ((lon * 0) + 1) * dlat * distance_1deg_equator
return dx, dy
|
Converts lat/lon differentials into distances in meters
PARAMETERS
----------
dlon : xarray.DataArray longitude differentials
dlat : xarray.DataArray latitude differentials
lon : xarray.DataArray longitude values
lat : xarray.DataArray latitude values
RETURNS
-------
dx : xarray.DataArray distance inferred from dlon
dy : xarray.DataArray distance inferred from dlat
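A worked example under the assumption that the installed xarray version still ships xr.ufuncs (used by the function above): at 60°N a degree of longitude is half as long as at the equator, while a degree of latitude stays about 111 km.
    import numpy as np
    import xarray as xr
    from xarrayutils.xgcm_utils import dll_dist

    lat = xr.DataArray(np.array([0.0, 60.0]), dims="y")
    lon = xr.zeros_like(lat)
    dlon = xr.ones_like(lat)   # 1-degree spacing in both directions
    dlat = xr.ones_like(lat)
    dx, dy = dll_dist(dlon, dlat, lon, lat)
    # dx ≈ [111000, 55500] m, dy = [111000, 111000] m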
|
https://github.com/jbusecke/xarrayutils/blob/d575823d015afd8c9dceaa8d69c0deba43ae0e60/xarrayutils/xgcm_utils.py#L279-L298
|
import xarray as xr
from xarrayutils.weighted_operations import weighted_mean
def _get_name(coord):
if isinstance(coord, xr.DataArray):
return coord.name
elif isinstance(coord, str):
return coord
else:
raise ValueError(
"coord input not recognized.\
Needs to be xr.DataArray or str. Is %s"
% (type(coord))
)
def _get_axis_pos(grid, axis, da):
if axis not in grid.axes.keys():
return None
else:
co = grid.axes[axis].coords
return [k for k, v in co.items() if _get_name(v) in da.dims][0]
def _find_dim(grid, obj, axis):
if axis not in grid.axes.keys():
return None
else:
dimlist = list(grid.axes[axis].coords.values())
dimlist = [_get_name(d) for d in dimlist]
matches = [d for d in dimlist if d in obj.dims]
if len(matches) == 0:
return None
else:
return matches
def _infer_gridtype(grid, u, v, verbose=False):
u = u.copy()
v = v.copy()
u_x_pos = _get_axis_pos(grid, "X", u)
u_y_pos = _get_axis_pos(grid, "Y", u)
v_x_pos = _get_axis_pos(grid, "X", v)
v_y_pos = _get_axis_pos(grid, "Y", v)
if any([a in ["outer", "inner"] for a in [u_x_pos, u_y_pos, v_x_pos, v_y_pos]]):
raise RuntimeError("`inner` or `outer` grid positions are not supported yet.")
if verbose:
print(
"Found: (u @X=%s,Y=%s and v @X=%s,Y=%s)"
% (u_x_pos, u_y_pos, v_x_pos, v_y_pos)
)
if ((u_x_pos == "right") and (u_y_pos == "right")) and (
(v_x_pos == "right") and (v_y_pos == "right")
):
gridtype = "B"
elif (u_x_pos == "right" and u_y_pos == "center") and (
v_x_pos == "center" and v_y_pos == "right"
):
gridtype = "C"
else:
raise RuntimeError(
"Gridtype not recognized. \
Currently only supports \
B-grids(u @X=right,Y=right and v @X=right,Y=right) and \
C-grids (u @X=right,Y=center and v @X=center,Y=right). \
Found: (u @X=%s,Y=%s and v @X=%s,Y=%s)"
% (u_x_pos, u_y_pos, v_x_pos, v_y_pos)
)
return gridtype
def _check_dims(a, b, a_name):
if not all([dd in a.dims for dd in b.dims]):
raise RuntimeError(
"%s does not have the appropriate dimensions. \
Expected %s, but found %s"
% (a_name, list(b.dims), list(a.dims))
)
else:
return True
def _find_metric(da, dim_metric_list):
matches = [m for m in dim_metric_list if m in da.coords]
if len(matches) > 1:
raise ValueError(
"found more than one matching metric(%s), \
something is wrong with the `metric_list`"
% matches
)
elif len(matches) == 0:
return None
else:
return matches[0]
def w_mean(grid, dat, axis, dim_metric_list, verbose=False):
dat = dat.copy()
dim = _find_dim(grid, dat, axis)
if dim is None:
return dat
else:
metric = _find_metric(dat, dim_metric_list)
if verbose:
print(metric)
if metric is None:
return dat
else:
return weighted_mean(dat, dat.coords[metric], dim=dim)
def xgcm_weighted_mean(grid, dat, axis, dim_metric_list, verbose=False):
dat = dat.copy()
if isinstance(dat, xr.Dataset):
ds_mean = xr.Dataset()
for vv in dat.data_vars:
ds_mean[vv] = w_mean(grid, dat[vv], axis, dim_metric_list, verbose=verbose)
elif isinstance(dat, xr.DataArray):
ds_mean = w_mean(grid, dat, axis, dim_metric_list, verbose=verbose)
return ds_mean
def calculate_rel_vorticity(grid, u, v, dx, dy, area, gridtype=None):
u = u.copy()
v = v.copy()
dx = dx.copy()
dy = dy.copy()
if gridtype is None:
gridtype = _infer_gridtype(grid, u, v)
_check_dims(u, dx, "dx")
u_int = u * dx
_check_dims(v, dy, "dy")
v_int = v * dy
if gridtype == "B":
u_int = grid.interp(u_int, "X")
v_int = grid.interp(v_int, "Y")
dx_v = grid.diff(v_int, "X")
dy_u = grid.diff(u_int, "Y")
_check_dims(dx_v, area, "area and dv/dx")
_check_dims(dy_u, area, "area and du/dy")
zeta = (dx_v - dy_u) / area
zeta.name = "relative vorticity"
return zeta
def interp_all(grid, ds, target="center", keep_coords=True):
ds = ds.copy()
ds_new = xr.Dataset()
def _core_interp(da, grid):
for ax in grid.axes.keys():
ax_coords = [_get_name(a) for a in grid.axes[ax].coords.values()]
match = [a for a in da.dims if a in ax_coords]
if len(match) > 0:
pos = [
p for p, a in grid.axes[ax].coords.items() if _get_name(a) in match
]
if target not in pos:
da = grid.interp(da, ax, to=target)
return da
for vv in ds.data_vars:
ds_new[vv] = _core_interp(ds[vv], grid)
if keep_coords:
for co in ds.coords:
if co not in list(ds_new.coords):
ds_new.coords[co] = ds[co]
return ds_new
|
MIT License
|
openstack/ironic
|
ironic/drivers/modules/ansible/deploy.py
|
AnsibleDeploy.process_next_step
|
python
|
def process_next_step(self, task, step_type):
if step_type == 'deploy' and self.in_core_deploy_step(task):
manager_utils.notify_conductor_resume_deploy(task)
|
Start the next clean/deploy step if the previous one is complete.
:param task: a TaskManager instance
:param step_type: "clean" or "deploy"
|
https://github.com/openstack/ironic/blob/a4a6f26333be31b84a9b1a874dde506e61d407d3/ironic/drivers/modules/ansible/deploy.py#L457-L465
|
import json
import os
import shlex
from urllib import parse as urlparse
from ironic_lib import metrics_utils
from ironic_lib import utils as irlib_utils
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import tenacity
import yaml
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import faults
from ironic.common.i18n import _
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import steps as conductor_steps
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base
from ironic.drivers.modules import deploy_utils
LOG = log.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
OPTIONAL_PROPERTIES = {
'ansible_username': _('Deploy ramdisk username for Ansible. '
'This user must have passwordless sudo '
'permissions. Optional.'),
'ansible_key_file': _('Full path to private SSH key file. '
'If not specified, default keys for user running '
'ironic-conductor process will be used. '
'Note that for keys with password, those '
'must be pre-loaded into ssh-agent. '
'Optional.'),
'ansible_playbooks_path': _('Path to folder holding playbooks to use '
'for this node. Optional. '
'Default is set in ironic config.'),
'ansible_deploy_playbook': _('Name of the Ansible playbook file inside '
'the "ansible_playbooks_path" folder which '
'is used for node deployment. Optional.'),
'ansible_shutdown_playbook': _('Name of the Ansible playbook file inside '
'the "ansible_playbooks_path" folder which '
'is used for node shutdown. Optional.'),
'ansible_clean_playbook': _('Name of the Ansible playbook file inside '
'the "ansible_playbooks_path" folder which '
'is used for node cleaning. Optional.'),
'ansible_clean_steps_config': _('Name of the file inside the '
'"ansible_playbooks_path" folder with '
'cleaning steps configuration. Optional.'),
'ansible_python_interpreter': _('Absolute path to the python interpreter '
'on the managed machines. Optional.'),
}
COMMON_PROPERTIES = OPTIONAL_PROPERTIES
class PlaybookNotFound(exception.IronicException):
_msg_fmt = _('Failed to set ansible playbook for action %(action)s')
def _get_playbooks_path(node):
return node.driver_info.get('ansible_playbooks_path',
CONF.ansible.playbooks_path)
def _parse_ansible_driver_info(node, action='deploy'):
user = node.driver_info.get('ansible_username',
CONF.ansible.default_username)
key = node.driver_info.get('ansible_key_file',
CONF.ansible.default_key_file)
playbook = node.driver_info.get('ansible_%s_playbook' % action,
getattr(CONF.ansible,
'default_%s_playbook' % action,
None))
if not playbook:
raise PlaybookNotFound(action=action)
return os.path.basename(playbook), user, key
def _get_python_interpreter(node):
return node.driver_info.get('ansible_python_interpreter',
CONF.ansible.default_python_interpreter)
def _get_configdrive_path(basename):
return os.path.join(CONF.tempdir, basename + '.cndrive')
def _get_node_ip(task):
callback_url = task.node.driver_internal_info.get('agent_url', '')
return urlparse.urlparse(callback_url).netloc.split(':')[0]
def _prepare_extra_vars(host_list, variables=None):
nodes_var = []
for node_uuid, ip, user, extra in host_list:
nodes_var.append(dict(name=node_uuid, ip=ip, user=user, extra=extra))
extra_vars = dict(nodes=nodes_var)
if variables:
extra_vars.update(variables)
return extra_vars
def _run_playbook(node, name, extra_vars, key, tags=None, notags=None):
root = _get_playbooks_path(node)
playbook = os.path.join(root, name)
inventory = os.path.join(root, 'inventory')
ironic_vars = {'ironic': extra_vars}
python_interpreter = _get_python_interpreter(node)
if python_interpreter:
ironic_vars['ansible_python_interpreter'] = python_interpreter
args = [CONF.ansible.ansible_playbook_script, playbook,
'-i', inventory,
'-e', json.dumps(ironic_vars),
]
if CONF.ansible.config_file_path:
env = ['env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path]
args = env + args
if tags:
args.append('--tags=%s' % ','.join(tags))
if notags:
args.append('--skip-tags=%s' % ','.join(notags))
if key:
args.append('--private-key=%s' % key)
verbosity = CONF.ansible.verbosity
if verbosity is None and CONF.debug:
verbosity = 4
if verbosity:
args.append('-' + 'v' * verbosity)
if CONF.ansible.ansible_extra_args:
args.extend(shlex.split(CONF.ansible.ansible_extra_args))
try:
out, err = utils.execute(*args)
return out, err
except processutils.ProcessExecutionError as e:
raise exception.InstanceDeployFailure(reason=e)
def _calculate_memory_req(task):
image_source = task.node.instance_info['image_source']
image_size = images.download_size(task.context, image_source)
return image_size // units.Mi + CONF.ansible.extra_memory
def _parse_partitioning_info(node):
info = node.instance_info
i_info = {'label': deploy_utils.get_disk_label(node) or 'msdos'}
is_gpt = i_info['label'] == 'gpt'
unit = 'MiB'
partitions = {}
def add_partition(name, start, end):
partitions[name] = {'number': len(partitions) + 1,
'part_start': '%i%s' % (start, unit),
'part_end': '%i%s' % (end, unit)}
if is_gpt:
partitions[name]['name'] = name
end = 1
if is_gpt:
start, end = end, end + 1
add_partition('bios', start, end)
partitions['bios']['flags'] = ['bios_grub']
ephemeral_mb = info['ephemeral_mb']
if ephemeral_mb:
start, end = end, end + ephemeral_mb
add_partition('ephemeral', start, end)
i_info['ephemeral_format'] = info['ephemeral_format']
i_info['preserve_ephemeral'] = (
'yes' if info['preserve_ephemeral'] else 'no')
swap_mb = info['swap_mb']
if swap_mb:
start, end = end, end + swap_mb
add_partition('swap', start, end)
configdrive = info.get('configdrive')
if configdrive:
start, end = end, end + 64
add_partition('configdrive', start, end)
start, end = end, end + info['root_mb']
add_partition('root', start, end)
if not is_gpt:
partitions['root']['flags'] = ['boot']
i_info['partitions'] = partitions
return {'partition_info': i_info}
def _parse_root_device_hints(node):
parsed_hints = deploy_utils.get_root_device_for_deploy(node)
if not parsed_hints:
return {}
root_device_hints = {}
advanced = {}
for hint, value in parsed_hints.items():
if isinstance(value, str):
if value.startswith('== '):
root_device_hints[hint] = int(value[3:])
elif value.startswith('s== '):
root_device_hints[hint] = urlparse.unquote(value[4:])
else:
advanced[hint] = value
else:
root_device_hints[hint] = value
if advanced:
raise exception.InvalidParameterValue(
_('Ansible-deploy does not support advanced root device hints '
'based on oslo.utils operators. '
'Present advanced hints for node %(node)s are %(hints)s.') % {
'node': node.uuid, 'hints': advanced})
return root_device_hints
def _add_ssl_image_options(image):
image['validate_certs'] = ('no' if CONF.ansible.image_store_insecure
else 'yes')
if CONF.ansible.image_store_cafile:
image['cafile'] = CONF.ansible.image_store_cafile
if CONF.ansible.image_store_certfile and CONF.ansible.image_store_keyfile:
image['client_cert'] = CONF.ansible.image_store_certfile
image['client_key'] = CONF.ansible.image_store_keyfile
def _prepare_variables(task):
node = task.node
i_info = node.instance_info
image = {}
for i_key, i_value in i_info.items():
if i_key.startswith('image_'):
image[i_key[6:]] = i_value
checksum = image.get('checksum')
if checksum:
if ':' not in checksum:
image['checksum'] = 'md5:%s' % checksum
_add_ssl_image_options(image)
variables = {'image': image}
configdrive = manager_utils.get_configdrive_image(task.node)
if configdrive:
if urlparse.urlparse(configdrive).scheme in ('http', 'https'):
cfgdrv_type = 'url'
cfgdrv_location = configdrive
else:
cfgdrv_location = _get_configdrive_path(node.uuid)
with open(cfgdrv_location, 'w') as f:
f.write(configdrive)
cfgdrv_type = 'file'
variables['configdrive'] = {'type': cfgdrv_type,
'location': cfgdrv_location}
root_device_hints = _parse_root_device_hints(node)
if root_device_hints:
variables['root_device_hints'] = root_device_hints
return variables
def _validate_clean_steps(steps, node_uuid):
missing = []
for step in steps:
name = step.get('name')
if not name:
missing.append({'name': 'undefined', 'field': 'name'})
continue
if 'interface' not in step:
missing.append({'name': name, 'field': 'interface'})
args = step.get('args', {})
for arg_name, arg in args.items():
if arg.get('required', False) and 'value' not in arg:
missing.append({'name': name,
'field': '%s.value' % arg_name})
if missing:
err_string = ', '.join(
'name %(name)s, field %(field)s' % i for i in missing)
msg = _("Malformed clean_steps file: %s") % err_string
LOG.error(msg)
raise exception.NodeCleaningFailure(node=node_uuid,
reason=msg)
if len(set(s['name'] for s in steps)) != len(steps):
msg = _("Cleaning steps do not have unique names.")
LOG.error(msg)
raise exception.NodeCleaningFailure(node=node_uuid,
reason=msg)
def _get_clean_steps(node, interface=None, override_priorities=None):
clean_steps_file = node.driver_info.get(
'ansible_clean_steps_config', CONF.ansible.default_clean_steps_config)
path = os.path.join(node.driver_info.get('ansible_playbooks_path',
CONF.ansible.playbooks_path),
os.path.basename(clean_steps_file))
try:
with open(path) as f:
internal_steps = yaml.safe_load(f)
except Exception as e:
msg = _('Failed to load clean steps from file '
'%(file)s: %(exc)s') % {'file': path, 'exc': e}
raise exception.NodeCleaningFailure(node=node.uuid, reason=msg)
_validate_clean_steps(internal_steps, node.uuid)
steps = []
override = override_priorities or {}
for params in internal_steps:
name = params['name']
clean_if = params['interface']
if interface is not None and interface != clean_if:
continue
new_priority = override.get(name)
priority = (new_priority if new_priority is not None else
params.get('priority', 0))
args = {}
argsinfo = params.get('args', {})
for arg, arg_info in argsinfo.items():
args[arg] = arg_info.pop('value', None)
step = {
'interface': clean_if,
'step': name,
'priority': priority,
'abortable': False,
'argsinfo': argsinfo,
'args': args
}
steps.append(step)
return steps
class AnsibleDeploy(agent_base.HeartbeatMixin,
agent_base.AgentOobStepsMixin,
base.DeployInterface):
collect_deploy_logs = False
def get_properties(self):
props = COMMON_PROPERTIES.copy()
props.update(agent_base.VENDOR_PROPERTIES)
return props
@METRICS.timer('AnsibleDeploy.validate')
def validate(self, task):
task.driver.boot.validate(task)
node = task.node
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if not iwdi and deploy_utils.get_boot_option(node) == "netboot":
raise exception.InvalidParameterValue(_(
"Node %(node)s is configured to use the ansible deploy "
"interface, which does not support netboot.") %
{'node': node.uuid})
params = {}
image_source = node.instance_info.get('image_source')
params['instance_info.image_source'] = image_source
error_msg = _('Node %s failed to validate deploy image info. Some '
'parameters were missing') % node.uuid
deploy_utils.check_for_missing_params(params, error_msg)
_parse_root_device_hints(node)
def _ansible_deploy(self, task, node_address):
node = task.node
LOG.debug('IP of node %(node)s is %(ip)s',
{'node': node.uuid, 'ip': node_address})
variables = _prepare_variables(task)
if not node.driver_internal_info.get('is_whole_disk_image'):
variables.update(_parse_partitioning_info(node))
if node.target_raid_config:
variables.update({'raid_config': node.target_raid_config})
playbook, user, key = _parse_ansible_driver_info(node)
node_list = [(node.uuid, node_address, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list, variables=variables)
LOG.debug('Starting deploy on node %s', node.uuid)
_run_playbook(node, playbook, extra_vars, key)
@METRICS.timer('AnsibleDeploy.deploy')
@base.deploy_step(priority=100)
@task_manager.require_exclusive_lock
def deploy(self, task):
self._required_image_info(task)
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
def in_core_deploy_step(self, task):
step = task.node.deploy_step
return (step
and step['interface'] == 'deploy'
and step['step'] == 'deploy')
|
Apache License 2.0
|
devopshq/teamcity
|
dohq_teamcity/models/change.py
|
Change.id
|
python
|
def id(self):
return self._id
|
Gets the id of this Change. # noqa: E501
:return: The id of this Change. # noqa: E501
:rtype: int
|
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/models/change.py#L120-L127
|
from dohq_teamcity.custom.base_model import TeamCityObject
class Change(TeamCityObject):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'version': 'str',
'internal_version': 'str',
'username': 'str',
'_date': 'str',
'registration_date': 'str',
'personal': 'bool',
'href': 'str',
'web_url': 'str',
'comment': 'str',
'user': 'User',
'files': 'FileChanges',
'vcs_root_instance': 'VcsRootInstance',
'parent_changes': 'Changes',
'parent_revisions': 'Items',
'locator': 'str'
}
attribute_map = {
'id': 'id',
'version': 'version',
'internal_version': 'internalVersion',
'username': 'username',
'_date': 'date',
'registration_date': 'registrationDate',
'personal': 'personal',
'href': 'href',
'web_url': 'webUrl',
'comment': 'comment',
'user': 'user',
'files': 'files',
'vcs_root_instance': 'vcsRootInstance',
'parent_changes': 'parentChanges',
'parent_revisions': 'parentRevisions',
'locator': 'locator'
}
def __init__(self, id=None, version=None, internal_version=None, username=None, _date=None, registration_date=None, personal=False, href=None, web_url=None, comment=None, user=None, files=None, vcs_root_instance=None, parent_changes=None, parent_revisions=None, locator=None, teamcity=None):
self._id = None
self._version = None
self._internal_version = None
self._username = None
self.__date = None
self._registration_date = None
self._personal = None
self._href = None
self._web_url = None
self._comment = None
self._user = None
self._files = None
self._vcs_root_instance = None
self._parent_changes = None
self._parent_revisions = None
self._locator = None
self.discriminator = None
if id is not None:
self.id = id
if version is not None:
self.version = version
if internal_version is not None:
self.internal_version = internal_version
if username is not None:
self.username = username
if _date is not None:
self._date = _date
if registration_date is not None:
self.registration_date = registration_date
if personal is not None:
self.personal = personal
if href is not None:
self.href = href
if web_url is not None:
self.web_url = web_url
if comment is not None:
self.comment = comment
if user is not None:
self.user = user
if files is not None:
self.files = files
if vcs_root_instance is not None:
self.vcs_root_instance = vcs_root_instance
if parent_changes is not None:
self.parent_changes = parent_changes
if parent_revisions is not None:
self.parent_revisions = parent_revisions
if locator is not None:
self.locator = locator
super(Change, self).__init__(teamcity=teamcity)
@property
|
MIT License
|
sketchfab/c4d-plugin
|
dependencies/OSX/PIL/ImageDraw.py
|
ImageDraw.pieslice
|
python
|
def pieslice(self, xy, start, end, fill=None, outline=None, width=1):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_pieslice(xy, start, end, fill, 1)
if ink is not None and ink != fill and width != 0:
self.draw.draw_pieslice(xy, start, end, ink, 0, width)
|
Draw a pieslice.
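A minimal usage sketch, assuming a standard Pillow installation rather than this vendored copy; the bounding box, angles and colours below are illustrative:
from PIL import Image, ImageDraw
im = Image.new("RGB", (200, 200), "white")
draw = ImageDraw.Draw(im)
# Wedge from 0 to 120 degrees inside the box (20, 20)-(180, 180),
# filled yellow with a 3-pixel black outline.
draw.pieslice([20, 20, 180, 180], start=0, end=120, fill="yellow", outline="black", width=3)
im.save("pieslice.png")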
|
https://github.com/sketchfab/c4d-plugin/blob/d06ef20acdeffa53032b521073b820ed374807ef/dependencies/OSX/PIL/ImageDraw.py#L223-L229
|
import math
import numbers
from . import Image, ImageColor
class ImageDraw:
def __init__(self, im, mode=None):
im.load()
if im.readonly:
im._copy()
blend = 0
if mode is None:
mode = im.mode
if mode != im.mode:
if mode == "RGBA" and im.mode == "RGB":
blend = 1
else:
raise ValueError("mode mismatch")
if mode == "P":
self.palette = im.palette
else:
self.palette = None
self.im = im.im
self.draw = Image.core.draw(self.im, blend)
self.mode = mode
if mode in ("I", "F"):
self.ink = self.draw.draw_ink(1)
else:
self.ink = self.draw.draw_ink(-1)
if mode in ("1", "P", "I", "F"):
self.fontmode = "1"
else:
self.fontmode = "L"
self.fill = 0
self.font = None
def getfont(self):
if not self.font:
from . import ImageFont
self.font = ImageFont.load_default()
return self.font
def _getink(self, ink, fill=None):
if ink is None and fill is None:
if self.fill:
fill = self.ink
else:
ink = self.ink
else:
if ink is not None:
if isinstance(ink, str):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not isinstance(ink, numbers.Number):
ink = self.palette.getcolor(ink)
ink = self.draw.draw_ink(ink)
if fill is not None:
if isinstance(fill, str):
fill = ImageColor.getcolor(fill, self.mode)
if self.palette and not isinstance(fill, numbers.Number):
fill = self.palette.getcolor(fill)
fill = self.draw.draw_ink(fill)
return ink, fill
def arc(self, xy, start, end, fill=None, width=1):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_arc(xy, start, end, ink, width)
def bitmap(self, xy, bitmap, fill=None):
bitmap.load()
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
self.draw.draw_bitmap(xy, bitmap.im, ink)
def chord(self, xy, start, end, fill=None, outline=None, width=1):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_chord(xy, start, end, fill, 1)
if ink is not None and ink != fill and width != 0:
self.draw.draw_chord(xy, start, end, ink, 0, width)
def ellipse(self, xy, fill=None, outline=None, width=1):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_ellipse(xy, fill, 1)
if ink is not None and ink != fill and width != 0:
self.draw.draw_ellipse(xy, ink, 0, width)
def line(self, xy, fill=None, width=0, joint=None):
ink = self._getink(fill)[0]
if ink is not None:
self.draw.draw_lines(xy, ink, width)
if joint == "curve" and width > 4:
if not isinstance(xy[0], (list, tuple)):
xy = [tuple(xy[i : i + 2]) for i in range(0, len(xy), 2)]
for i in range(1, len(xy) - 1):
point = xy[i]
angles = [
math.degrees(math.atan2(end[0] - start[0], start[1] - end[1]))
% 360
for start, end in ((xy[i - 1], point), (point, xy[i + 1]))
]
if angles[0] == angles[1]:
continue
def coord_at_angle(coord, angle):
x, y = coord
angle -= 90
distance = width / 2 - 1
return tuple(
[
p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d))
for p, p_d in (
(x, distance * math.cos(math.radians(angle))),
(y, distance * math.sin(math.radians(angle))),
)
]
)
flipped = (
angles[1] > angles[0] and angles[1] - 180 > angles[0]
) or (angles[1] < angles[0] and angles[1] + 180 > angles[0])
coords = [
(point[0] - width / 2 + 1, point[1] - width / 2 + 1),
(point[0] + width / 2 - 1, point[1] + width / 2 - 1),
]
if flipped:
start, end = (angles[1] + 90, angles[0] + 90)
else:
start, end = (angles[0] - 90, angles[1] - 90)
self.pieslice(coords, start - 90, end - 90, fill)
if width > 8:
if flipped:
gapCoords = [
coord_at_angle(point, angles[0] + 90),
point,
coord_at_angle(point, angles[1] + 90),
]
else:
gapCoords = [
coord_at_angle(point, angles[0] - 90),
point,
coord_at_angle(point, angles[1] - 90),
]
self.line(gapCoords, fill, width=3)
def shape(self, shape, fill=None, outline=None):
shape.close()
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_outline(shape, fill, 1)
if ink is not None and ink != fill:
self.draw.draw_outline(shape, ink, 0)
|
Apache License 2.0
|
necaris/python3-openid
|
openid/store/filestore.py
|
_ensureDir
|
python
|
def _ensureDir(dir_name):
try:
os.makedirs(dir_name)
except OSError as why:
if why.errno != EEXIST or not os.path.isdir(dir_name):
raise
|
Create dir_name as a directory if it does not exist. If it
exists, make sure that it is, in fact, a directory.
Can raise OSError
str -> NoneType
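A short usage sketch, assuming openid.store.filestore is importable; the path below is hypothetical:
from openid.store.filestore import _ensureDir
_ensureDir("/tmp/openid-store/associations")   # creates the nested directories if absent
_ensureDir("/tmp/openid-store/associations")   # no-op on the second call, since the directory now exists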
|
https://github.com/necaris/python3-openid/blob/5c7f8f8fa4d2a0124516046ab2f84130eb8c10cb/openid/store/filestore.py#L66-L78
|
import string
import os
import os.path
import time
import logging
from errno import EEXIST, ENOENT
from tempfile import mkstemp
from openid.association import Association
from openid.store.interface import OpenIDStore
from openid.store import nonce
from openid import cryptutil, oidutil
logger = logging.getLogger(__name__)
_filename_allowed = string.ascii_letters + string.digits + '.'
_isFilenameSafe = set(_filename_allowed).__contains__
def _safe64(s):
h64 = oidutil.toBase64(cryptutil.sha1(s))
h64 = bytearray(h64)
h64 = h64.replace(b'+', b'_')
h64 = h64.replace(b'/', b'.')
h64 = h64.replace(b'=', b'')
return bytes(h64)
def _filenameEscape(s):
filename_chunks = []
for c in s:
if _isFilenameSafe(c):
filename_chunks.append(c)
else:
filename_chunks.append('_%02X' % ord(c))
return ''.join(filename_chunks)
def _removeIfPresent(filename):
try:
os.unlink(filename)
except OSError as why:
if why.errno == ENOENT:
return 0
else:
raise
else:
return 1
|
Apache License 2.0
|
luciferjack/python-mysql-pool
|
PyMysqlPool/mysql/connector/protocol.py
|
MySQLProtocol.parse_eof
|
python
|
def parse_eof(self, packet):
if packet[4] == 0:
return self.parse_ok(packet)
err_msg = "Failed parsing EOF packet."
res = {}
try:
unpacked = struct_unpack('<xxxBBHH', packet)
except struct.error:
raise errors.InterfaceError(err_msg)
if not (unpacked[1] == 254 and len(packet) <= 9):
raise errors.InterfaceError(err_msg)
res['warning_count'] = unpacked[2]
res['status_flag'] = unpacked[3]
return res
|
Parse a MySQL EOF-packet
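A standalone sketch of the same unpacking on a hand-built EOF packet; the header and flag values are illustrative, not captured from a real server:
import struct
# 4-byte MySQL packet header (3-byte payload length, 1-byte sequence id),
# then the 0xfe EOF marker, a 2-byte warning count and 2-byte status flags.
packet = b"\x05\x00\x00\x01" + b"\xfe" + struct.pack("<HH", 0, 0x0002)
seq, marker, warnings, status = struct.unpack("<xxxBBHH", packet)
assert marker == 254 and len(packet) <= 9
print({"warning_count": warnings, "status_flag": status})   # {'warning_count': 0, 'status_flag': 2}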
|
https://github.com/luciferjack/python-mysql-pool/blob/7b812c6fc7f04255620cb86f272a2d8900c2240d/PyMysqlPool/mysql/connector/protocol.py#L262-L280
|
import datetime
import struct
from decimal import Decimal
from . import errors, utils
from .authentication import get_auth_plugin
from .catch23 import PY2, struct_unpack
from .constants import (
FieldFlag, ServerCmd, FieldType, ClientFlag, MAX_MYSQL_TABLE_COLUMNS)
from .errors import get_exception
class MySQLProtocol(object):
def _connect_with_db(self, client_flags, database):
if client_flags & ClientFlag.CONNECT_WITH_DB and database:
return database.encode('utf8') + b'\x00'
return b'\x00'
def _auth_response(self, client_flags, username, password, database,
auth_plugin, auth_data, ssl_enabled):
if not password:
return b'\x00'
try:
auth = get_auth_plugin(auth_plugin)(
auth_data,
username=username, password=password, database=database,
ssl_enabled=ssl_enabled)
plugin_auth_response = auth.auth_response()
except (TypeError, errors.InterfaceError) as exc:
raise errors.ProgrammingError(
"Failed authentication: {0}".format(str(exc)))
if client_flags & ClientFlag.SECURE_CONNECTION:
resplen = len(plugin_auth_response)
auth_response = struct.pack('<B', resplen) + plugin_auth_response
else:
auth_response = plugin_auth_response + b'\x00'
return auth_response
def make_auth(self, handshake, username=None, password=None, database=None,
charset=33, client_flags=0,
max_allowed_packet=1073741824, ssl_enabled=False,
auth_plugin=None):
try:
auth_data = handshake['auth_data']
auth_plugin = auth_plugin or handshake['auth_plugin']
except (TypeError, KeyError) as exc:
raise errors.ProgrammingError(
"Handshake misses authentication info ({0})".format(exc))
if not username:
username = b''
try:
username_bytes = username.encode('utf8')
except AttributeError:
username_bytes = username
packet = struct.pack('<IIB{filler}{usrlen}sx'.format(
filler='x' * 23, usrlen=len(username_bytes)),
client_flags, max_allowed_packet, charset,
username_bytes)
packet += self._auth_response(client_flags, username, password,
database,
auth_plugin,
auth_data, ssl_enabled)
packet += self._connect_with_db(client_flags, database)
if client_flags & ClientFlag.PLUGIN_AUTH:
packet += auth_plugin.encode('utf8') + b'\x00'
return packet
def make_auth_ssl(self, charset=33, client_flags=0,
max_allowed_packet=1073741824):
return utils.int4store(client_flags) + utils.int4store(max_allowed_packet) + utils.int1store(charset) + b'\x00' * 23
def make_command(self, command, argument=None):
data = utils.int1store(command)
if argument is not None:
data += argument
return data
def make_change_user(self, handshake, username=None, password=None,
database=None, charset=33, client_flags=0,
ssl_enabled=False, auth_plugin=None):
try:
auth_data = handshake['auth_data']
auth_plugin = auth_plugin or handshake['auth_plugin']
except (TypeError, KeyError) as exc:
raise errors.ProgrammingError(
"Handshake misses authentication info ({0})".format(exc))
if not username:
username = b''
try:
username_bytes = username.encode('utf8')
except AttributeError:
username_bytes = username
packet = struct.pack('<B{usrlen}sx'.format(usrlen=len(username_bytes)),
ServerCmd.CHANGE_USER, username_bytes)
packet += self._auth_response(client_flags, username, password,
database,
auth_plugin,
auth_data, ssl_enabled)
packet += self._connect_with_db(client_flags, database)
packet += struct.pack('<H', charset)
if client_flags & ClientFlag.PLUGIN_AUTH:
packet += auth_plugin.encode('utf8') + b'\x00'
return packet
def parse_handshake(self, packet):
res = {}
res['protocol'] = struct_unpack('<xxxxB', packet[0:5])[0]
(packet, res['server_version_original']) = utils.read_string(
packet[5:], end=b'\x00')
(res['server_threadid'],
auth_data1,
capabilities1,
res['charset'],
res['server_status'],
capabilities2,
auth_data_length
) = struct_unpack('<I8sx2sBH2sBxxxxxxxxxx', packet[0:31])
res['server_version_original'] = res['server_version_original'].decode()
packet = packet[31:]
capabilities = utils.intread(capabilities1 + capabilities2)
auth_data2 = b''
if capabilities & ClientFlag.SECURE_CONNECTION:
size = min(13, auth_data_length - 8) if auth_data_length else 13
auth_data2 = packet[0:size]
packet = packet[size:]
if auth_data2[-1] == 0:
auth_data2 = auth_data2[:-1]
if capabilities & ClientFlag.PLUGIN_AUTH:
if (b'\x00' not in packet
and res['server_version_original'].startswith("5.5.8")):
(packet, res['auth_plugin']) = (b'', packet)
else:
(packet, res['auth_plugin']) = utils.read_string(
packet, end=b'\x00')
res['auth_plugin'] = res['auth_plugin'].decode('utf-8')
else:
res['auth_plugin'] = 'mysql_native_password'
res['auth_data'] = auth_data1 + auth_data2
res['capabilities'] = capabilities
return res
def parse_ok(self, packet):
if not packet[4] == 0:
raise errors.InterfaceError("Failed parsing OK packet (invalid).")
ok_packet = {}
try:
ok_packet['field_count'] = struct_unpack('<xxxxB', packet[0:5])[0]
(packet, ok_packet['affected_rows']) = utils.read_lc_int(packet[5:])
(packet, ok_packet['insert_id']) = utils.read_lc_int(packet)
(ok_packet['status_flag'],
ok_packet['warning_count']) = struct_unpack('<HH', packet[0:4])
packet = packet[4:]
if packet:
(packet, ok_packet['info_msg']) = utils.read_lc_string(packet)
ok_packet['info_msg'] = ok_packet['info_msg'].decode('utf-8')
except ValueError:
raise errors.InterfaceError("Failed parsing OK packet.")
return ok_packet
def parse_column_count(self, packet):
try:
count = utils.read_lc_int(packet[4:])[1]
if count > MAX_MYSQL_TABLE_COLUMNS:
return None
return count
except (struct.error, ValueError):
raise errors.InterfaceError("Failed parsing column count")
def parse_column(self, packet, charset='utf-8'):
(packet, _) = utils.read_lc_string(packet[4:])
(packet, _) = utils.read_lc_string(packet)
(packet, _) = utils.read_lc_string(packet)
(packet, _) = utils.read_lc_string(packet)
(packet, name) = utils.read_lc_string(packet)
(packet, _) = utils.read_lc_string(packet)
try:
(_, _, field_type,
flags, _) = struct_unpack('<xHIBHBxx', packet)
except struct.error:
raise errors.InterfaceError("Failed parsing column information")
return (
name.decode(charset),
field_type,
None,
None,
None,
None,
~flags & FieldFlag.NOT_NULL,
flags,
)
|
MIT License
|
ebellocchia/bip_utils
|
bip_utils/bip/conf/common/bip_coin_conf.py
|
BipCoinConf.CoinIndex
|
python
|
def CoinIndex(self) -> int:
return self.m_coin_idx
|
Get coin index.
Returns:
int: Coin index
|
https://github.com/ebellocchia/bip_utils/blob/b04f9ef493a5b57983412c0ce460a9ca05ee1f50/bip_utils/bip/conf/common/bip_coin_conf.py#L86-L93
|
from typing import Any, Dict, Optional, Type
from bip_utils.addr import IAddrEncoder
from bip_utils.bip.bip32 import Bip32KeyNetVersions, Bip32Base
from bip_utils.utils.conf import CoinNames as UtilsCoinNames
class BipCoinConf:
m_coin_names: UtilsCoinNames
m_coin_idx: int
m_is_testnet: bool
m_def_path: str
m_key_net_ver: Bip32KeyNetVersions
m_wif_net_ver: Optional[bytes]
m_bip32_cls: Type[Bip32Base]
m_addr_params: Dict[str, Any]
m_addr_cls: Type[IAddrEncoder]
def __init__(self,
coin_names: UtilsCoinNames,
coin_idx: int,
is_testnet: bool,
def_path: str,
key_net_ver: Bip32KeyNetVersions,
wif_net_ver: Optional[bytes],
bip32_cls: Type[Bip32Base],
addr_cls: Type[IAddrEncoder],
addr_params: Dict[str, Any]) -> None:
self.m_coin_names = coin_names
self.m_coin_idx = coin_idx
self.m_is_testnet = is_testnet
self.m_def_path = def_path
self.m_key_net_ver = key_net_ver
self.m_wif_net_ver = wif_net_ver
self.m_bip32_cls = bip32_cls
self.m_addr_params = addr_params
self.m_addr_cls = addr_cls
def CoinNames(self) -> UtilsCoinNames:
return self.m_coin_names
|
MIT License
|
radish-bdd/radish
|
src/radish/models/step.py
|
Step.set_rule
|
python
|
def set_rule(self, rule):
self.rule = rule
|
Set the Rule for this Step
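A minimal sketch, assuming radish-bdd is installed; the Step arguments are made up and the stand-in object below is not a real radish Rule (normally the feature-file parser builds Steps and assigns the Rule itself):
from radish.models.step import Step
step = Step(
    step_id=1,
    keyword="Given",
    used_keyword="Given",
    text="the calculator is turned on",
    doc_string=None,
    data_table=None,
    path="features/calculator.feature",
    line=4,
)
class FakeRule:
    short_description = "Basic arithmetic"   # illustrative stand-in attribute
step.set_rule(FakeRule())
print(step.rule.short_description)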
|
https://github.com/radish-bdd/radish/blob/1685a48cacd491114cb6fb7e1dc3e1a54e8821a5/src/radish/models/step.py#L76-L78
|
import base64
import radish.utils as utils
from radish.errors import RadishError, StepBehaveLikeRecursionError
from radish.models.state import State
from radish.models.stepfailurereport import StepFailureReport
from radish.models.timed import Timed
class Step(Timed):
def __init__(
self,
step_id: int,
keyword: str,
used_keyword: str,
text: str,
doc_string,
data_table,
path: str,
line: int,
) -> None:
super().__init__()
self.id = step_id
self.keyword = keyword
self.used_keyword = used_keyword
self.text = text
self.doc_string = doc_string
self.data_table = data_table
self.path = path
self.line = line
self.feature = None
self.rule = None
self.scenario = None
self.step_impl = None
self.step_impl_match = None
self._behave_like_runner = None
self.state = State.UNTESTED
self.failure_report = None
self.embeddings = []
def __repr__(self) -> str:
return "<Step: {id} '{keyword} {text}' @ {path}:{line}>".format(
id=self.id,
keyword=self.keyword,
text=self.text,
path=self.path,
line=self.line,
)
def set_feature(self, feature):
self.feature = feature
|
MIT License
|
digitalsleuth/time_decode
|
time_decode/time_decode.py
|
TimeDecoder.from_ole_be
|
python
|
def from_ole_be(self):
reason = "[!] OLE Big-Endian timestamps are 16 hex characters (8 bytes)"
ts_type = self.ts_types['ole_be']
try:
if not len(self.oleb) == 16 or not all(char in hexdigits for char in self.oleb):
self.in_ole_be = indiv_output = combined_output = False
pass
else:
delta = struct.unpack('>d', struct.pack('>Q', int(self.oleb, 16)))[0]
if int(delta) < 0:
self.in_ole_be = indiv_output = combined_output = False
pass
else:
dt_obj = self.epoch_1899 + timedelta(days=delta)
self.in_ole_be = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_ole_be))
combined_output = str("{}{}\t{} UTC{}".format(self.left_color, ts_type, self.in_ole_be, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_ole_be = indiv_output = combined_output = False
return self.in_ole_be, indiv_output, combined_output, reason
|
Convert an OLE Big-Endian timestamp to a date
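A standalone sketch of the underlying conversion, assuming the usual 1899-12-30 OLE epoch; the round trip makes the example self-checking instead of relying on a magic hex constant:
import struct
from datetime import datetime, timedelta
epoch_1899 = datetime(1899, 12, 30)
# Encode a known date as an OLE big-endian value: a 64-bit IEEE double
# counting fractional days since the epoch, rendered as 16 hex characters.
days = (datetime(2021, 9, 18, 6, 0) - epoch_1899) / timedelta(days=1)
ole_be_hex = struct.pack(">d", days).hex()
# Decode it the same way from_ole_be does.
delta = struct.unpack(">d", struct.pack(">Q", int(ole_be_hex, 16)))[0]
print(epoch_1899 + timedelta(days=delta))   # 2021-09-18 06:00:00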
|
https://github.com/digitalsleuth/time_decode/blob/247ca8d4765ef52cd02d8153cca6ffd0c925727b/time_decode/time_decode.py#L838-L860
|
from datetime import datetime as dt, timedelta
import struct
from binascii import hexlify, unhexlify
from string import hexdigits
import argparse
import re
import sys
import base64
import uuid
from calendar import monthrange
from dateutil import parser as duparser
from colorama import init
init(autoreset=True)
__author__ = 'Corey Forman'
__date__ = '18 Sep 2021'
__version__ = '3.2.0'
__description__ = 'Python 3 CLI Date Time Conversion Tool'
class TimeDecoder(object):
def __init__(self, args):
all_args = vars(args)
if args.guess:
for each_arg in all_args:
all_args[each_arg] = args.guess
for name, value in all_args.items():
if not hasattr(self, name):
setattr(self, name, value)
self.epoch_1601 = dt(1601, 1, 1)
self.epoch_1899 = dt(1899, 12, 30)
self.epoch_1904 = dt(1904, 1, 1)
self.epoch_1970 = dt(1970, 1, 1)
self.epoch_1980 = dt(1980, 1, 6)
self.epoch_2000 = dt(2000, 1, 1)
self.epoch_2001 = dt(2001, 1, 1)
self.hundreds_nano = 10000000
self.nano_2001 = 1000000000
self.epoch_active = 116444736000000000
self.hfs_dec_subtract = 2082844800
self.ts_funcs = [self.from_unix_sec, self.from_unix_milli, self.from_win_64_hex,
self.from_win_64_hexle, self.from_chrome, self.from_ad,
self.from_unix_hex_32be, self.from_unix_hex_32le, self.from_cookie,
self.from_ole_be, self.from_ole_le, self.from_mac,
self.from_hfs_dec, self.from_hfs_be, self.from_hfs_le, self.from_msdos,
self.from_fat, self.from_systime, self.from_filetime, self.from_hotmail,
self.from_prtime, self.from_ole_auto, self.from_ms1904,
self.from_ios_time, self.from_sym_time, self.from_gps_time,
self.from_eitime, self.from_bplist, self.from_gsm, self.from_vm,
self.from_tiktok, self.from_twitter, self.from_discord, self.from_ksuid,
self.from_mastodon, self.from_metasploit, self.from_sony, self.from_uuid]
self.date_funcs = [self.to_unix_sec, self.to_unix_milli, self.to_win_64_hex,
self.to_win_64_hexle, self.to_chrome, self.to_ad, self.to_unix_hex_32be,
self.to_unix_hex_32le, self.to_cookie, self.to_ole_be, self.to_ole_le,
self.to_mac, self.to_hfs_dec, self.to_hfs_be, self.to_hfs_le,
self.to_msdos, self.to_fat, self.to_systime, self.to_filetime,
self.to_hotmail, self.to_prtime, self.to_ole_auto, self.to_ms1904,
self.to_ios_time, self.to_sym_time, self.to_gps_time, self.to_eitime,
self.to_bplist, self.to_gsm, self.to_vm]
self.in_unix_sec = self.in_unix_milli = self.in_windows_hex_64 = None
self.in_windows_hex_le = self.in_chrome = self.in_ad = self.in_unix_hex_32 = None
self.in_unix_hex_32le = self.in_cookie = self.in_ole_be = self.in_ole_le = None
self.in_mac = self.in_hfs_dec = self.in_hfs_be = self.in_hfs_le = self.in_fat = None
self.in_msdos = self.in_systemtime = self.in_filetime = self.in_prtime = None
self.in_ole_auto = self.in_ms1904 = self.in_iostime = self.in_symtime = self.in_hotmail = None
self.in_gpstime = self.in_eitime = self.in_bplist = self.in_gsm = self.in_vm = None
self.in_tiktok = self.in_twitter = self.in_discord = self.in_ksuid = self.in_mastodon = None
self.in_metasploit = self.in_sony = self.in_uuid = None
self.out_unix_sec = self.out_unix_milli = self.out_windows_hex_64 = self.out_hotmail = None
self.out_windows_hex_le = self.out_chrome = self.out_adtime = self.out_unix_hex_32 = None
self.out_unix_hex_32le = self.out_cookie = self.out_ole_be = self.out_ole_le = None
self.out_mac = self.out_hfs_dec = self.out_hfs_be = self.out_hfs_le = self.out_fat = None
self.out_msdos = self.out_systemtime = self.out_filetime = self.out_prtime = None
self.out_ole_auto = self.out_ms1904 = self.out_iostime = self.out_symtime = None
self.out_gpstime = self.out_eitime = self.out_bplist = self.out_gsm = self.out_vm = None
self.leapseconds = {
10: [dt(1972, 1, 1), dt(1972, 7, 1)],
11: [dt(1972, 7, 1), dt(1973, 1, 1)],
12: [dt(1973, 1, 1), dt(1974, 1, 1)],
13: [dt(1974, 1, 1), dt(1975, 1, 1)],
14: [dt(1975, 1, 1), dt(1976, 1, 1)],
15: [dt(1976, 1, 1), dt(1977, 1, 1)],
16: [dt(1977, 1, 1), dt(1978, 1, 1)],
17: [dt(1978, 1, 1), dt(1979, 1, 1)],
18: [dt(1979, 1, 1), dt(1980, 1, 1)],
19: [dt(1980, 1, 1), dt(1981, 7, 1)],
20: [dt(1981, 7, 1), dt(1982, 7, 1)],
21: [dt(1982, 7, 1), dt(1983, 7, 1)],
22: [dt(1983, 7, 1), dt(1985, 7, 1)],
23: [dt(1985, 7, 1), dt(1988, 1, 1)],
24: [dt(1988, 1, 1), dt(1990, 1, 1)],
25: [dt(1990, 1, 1), dt(1991, 1, 1)],
26: [dt(1991, 1, 1), dt(1992, 7, 1)],
27: [dt(1992, 7, 1), dt(1993, 7, 1)],
28: [dt(1993, 7, 1), dt(1994, 7, 1)],
29: [dt(1994, 7, 1), dt(1996, 1, 1)],
30: [dt(1996, 1, 1), dt(1997, 7, 1)],
31: [dt(1997, 7, 1), dt(1999, 1, 1)],
32: [dt(1999, 1, 1), dt(2006, 1, 1)],
33: [dt(2006, 1, 1), dt(2009, 1, 1)],
34: [dt(2009, 1, 1), dt(2012, 7, 1)],
35: [dt(2012, 7, 1), dt(2015, 7, 1)],
36: [dt(2015, 7, 1), dt(2017, 1, 1)],
37: [dt(2017, 1, 1), dt.now() - timedelta(seconds=37)]
}
self.left_color = "\033[1;31m"
self.right_color = "\033[1;m"
self.ts_types = {'unix_sec': 'Unix Seconds:',
'unix_milli': 'Unix Milliseconds:',
'windows_hex_64': 'Windows 64-bit Hex BE:',
'windows_hex_le': 'Windows 64-bit Hex LE:',
'chrome': 'Google Chrome:',
'ad': 'Active Directory/LDAP dt:',
'unix_hex_32': 'Unix Hex 32-bit BE:',
'unix_hex_32le': 'Unix Hex 32-bit LE:',
'cookie': 'Windows Cookie Date:',
'ole_be': 'Windows OLE 64-bit double BE:',
'ole_le': 'Windows OLE 64-bit double LE:',
'mac': 'Mac Absolute Time:',
'hfs_dec': 'Mac OS/HFS+ Decimal Time:',
'hfs_be': 'HFS/HFS+ 32-bit Hex BE:',
'hfs_le': 'HFS/HFS+ 32-bit Hex LE:',
'msdos': 'MS-DOS 32-bit Hex Value:',
'fat': 'FAT Date + Time:',
'systemtime': 'Microsoft 128-bit SYSTEMTIME:',
'filetime': 'Microsoft FILETIME time:',
'hotmail': 'Microsoft Hotmail time:',
'prtime': 'Mozilla PRTime:',
'ole_auto': 'OLE Automation Date:',
'ms1904': 'MS Excel 1904 Date:',
'iostime': 'iOS 11 Date:',
'symtime': 'Symantec AV time:',
'gpstime': 'GPS time:',
'eitime': 'Google EI time:',
'bplist': 'iOS Binary Plist time:',
'gsm': 'GSM time:',
'vm': 'VMSD time:',
'tiktok': 'TikTok time:',
'twitter': 'Twitter time:',
'discord': 'Discord time:',
'ksuid': 'KSUID time:',
'mastodon': 'Mastodon time:',
'metasploit': 'Metasploit Payload UUID:',
'sony': 'Sonyflake time:',
'uu': 'UUID time:'}
def run(self):
try:
if self.guess:
self.from_all()
return
if self.unix:
result, indiv_output, combined_output, reason = self.from_unix_sec()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.umil:
result, indiv_output, combined_output, reason = self.from_unix_milli()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.wh:
result, indiv_output, combined_output, reason = self.from_win_64_hex()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.whle:
result, indiv_output, combined_output, reason = self.from_win_64_hexle()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.chrome:
result, indiv_output, combined_output, reason = self.from_chrome()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.active:
result, indiv_output, combined_output, reason = self.from_ad()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.uhbe:
result, indiv_output, combined_output, reason = self.from_unix_hex_32be()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.uhle:
result, indiv_output, combined_output, reason = self.from_unix_hex_32le()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.cookie:
result, indiv_output, combined_output, reason = self.from_cookie()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.oleb:
result, indiv_output, combined_output, reason = self.from_ole_be()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.olel:
result, indiv_output, combined_output, reason = self.from_ole_le()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.mac:
result, indiv_output, combined_output, reason = self.from_mac()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.hfsdec:
result, indiv_output, combined_output, reason = self.from_hfs_dec()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.hfsbe:
result, indiv_output, combined_output, reason = self.from_hfs_be()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.hfsle:
result, indiv_output, combined_output, reason = self.from_hfs_le()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.fat:
result, indiv_output, combined_output, reason = self.from_fat()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.msdos:
result, indiv_output, combined_output, reason = self.from_msdos()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.systime:
result, indiv_output, combined_output, reason = self.from_systime()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.ft:
result, indiv_output, combined_output, reason = self.from_filetime()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.hotmail:
result, indiv_output, combined_output, reason = self.from_hotmail()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.pr:
result, indiv_output, combined_output, reason = self.from_prtime()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.auto:
result, indiv_output, combined_output, reason = self.from_ole_auto()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.ms1904:
result, indiv_output, combined_output, reason = self.from_ms1904()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.ios:
result, indiv_output, combined_output, reason = self.from_ios_time()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.sym:
result, indiv_output, combined_output, reason = self.from_sym_time()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.gps:
result, indiv_output, combined_output, reason = self.from_gps_time()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.eitime:
result, indiv_output, combined_output, reason = self.from_eitime()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.bplist:
result, indiv_output, combined_output, reason = self.from_bplist()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.gsm:
result, indiv_output, combined_output, reason = self.from_gsm()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.vm:
result, indiv_output, combined_output, reason = self.from_vm()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.tiktok:
result, indiv_output, combined_output, reason = self.from_tiktok()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.twitter:
result, indiv_output, combined_output, reason = self.from_twitter()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.discord:
result, indiv_output, combined_output, reason = self.from_discord()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.ksuid:
result, indiv_output, combined_output, reason = self.from_ksuid()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.mastodon:
result, indiv_output, combined_output, reason = self.from_mastodon()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.meta:
result, indiv_output, combined_output, reason = self.from_metasploit()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.sony:
result, indiv_output, combined_output, reason = self.from_sony()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.uu:
result, indiv_output, combined_output, reason = self.from_uuid()
if indiv_output is False:
print(reason)
else:
print(indiv_output)
if self.timestamp:
self.to_timestamps()
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
def to_timestamps(self):
print('\nConverting Date: ' + self.timestamp + '\n')
for func in self.date_funcs:
func()
self.timestamp_output()
def from_unix_sec(self):
reason = "[!] Unix seconds timestamp is 10 digits in length"
ts_type = self.ts_types['unix_sec']
try:
if not len(self.unix) == 10 or not self.unix.isdigit():
self.in_unix_sec = indiv_output = combined_output = False
pass
else:
self.in_unix_sec = dt.utcfromtimestamp(float(self.unix)).strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_unix_sec))
combined_output = str("{}{}\t\t\t{} UTC{}".format(self.left_color, ts_type, self.in_unix_sec, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_unix_sec = indiv_output = combined_output = False
return self.in_unix_sec, indiv_output, combined_output, reason
def to_unix_sec(self):
ts_type = self.ts_types['unix_sec']
try:
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
self.out_unix_sec = str(int((dt_obj - self.epoch_1970).total_seconds()) - int(dt_tz))
ts_output = str("{}\t\t\t{}".format(ts_type, self.out_unix_sec))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_unix_sec = ts_output = False
return self.out_unix_sec, ts_output
def from_unix_milli(self):
reason = "[!] Unix milliseconds timestamp is 13 digits in length"
ts_type = self.ts_types['unix_milli']
try:
if not len(self.umil) == 13 or not self.umil.isdigit():
self.in_unix_milli = indiv_output = combined_output = False
pass
else:
self.in_unix_milli = dt.utcfromtimestamp(float(self.umil) / 1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_unix_milli))
combined_output = str("{}{}\t\t{} UTC{}".format(self.left_color, ts_type, self.in_unix_milli, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_unix_milli = indiv_output = combined_output = False
return self.in_unix_milli, indiv_output, combined_output, reason
def to_unix_milli(self):
ts_type = self.ts_types['unix_milli']
try:
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
self.out_unix_milli = str(int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))*1000))
ts_output = str("{}\t\t{}".format(ts_type, self.out_unix_milli))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_unix_milli = ts_output = False
return self.out_unix_milli, ts_output
def from_win_64_hex(self):
reason = "[!] Windows 64-bit Hex Big-Endian timestamp is 16 hex characters (8 bytes)"
ts_type = self.ts_types['windows_hex_64']
try:
if not len(self.wh) == 16 or not all(char in hexdigits for char in self.wh):
self.in_windows_hex_64 = indiv_output = combined_output = False
pass
else:
base10_microseconds = int(self.wh, 16) / 10
if base10_microseconds >= 1e+17:
self.in_windows_hex_64 = indiv_output = combined_output = False
pass
else:
dt_obj = self.epoch_1601 + timedelta(microseconds=base10_microseconds)
self.in_windows_hex_64 = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_windows_hex_64))
combined_output = str("{}{}\t\t{} UTC{}".format(self.left_color, ts_type, self.in_windows_hex_64, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_windows_hex_64 = indiv_output = combined_output = False
return self.in_windows_hex_64, indiv_output, combined_output, reason
def to_win_64_hex(self):
ts_type = self.ts_types['windows_hex_64']
try:
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
minus_epoch = dt_obj - self.epoch_1601
calculated_time = minus_epoch.microseconds + ((minus_epoch.seconds - int(dt_tz)) * 1000000) + (minus_epoch.days * 86400000000)
self.out_windows_hex_64 = str(hex(int(calculated_time)*10))[2:].zfill(16)
ts_output = str("{}\t\t{}".format(ts_type, self.out_windows_hex_64))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_windows_hex_64 = ts_output = False
return self.out_windows_hex_64, ts_output
def from_win_64_hexle(self):
reason = "[!] Windows 64-bit Hex Little-Endian timestamp is 16 hex characters (8 bytes)"
ts_type = self.ts_types['windows_hex_le']
try:
if not len(self.whle) == 16 or not all(char in hexdigits for char in self.whle):
self.in_windows_hex_le = indiv_output = combined_output = False
pass
else:
indiv_output = combined_output = False
endianness_change, = struct.unpack("<Q", unhexlify(self.whle))
converted_time = endianness_change / 10
try:
dt_obj = self.epoch_1601 + timedelta(microseconds=converted_time)
self.in_windows_hex_le = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_windows_hex_le))
combined_output = str("{}{}\t\t{} UTC{}".format(self.left_color, ts_type, self.in_windows_hex_le, self.right_color))
except OverflowError:
pass
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_windows_hex_le = indiv_output = combined_output = False
return self.in_windows_hex_le, indiv_output, combined_output, reason
def to_win_64_hexle(self):
ts_type = self.ts_types['windows_hex_le']
try:
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
minus_epoch = dt_obj - self.epoch_1601
calculated_time = minus_epoch.microseconds + ((minus_epoch.seconds - int(dt_tz)) * 1000000) + (minus_epoch.days * 86400000000)
self.out_windows_hex_le = str(struct.pack("<Q", int(calculated_time*10)).hex()).zfill(16)
ts_output = str("{}\t\t{}".format(ts_type, self.out_windows_hex_le))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_windows_hex_le = ts_output = False
return self.out_windows_hex_le, ts_output
def from_chrome(self):
reason = "[!] Chrome/Webkit timestamp is 17 digits"
ts_type = self.ts_types['chrome']
try:
if not len(self.chrome) == 17 or not self.chrome.isdigit():
self.in_chrome = indiv_output = combined_output = False
pass
else:
delta = timedelta(microseconds=int(self.chrome))
converted_time = self.epoch_1601 + delta
self.in_chrome = converted_time.strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_chrome))
combined_output = str("{}{}\t\t\t{} UTC{}".format(self.left_color, ts_type, self.in_chrome, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_chrome = indiv_output = combined_output = False
return self.in_chrome, indiv_output, combined_output, reason
def to_chrome(self):
ts_type = self.ts_types['chrome']
try:
dt_obj = duparser.parse(self.timestamp)
nano_seconds = ''
if '.' in self.timestamp:
nano_seconds = self.timestamp.split('.')[1].split(' ')[0]
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
micro_seconds = (dt_obj - self.epoch_1601).microseconds
chrome_time = ((dt_obj - self.epoch_1601).total_seconds() - int(dt_tz))
chrome_micro = str(chrome_time).split('.')[1]
if (len(nano_seconds) == 6 and len(chrome_micro) < 6) or len(nano_seconds) > 6 or len(nano_seconds) == 6:
chrome_time = str(chrome_time).replace(str(chrome_time).split('.')[1], str(micro_seconds).zfill(6))
self.out_chrome = str(chrome_time).replace('.', '')
else:
self.out_chrome = str(int(chrome_time * 1000000))
ts_output = str("{}\t\t\t{}".format(ts_type, self.out_chrome))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_chrome = ts_output = False
return self.out_chrome, ts_output
def from_ad(self):
reason = "[!] Active Directory/LDAP timestamps are 18 digits"
ts_type = self.ts_types['ad']
try:
if not len(self.active) == 18 or not self.active.isdigit():
self.in_ad = indiv_output = combined_output = False
pass
else:
dt_obj = dt.utcfromtimestamp((float(int(self.active) - self.epoch_active) / self.hundreds_nano))
self.in_ad = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_ad))
combined_output = str("{}{}\t{} UTC{}".format(self.left_color, ts_type, self.in_ad, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_ad = indiv_output = combined_output = False
return self.in_ad, indiv_output, combined_output, reason
def to_ad(self):
ts_type = self.ts_types['ad']
try:
nano_seconds = ''
if '.' in self.timestamp:
nano_seconds = self.timestamp.split('.')[1].split(' ')[0]
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
if len(nano_seconds) == 7:
dt_obj = dt_obj.replace(microsecond=0)
nano_seconds = int(nano_seconds)
elif len(nano_seconds) > 7:
dt_obj = dt_obj.replace(microsecond=0)
nano_seconds = int(nano_seconds[:-(len(nano_seconds) - 7)])
elif len(nano_seconds) == 6 or (len(nano_seconds) == 5 and len(str(dt_obj.microsecond)) == 6):
nano_seconds = dt_obj.microsecond * 10
dt_obj = dt_obj.replace(microsecond=0)
else:
nano_seconds = 0
tz_shift = int(((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz)) * self.hundreds_nano) + nano_seconds
self.out_adtime = str(int(tz_shift) + int(self.epoch_active))
ts_output = str("{}\t{}".format(ts_type, self.out_adtime))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_adtime = ts_output = False
return self.out_adtime, ts_output
def from_unix_hex_32be(self):
reason = "[!] Unix Hex 32-bit Big-Endian timestamps are 8 hex characters (4 bytes)"
ts_type = self.ts_types['unix_hex_32']
try:
if not len(self.uhbe) == 8 or not all(char in hexdigits for char in self.uhbe):
self.in_unix_hex_32 = indiv_output = combined_output = False
pass
else:
to_dec = int(self.uhbe, 16)
self.in_unix_hex_32 = dt.utcfromtimestamp(float(to_dec)).strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_unix_hex_32))
combined_output = str("{}{}\t\t{} UTC{}".format(self.left_color, ts_type, self.in_unix_hex_32, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_unix_hex_32 = indiv_output = combined_output = False
return self.in_unix_hex_32, indiv_output, combined_output, reason
def to_unix_hex_32be(self):
ts_type = self.ts_types['unix_hex_32']
try:
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
unix_time = int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))
self.out_unix_hex_32 = str(struct.pack(">L", unix_time).hex())
ts_output = str("{}\t\t{}".format(ts_type, self.out_unix_hex_32))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_unix_hex_32 = ts_output = False
return self.out_unix_hex_32, ts_output
def from_unix_hex_32le(self):
reason = "[!] Unix Hex 32-bit Little-Endian timestamps are 8 hex characters (4 bytes)"
ts_type = self.ts_types['unix_hex_32le']
try:
if not len(self.uhle) == 8 or not all(char in hexdigits for char in self.uhle):
self.in_unix_hex_32le = indiv_output = combined_output = False
pass
else:
to_dec = struct.unpack("<L", unhexlify(self.uhle))[0]
self.in_unix_hex_32le = dt.utcfromtimestamp(float(to_dec)).strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_unix_hex_32le))
combined_output = str("{}{}\t\t{} UTC{}".format(self.left_color, ts_type, self.in_unix_hex_32le, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_unix_hex_32le = indiv_output = combined_output = False
return self.in_unix_hex_32le, indiv_output, combined_output, reason
def to_unix_hex_32le(self):
ts_type = self.ts_types['unix_hex_32le']
try:
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
unix_time = int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))
self.out_unix_hex_32le = str(struct.pack("<L", unix_time).hex())
ts_output = str("{}\t\t{}".format(ts_type, self.out_unix_hex_32le))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_unix_hex_32le = ts_output = False
return self.out_unix_hex_32le, ts_output
def from_cookie(self):
        reason = "[!] Internet Explorer Cookie timestamps (txt cookies) consist of 2 integer values. They must be input with a comma between them."
ts_type = self.ts_types['cookie']
try:
if not ("," in self.cookie) or not (self.cookie.split(",")[0].isdigit() and self.cookie.split(",")[1].isdigit()):
self.in_cookie = indiv_output = combined_output = False
pass
else:
low, high = [int(h, base=10) for h in self.cookie.split(',')]
calc = 10**-7 * (high * 2**32 + low) - 11644473600
if calc >= 1e+11:
self.in_cookie = indiv_output = combined_output = False
pass
else:
dt_obj = dt.utcfromtimestamp(calc)
self.in_cookie = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
indiv_output = str("{} {} UTC".format(ts_type, self.in_cookie))
combined_output = str("{}{}\t\t{} UTC{}".format(self.left_color, ts_type, self.in_cookie, self.right_color))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.in_cookie = indiv_output = combined_output = False
return self.in_cookie, indiv_output, combined_output, reason
def to_cookie(self):
ts_type = self.ts_types['cookie']
try:
dt_obj = duparser.parse(self.timestamp)
if hasattr(dt_obj.tzinfo, '_offset'):
dt_tz = dt_obj.tzinfo._offset.total_seconds()
dt_obj = duparser.parse(self.timestamp, ignoretz=True)
else:
dt_tz = 0
unix_time = int((dt_obj - self.epoch_1970).total_seconds() - int(dt_tz))
high = int(((unix_time + 11644473600) * 10**7) / 2**32)
low = int((unix_time + 11644473600) * 10**7) - (high * 2**32)
self.out_cookie = str(low) + ',' + str(high)
ts_output = str("{}\t\t{}".format(ts_type, self.out_cookie))
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(str(exc_type) + " - " + str(exc_obj) + " - line " + str(exc_tb.tb_lineno))
self.out_cookie = ts_output = False
return self.out_cookie, ts_output
|
MIT License
|
snudm-starlab/falcon2
|
src/imagenetutils/dataloaders.py
|
get_dali_train_loader
|
python
|
def get_dali_train_loader(dali_cpu=False):
def gdtl(data_path, batch_size, workers=5, _worker_init_fn=None):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
traindir = os.path.join(data_path, 'train')
pipe = HybridTrainPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = traindir, crop = 224, dali_cpu=dali_cpu)
pipe.build()
train_loader = DALIClassificationIterator(pipe, size = int(pipe.epoch_size("Reader") / world_size))
return DALIWrapper(train_loader), int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdtl
|
DALI train loader factory.
:param dali_cpu: whether the DALI pipeline decodes images on the CPU instead of the GPU
:return: gdtl: function that builds the DALI train loader and also returns the number of batches per epoch
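A hypothetical usage sketch; it assumes NVIDIA DALI is installed, a GPU is available, and an ImageNet-style layout exists under /data/imagenet/train:
get_loader = get_dali_train_loader(dali_cpu=False)
train_loader, steps_per_epoch = get_loader("/data/imagenet", batch_size=256, workers=4)
for images, labels in train_loader:
    # images arrive as GPU tensors, labels as CUDA long tensors
    break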
|
https://github.com/snudm-starlab/falcon2/blob/8f41ea2f14d640972aabbf074fa181078edc2d53/src/imagenetutils/dataloaders.py#L179-L217
|
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
DATA_BACKEND_CHOICES = ['pytorch']
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
DATA_BACKEND_CHOICES.append('dali-gpu')
DATA_BACKEND_CHOICES.append('dali-cpu')
except ImportError:
print("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = True)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.HostDecoderRandomCrop(device=dali_device, output_type=types.RGB,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
else:
dali_device = "gpu"
self.decode = ops.ImageDecoderRandomCrop(device="mixed", output_type=types.RGB, device_memory_padding=211025920, host_memory_padding=140544512,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability = 0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror = rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = False)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.res = ops.Resize(device = "gpu", resize_shorter = size)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper:
def gen_wrapper(dalipipeline):
for data in dalipipeline:
input = data[0]["data"]
target = data[0]["label"].squeeze().cuda().long()
yield input, target
dalipipeline.reset()
def __init__(self, dalipipeline):
self.dalipipeline = dalipipeline
def __iter__(self):
return DALIWrapper.gen_wrapper(self.dalipipeline)
|
Apache License 2.0
|
pyobo/pyobo
|
src/pyobo/sources/msigdb.py
|
iter_terms
|
python
|
def iter_terms(version: str) -> Iterable[Term]:
xml_url = f"{BASE_URL}/{version}/msigdb_v{version}.xml"
path = ensure_path(prefix=PREFIX, url=xml_url, version=version)
tree = ElementTree.parse(path)
for entry in tqdm(tree.getroot(), desc=f"{PREFIX} v{version}"):
attrib = dict(entry.attrib)
tax_id = _SPECIES[attrib["ORGANISM"]]
reference_id = attrib["PMID"].strip()
if not reference_id:
reference = None
elif reference_id.startswith("GSE"):
reference = Reference("gse", reference_id)
else:
reference = Reference("pubmed", reference_id)
identifier = attrib["SYSTEMATIC_NAME"]
name = attrib["STANDARD_NAME"]
is_obsolete = attrib["CATEGORY_CODE"] == "ARCHIVED"
term = Term(
reference=Reference(PREFIX, identifier, name),
definition=_get_definition(attrib),
provenance=reference and [reference],
is_obsolete=is_obsolete,
)
for key in [
"CATEGORY_CODE",
"SUB_CATEGORY_CODE",
"CONTRIBUTOR",
"EXACT_SOURCE",
"EXTERNAL_DETAILS_URL",
]:
value = attrib[key].strip()
if value:
term.append_property(key.lower(), value)
term.set_species(tax_id)
contributor = attrib["CONTRIBUTOR"]
external_id = attrib["EXACT_SOURCE"]
external_details = attrib["EXTERNAL_DETAILS_URL"]
if contributor == "WikiPathways":
if not external_id:
logger.warning(
"missing %s source: msigdb:%s (%s)", contributor, identifier, external_details
)
term.append_xref(Reference("wikipathways", external_id))
elif contributor == "Reactome":
if not external_id:
logger.warning(
"missing %s source: msigdb:%s (%s)", contributor, identifier, external_details
)
term.append_xref(Reference("reactome", external_id))
elif contributor == "Gene Ontology":
if not external_id:
external_id = external_details[len(GO_URL_PREFIX) :]
if not external_id:
logger.warning(
"missing %s source: msigdb:%s (%s)", contributor, identifier, external_details
)
term.append_xref(Reference("go", external_id))
elif contributor == "KEGG":
if not external_id:
                # strip the URL prefix and the trailing ".html" to recover the KEGG pathway id
                external_id = external_details[len(KEGG_URL_PREFIX) : -len(".html")]
if not external_id:
logger.warning(
"missing %s source: msigdb:%s (%s)", contributor, identifier, external_details
)
term.append_xref(Reference("kegg.pathway", external_id))
for ncbigene_id in attrib["MEMBERS_EZID"].strip().split(","):
if ncbigene_id:
term.append_relationship(
has_part, Reference(prefix="ncbigene", identifier=ncbigene_id)
)
yield term
|
Get MSigDb terms.
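A hedged usage sketch; the version string is hypothetical, and the first call downloads the matching MSigDB XML release through ensure_path:
from pyobo.sources.msigdb import iter_terms
for term in iter_terms(version="7.5.1"):
    print(term)   # one pyobo Term per MSigDB gene set
    break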
|
https://github.com/pyobo/pyobo/blob/678b4eeb5ba40205ab5ed8315ae0cc1d4ff3199f/src/pyobo/sources/msigdb.py#L50-L131
|
import logging
from typing import Iterable, Optional
from xml.etree import ElementTree
import bioversions
import click
from more_click import verbose_option
from tqdm import tqdm
from ..struct import Obo, Reference, Term, has_part
from ..utils.path import ensure_path
logger = logging.getLogger(__name__)
PREFIX = "msigdb"
BASE_URL = "https://data.broadinstitute.org/gsea-msigdb/msigdb/release"
def get_obo() -> Obo:
version = bioversions.get_version(PREFIX)
return Obo(
ontology=PREFIX,
name="Molecular Signatures Database",
iter_terms=iter_terms,
iter_terms_kwargs=dict(version=version),
data_version=version,
auto_generated_by=f"bio2obo:{PREFIX}",
typedefs=[has_part],
)
_SPECIES = {
"Homo sapiens": "9606",
"Mus musculus": "10090",
"Rattus norvegicus": "10116",
"Macaca mulatta": "9544",
"Danio rerio": "7955",
}
REACTOME_URL_PREFIX = "https://www.reactome.org/content/detail/"
GO_URL_PREFIX = "http://amigo.geneontology.org/amigo/term/GO:"
KEGG_URL_PREFIX = "http://www.genome.jp/kegg/pathway/hsa/"
|
MIT License
|
howardhsu/bert-for-rrc-absa
|
pytorch-pretrained-bert/src/absa_data_utils.py
|
AscProcessor._create_examples
|
python
|
def _create_examples(self, lines, set_type):
examples = []
for (i, ids) in enumerate(lines):
guid = "%s-%s" % (set_type, ids )
text_a = lines[ids]['term']
text_b = lines[ids]['sentence']
label = lines[ids]['polarity']
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
Creates examples for the training and dev sets.
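A small sketch of the dictionary layout the method expects, assuming absa_data_utils (and its pytorch_pretrained_bert dependency) is importable from src/; the id, term and sentence are made up:
from absa_data_utils import AscProcessor
lines = {
    "42": {
        "term": "battery life",
        "sentence": "The battery life is great.",
        "polarity": "positive",
    }
}
examples = AscProcessor()._create_examples(lines, "train")
print(examples[0].guid, examples[0].text_a, examples[0].label)   # train-42 battery life positive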
|
https://github.com/howardhsu/bert-for-rrc-absa/blob/2d194b1b2fb106c93e64b68513eff48ad0b99a9f/pytorch-pretrained-bert/src/absa_data_utils.py#L152-L162
|
import json
import os
from collections import defaultdict
import random
from pytorch_pretrained_bert.tokenization import BertTokenizer
class ABSATokenizer(BertTokenizer):
def subword_tokenize(self, tokens, labels):
split_tokens, split_labels= [], []
idx_map=[]
for ix, token in enumerate(tokens):
sub_tokens=self.wordpiece_tokenizer.tokenize(token)
for jx, sub_token in enumerate(sub_tokens):
split_tokens.append(sub_token)
if labels[ix]=="B" and jx>0:
split_labels.append("I")
else:
split_labels.append(labels[ix])
idx_map.append(ix)
return split_tokens, split_labels, idx_map
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_test_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_json(cls, input_file):
with open(input_file) as f:
return json.load(f)
class AeProcessor(DataProcessor):
def get_train_examples(self, data_dir, fn="train.json"):
return self._create_examples(
self._read_json(os.path.join(data_dir, fn)), "train")
def get_dev_examples(self, data_dir, fn="dev.json"):
return self._create_examples(
self._read_json(os.path.join(data_dir, fn)), "dev")
def get_test_examples(self, data_dir, fn="test.json"):
return self._create_examples(
self._read_json(os.path.join(data_dir, fn)), "test")
def get_labels(self):
return ["O", "B", "I"]
def _create_examples(self, lines, set_type):
examples = []
for (i, ids) in enumerate(lines):
guid = "%s-%s" % (set_type, ids )
text_a = lines[ids]['sentence']
label = lines[ids]['label']
examples.append(
InputExample(guid=guid, text_a=text_a, label=label) )
return examples
class AscProcessor(DataProcessor):
def get_train_examples(self, data_dir, fn="train.json"):
return self._create_examples(
self._read_json(os.path.join(data_dir, fn)), "train")
def get_dev_examples(self, data_dir, fn="dev.json"):
return self._create_examples(
self._read_json(os.path.join(data_dir, fn)), "dev")
def get_test_examples(self, data_dir, fn="test.json"):
return self._create_examples(
self._read_json(os.path.join(data_dir, fn)), "test")
def get_labels(self):
return ["positive", "negative", "neutral"]
|
Apache License 2.0
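A minimal sketch of the dictionary layout AscProcessor._create_examples consumes and the objects it builds; the sample data and the stand-in InputExample class below are illustrative assumptions, not repository code.
class InputExample:
    # Stand-in for the repository's InputExample, kept only for this sketch.
    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid, self.text_a, self.text_b, self.label = guid, text_a, text_b, label

data = {
    "42": {"term": "battery life", "sentence": "The battery life is great.", "polarity": "positive"},
}
examples = [
    InputExample(guid="train-%s" % ids, text_a=entry["term"], text_b=entry["sentence"], label=entry["polarity"])
    for ids, entry in data.items()
]
print(examples[0].guid, examples[0].text_a, examples[0].label)  # train-42 battery life positive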
|
2ndwatch/cloudendure-python
|
cloudendure/cloudendure_api/models/cloud_endure_configurations.py
|
CloudEndureConfigurations.value
|
python
|
def value(self, value):
self._value = value
|
Sets the value of this CloudEndureConfigurations.
:param value: The value of this CloudEndureConfigurations. # noqa: E501
:type: str
|
https://github.com/2ndwatch/cloudendure-python/blob/f81d1be1422b7c19adedb06c584803eaaa811919/cloudendure/cloudendure_api/models/cloud_endure_configurations.py#L78-L86
|
import pprint
import re
import six
class CloudEndureConfigurations:
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {"name": "str", "value": "str"}
attribute_map = {"name": "name", "value": "value"}
def __init__(self, name=None, value=None):
self._name = None
self._value = None
self.discriminator = None
if name is not None:
self.name = name
if value is not None:
self.value = value
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def value(self):
return self._value
@value.setter
|
MIT License
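A short usage sketch for the setter above; the import path mirrors this record's module path and is otherwise an assumption, not verified.
from cloudendure.cloudendure_api.models.cloud_endure_configurations import CloudEndureConfigurations

cfg = CloudEndureConfigurations(name="replicationTag")
cfg.value = "prod"            # routed through the @value.setter shown above
print(cfg.name, cfg.value)    # replicationTag prod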
|
haltakov/simple-photo-gallery
|
simplegallery/common.py
|
log
|
python
|
def log(message):
print(message)
|
Log a message to the console
:param message: message string
|
https://github.com/haltakov/simple-photo-gallery/blob/d47299072644b2e9d319d1e0bc52b020fa06c42f/simplegallery/common.py#L16-L21
|
import json
class SPGException(Exception):
def __init__(self, message):
super().__init__()
self.message = message
|
MIT License
|
ganeti/ganeti
|
lib/hypervisor/hv_xen.py
|
_GetAllInstanceList
|
python
|
def _GetAllInstanceList(fn, include_node, delays, timeout):
instance_list_errors = []
try:
lines = utils.Retry(_RunInstanceList, delays, timeout,
args=(fn, instance_list_errors))
except utils.RetryTimeout:
if instance_list_errors:
instance_list_result = instance_list_errors.pop()
errmsg = ("listing instances failed, timeout exceeded (%s): %s" %
(instance_list_result.fail_reason, instance_list_result.output))
else:
errmsg = "listing instances failed"
raise errors.HypervisorError(errmsg)
return _ParseInstanceList(lines, include_node)
|
Return the list of instances including running and shutdown.
See L{_RunInstanceList} and L{_ParseInstanceList} for parameter details.
|
https://github.com/ganeti/ganeti/blob/4d21019c72cba4d746f5d17ca22098f4c7682e9c/lib/hypervisor/hv_xen.py#L203-L224
|
import logging
import errno
import os
import string
import shutil
import time
from io import StringIO
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti.hypervisor import hv_base
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
XEND_CONFIG_FILE = utils.PathJoin(pathutils.XEN_CONFIG_DIR, "xend-config.sxp")
XL_CONFIG_FILE = utils.PathJoin(pathutils.XEN_CONFIG_DIR, "xen/xl.conf")
VIF_BRIDGE_SCRIPT = utils.PathJoin(pathutils.XEN_CONFIG_DIR,
"scripts/vif-bridge")
_DOM0_NAME = "Domain-0"
_DISK_LETTERS = string.ascii_lowercase
_FILE_DRIVER_MAP = {
constants.FD_LOOP: "file",
constants.FD_BLKTAP: "tap:aio",
constants.FD_BLKTAP2: "tap2:tapdisk:aio",
}
def _CreateConfigCpus(cpu_mask):
cpu_list = utils.ParseMultiCpuMask(cpu_mask)
if len(cpu_list) == 1:
all_cpu_mapping = cpu_list[0]
if all_cpu_mapping == constants.CPU_PINNING_OFF:
return None
else:
return "cpu = \"%s\"" % ",".join(map(str, all_cpu_mapping))
else:
def _GetCPUMap(vcpu):
if vcpu[0] == constants.CPU_PINNING_ALL_VAL:
cpu_map = constants.CPU_PINNING_ALL_XEN
else:
cpu_map = ",".join(map(str, vcpu))
return "\"%s\"" % cpu_map
return "cpus = [ %s ]" % ", ".join(map(_GetCPUMap, cpu_list))
def _RunInstanceList(fn, instance_list_errors):
result = fn()
if result.failed:
logging.error("Retrieving the instance list from xen failed (%s): %s",
result.fail_reason, result.output)
instance_list_errors.append(result)
raise utils.RetryAgain()
return result.stdout.splitlines()
class _InstanceCrashed(errors.GenericError):
  pass
def _ParseInstanceList(lines, include_node):
result = []
for line in lines[1:]:
data = line.split()
if len(data) != 6:
raise errors.HypervisorError("Can't parse instance list,"
" line: %s" % line)
try:
data[1] = int(data[1])
data[2] = int(data[2])
data[3] = int(data[3])
data[4] = _XenToHypervisorInstanceState(data[4])
data[5] = float(data[5])
except (TypeError, ValueError) as err:
raise errors.HypervisorError("Can't parse instance list,"
" line: %s, error: %s" % (line, err))
except _InstanceCrashed:
continue
if include_node or data[0] != _DOM0_NAME:
result.append(data)
return result
def _InstanceDomID(info):
return info[1]
def _InstanceRunning(info):
return info[4] == hv_base.HvInstanceState.RUNNING
def _InstanceRuntime(info):
return info[5]
|
BSD 2-Clause Simplified License
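A standalone sketch of the retry-and-collect-errors pattern _GetAllInstanceList relies on; retry() below is a simplified stand-in for ganeti's utils.Retry and the command result object is faked.
import time

class RetryTimeout(Exception):
    pass

def retry(fn, delays, timeout, args=()):
    # Simplified stand-in for utils.Retry: call fn until it stops raising or time runs out.
    deadline = time.monotonic() + timeout
    for delay in delays:
        try:
            return fn(*args)
        except Exception:          # plays the role of utils.RetryAgain
            pass
        if time.monotonic() + delay > deadline:
            break
        time.sleep(delay)
    raise RetryTimeout()

def failing_cmd():
    # Fake "xl list" result, always failing.
    return {"failed": True, "stdout": "", "fail_reason": "exited with 1", "output": "xl: error"}

def run_instance_list(fn, errors):
    result = fn()
    if result["failed"]:
        errors.append(result)      # keep the failure for the final error message
        raise RuntimeError("retry")
    return result["stdout"].splitlines()

errors_seen = []
try:
    retry(run_instance_list, (0.01, 0.02), 0.1, args=(failing_cmd, errors_seen))
except RetryTimeout:
    last = errors_seen.pop()
    print("listing instances failed, timeout exceeded (%s): %s" % (last["fail_reason"], last["output"]))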
|
atomlinter/linter-pylama
|
bin/deps/flake8/options/manager.py
|
Option.to_optparse
|
python
|
def to_optparse(self):
if self._opt is None:
self._opt = optparse.Option(*self.option_args,
**self.option_kwargs)
return self._opt
|
Convert a Flake8 Option to an optparse Option.
|
https://github.com/atomlinter/linter-pylama/blob/9157f7f84083007161814c93b537a712984f3c86/bin/deps/flake8/options/manager.py#L149-L154
|
import collections
import logging
import optparse
from flake8 import utils
LOG = logging.getLogger(__name__)
class Option(object):
def __init__(self, short_option_name=None, long_option_name=None,
action=None, default=None, type=None, dest=None,
nargs=None, const=None, choices=None, callback=None,
callback_args=None, callback_kwargs=None, help=None,
metavar=None,
parse_from_config=False, comma_separated_list=False,
normalize_paths=False):
self.short_option_name = short_option_name
self.long_option_name = long_option_name
self.option_args = [
x for x in (short_option_name, long_option_name) if x is not None
]
self.option_kwargs = {
'action': action,
'default': default,
'type': type,
'dest': self._make_dest(dest),
'nargs': nargs,
'const': const,
'choices': choices,
'callback': callback,
'callback_args': callback_args,
'callback_kwargs': callback_kwargs,
'help': help,
'metavar': metavar,
}
for key, value in self.option_kwargs.items():
setattr(self, key, value)
self.parse_from_config = parse_from_config
self.comma_separated_list = comma_separated_list
self.normalize_paths = normalize_paths
self.config_name = None
if parse_from_config:
if not long_option_name:
raise ValueError('When specifying parse_from_config=True, '
'a long_option_name must also be specified.')
self.config_name = long_option_name[2:].replace('-', '_')
self._opt = None
def __repr__(self):
return (
'Option({0}, {1}, action={action}, default={default}, '
'dest={dest}, type={type}, callback={callback}, help={help},'
' callback={callback}, callback_args={callback_args}, '
'callback_kwargs={callback_kwargs}, metavar={metavar})'
).format(self.short_option_name, self.long_option_name,
**self.option_kwargs)
def _make_dest(self, dest):
if dest:
return dest
if self.long_option_name:
return self.long_option_name[2:].replace('-', '_')
return self.short_option_name[1]
def normalize(self, value, *normalize_args):
if self.normalize_paths:
normalize = utils.normalize_path
if self.comma_separated_list:
normalize = utils.normalize_paths
return normalize(value, *normalize_args)
elif self.comma_separated_list:
return utils.parse_comma_separated_list(value)
return value
def normalize_from_setuptools(self, value):
value = self.normalize(value)
if self.type == 'int' or self.action == 'count':
return int(value)
if self.action in ('store_true', 'store_false'):
value = str(value).upper()
if value in ('1', 'T', 'TRUE', 'ON'):
return True
if value in ('0', 'F', 'FALSE', 'OFF'):
return False
return value
|
MIT License
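A self-contained sketch of what to_optparse() produces, using plain optparse rather than flake8's wrapper; the option definition below is made up for illustration.
import optparse

option_args = ["-v", "--verbosity"]
option_kwargs = {"action": "count", "default": 0, "dest": "verbosity", "help": "increase verbosity"}

opt = optparse.Option(*option_args, **option_kwargs)   # what to_optparse() builds and caches
parser = optparse.OptionParser()
parser.add_option(opt)
opts, _ = parser.parse_args(["-v", "-v"])
print(opts.verbosity)  # 2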
|
phonopy/phono3py
|
phono3py/phonon3/conductivity_LBTE.py
|
Conductivity_LBTE._prepare_collision_matrix
|
python
|
def _prepare_collision_matrix(self):
if self._is_reducible_collision_matrix:
if self._is_kappa_star:
self._average_collision_matrix_by_degeneracy()
num_mesh_points = np.prod(self._pp.mesh_numbers)
num_rot = len(self._point_operations)
rot_grid_points = np.zeros((num_rot, num_mesh_points), dtype="int_")
ir_gr_grid_points = np.array(
self._pp.bz_grid.bzg2grg[self._ir_grid_points], dtype="int_"
)
for i in range(num_mesh_points):
rot_grid_points[:, i] = self._pp.bz_grid.bzg2grg[
get_grid_points_by_rotations(
self._pp.bz_grid.grg2bzg[i], self._pp.bz_grid
)
]
self._expand_collisions(ir_gr_grid_points, rot_grid_points)
self._expand_local_values(ir_gr_grid_points, rot_grid_points)
self._combine_reducible_collisions()
weights = np.ones(np.prod(self._pp.mesh_numbers), dtype="int_")
self._symmetrize_collision_matrix()
else:
self._combine_collisions()
weights = self._get_weights()
for i, w_i in enumerate(weights):
for j, w_j in enumerate(weights):
self._collision_matrix[:, :, i, :, :, j, :, :] *= w_i * w_j
self._average_collision_matrix_by_degeneracy()
self._symmetrize_collision_matrix()
return weights
|
Prepare collision matrix to be solved.
|
https://github.com/phonopy/phono3py/blob/55df7fb796732a8b8ee893420152468a344b5f92/phono3py/phonon3/conductivity_LBTE.py#L469-L502
|
import sys
import time
import warnings
from typing import List
import numpy as np
from phonopy.phonon.degeneracy import degenerate_sets
from phonopy.phonon.group_velocity import GroupVelocity
from phonopy.units import Kb, THzToEv
from phono3py.file_IO import (
read_collision_from_hdf5,
read_pp_from_hdf5,
write_collision_eigenvalues_to_hdf5,
write_collision_to_hdf5,
write_kappa_to_hdf5,
write_unitary_matrix_to_hdf5,
)
from phono3py.phonon3.collision_matrix import CollisionMatrix
from phono3py.phonon3.conductivity import Conductivity, all_bands_exist, unit_to_WmK
from phono3py.phonon3.conductivity import write_pp as _write_pp
from phono3py.phonon3.interaction import Interaction
from phono3py.phonon.grid import get_grid_points_by_rotations
class Conductivity_LBTE(Conductivity):
def __init__(
self,
interaction: Interaction,
grid_points=None,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
boundary_mfp=None,
solve_collective_phonon=False,
is_reducible_collision_matrix=False,
is_kappa_star=True,
gv_delta_q=None,
is_full_pp=False,
read_pp=False,
pp_filename=None,
pinv_cutoff=1.0e-8,
pinv_solver=0,
log_level=0,
):
self._pp: Interaction
self._gv_obj: GroupVelocity
self._sigmas: List
self._grid_point_count: int
self._temperatures = None
self._sigma_cutoff = None
self._is_kappa_star = None
self._is_full_pp = None
self._log_level = None
self._boundary_mfp = None
self._point_operations = None
self._rotations_cartesian = None
self._grid_points = None
self._grid_weights = None
self._ir_grid_points = None
self._ir_grid_weights = None
self._kappa = None
self._mode_kappa = None
self._kappa_RTA = None
self._mode_kappa_RTA = None
self._read_gamma = False
self._read_gamma_iso = False
self._frequencies = None
self._cv = None
self._gv = None
self._f_vectors = None
self._gv_sum2 = None
self._mfp = None
self._gamma = None
self._gamma_iso = None
self._averaged_pp_interaction = None
self._conversion_factor = None
self._is_isotope = None
self._isotope = None
self._mass_variances = None
self._collision_eigenvalues = None
super().__init__(
interaction,
grid_points=grid_points,
temperatures=temperatures,
sigmas=sigmas,
sigma_cutoff=sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
boundary_mfp=boundary_mfp,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
log_level=log_level,
)
self._is_reducible_collision_matrix = is_reducible_collision_matrix
self._solve_collective_phonon = solve_collective_phonon
if not self._is_kappa_star:
self._is_reducible_collision_matrix = True
self._collision_matrix = None
self._read_pp = read_pp
self._pp_filename = pp_filename
self._pinv_cutoff = pinv_cutoff
self._pinv_solver = pinv_solver
if grid_points is None:
self._all_grid_points = True
else:
self._all_grid_points = False
if self._temperatures is not None:
self._allocate_values()
def set_kappa_at_sigmas(self):
if len(self._grid_points) != len(self._ir_grid_points):
print("Collision matrix is not well created.")
import sys
sys.exit(1)
else:
weights = self._prepare_collision_matrix()
self._set_kappa_at_sigmas(weights)
def get_f_vectors(self):
return self._f_vectors
@property
def collision_matrix(self):
return self._collision_matrix
@collision_matrix.setter
def collision_matrix(self, collision_matrix):
self._collision_matrix = collision_matrix
def get_collision_matrix(self):
warnings.warn(
"Use attribute, Conductivity_LBTE.collision_matrix "
"instead of Conductivity_LBTE.get_collision_matrix().",
DeprecationWarning,
)
return self.collision_matrix
def set_collision_matrix(self, collision_matrix):
warnings.warn(
"Use attribute, Conductivity_LBTE.collision_matrix "
"instead of Conductivity_LBTE.set_collision_matrix().",
DeprecationWarning,
)
self.collision_matrix = collision_matrix
def get_collision_eigenvalues(self):
return self._collision_eigenvalues
def get_mean_free_path(self):
return self._mfp
def get_frequencies_all(self):
return self._frequencies[self._pp.bz_grid.grg2bzg]
def get_kappa_RTA(self):
return self._kappa_RTA
def get_mode_kappa_RTA(self):
return self._mode_kappa_RTA
def delete_gp_collision_and_pp(self):
self._collision.delete_integration_weights()
self._pp.delete_interaction_strength()
def _run_at_grid_point(self):
i_gp = self._grid_point_count
self._show_log_header(i_gp)
gp = self._grid_points[i_gp]
if not self._all_grid_points:
self._collision_matrix[:] = 0
if not self._read_gamma:
self._collision.set_grid_point(gp)
if self._log_level:
print("Number of triplets: %d" % len(self._pp.get_triplets_at_q()[0]))
self._set_collision_matrix_at_sigmas(i_gp)
if self._is_reducible_collision_matrix:
i_data = self._pp.bz_grid.bzg2grg[gp]
else:
i_data = i_gp
self._set_cv(i_gp, i_data)
self._set_gv(i_gp, i_data)
self._set_gv_by_gv(i_gp, i_data)
if self._isotope is not None:
gamma_iso = self._get_gamma_isotope_at_sigmas(i_gp)
band_indices = self._pp.band_indices
self._gamma_iso[:, i_data, :] = gamma_iso[:, band_indices]
if self._log_level:
self._show_log(i_gp)
def _allocate_values(self):
num_band0 = len(self._pp.band_indices)
num_band = len(self._pp.primitive) * 3
num_temp = len(self._temperatures)
if self._is_reducible_collision_matrix:
self._allocate_reducible_colmat_values(num_temp, num_band0, num_band)
else:
self._allocate_ir_colmat_values(num_temp, num_band0, num_band)
def _allocate_local_values(self, num_temp, num_band0, num_grid_points):
self._kappa = np.zeros(
(len(self._sigmas), num_temp, 6), dtype="double", order="C"
)
self._kappa_RTA = np.zeros(
(len(self._sigmas), num_temp, 6), dtype="double", order="C"
)
self._gv = np.zeros((num_grid_points, num_band0, 3), dtype="double", order="C")
self._f_vectors = np.zeros(
(num_grid_points, num_band0, 3), dtype="double", order="C"
)
self._gv_sum2 = np.zeros(
(num_grid_points, num_band0, 6), dtype="double", order="C"
)
self._mfp = np.zeros(
(len(self._sigmas), num_temp, num_grid_points, num_band0, 3),
dtype="double",
order="C",
)
self._cv = np.zeros(
(num_temp, num_grid_points, num_band0), dtype="double", order="C"
)
if self._is_full_pp:
self._averaged_pp_interaction = np.zeros(
(num_grid_points, num_band0), dtype="double", order="C"
)
if self._gamma is None:
self._gamma = np.zeros(
(len(self._sigmas), num_temp, num_grid_points, num_band0),
dtype="double",
order="C",
)
if self._isotope is not None:
self._gamma_iso = np.zeros(
(len(self._sigmas), num_grid_points, num_band0),
dtype="double",
order="C",
)
self._mode_kappa = np.zeros(
(len(self._sigmas), num_temp, num_grid_points, num_band0, 6), dtype="double"
)
self._mode_kappa_RTA = np.zeros(
(len(self._sigmas), num_temp, num_grid_points, num_band0, 6), dtype="double"
)
def _allocate_reducible_colmat_values(self, num_temp, num_band0, num_band):
num_mesh_points = np.prod(self._pp.mesh_numbers)
if self._all_grid_points:
num_stored_grid_points = num_mesh_points
else:
num_stored_grid_points = 1
self._allocate_local_values(num_temp, num_band0, num_mesh_points)
self._collision = CollisionMatrix(
self._pp, is_reducible_collision_matrix=True, log_level=self._log_level
)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(
len(self._sigmas),
num_temp,
num_stored_grid_points,
num_band0,
num_mesh_points,
num_band,
),
dtype="double",
order="C",
)
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas), num_temp, num_mesh_points * num_band),
dtype="double",
order="C",
)
def _allocate_ir_colmat_values(self, num_temp, num_band0, num_band):
num_ir_grid_points = len(self._ir_grid_points)
num_grid_points = len(self._grid_points)
if self._all_grid_points:
num_stored_grid_points = num_grid_points
else:
num_stored_grid_points = 1
self._allocate_local_values(num_temp, num_band0, num_grid_points)
self._rot_grid_points = np.zeros(
(num_ir_grid_points, len(self._point_operations)), dtype="int_"
)
for i, ir_gp in enumerate(self._ir_grid_points):
self._rot_grid_points[i] = get_grid_points_by_rotations(
ir_gp, self._pp.bz_grid
)
self._collision = CollisionMatrix(
self._pp,
rotations_cartesian=self._rotations_cartesian,
num_ir_grid_points=num_ir_grid_points,
rot_grid_points=self._rot_grid_points,
log_level=self._log_level,
)
if self._collision_matrix is None:
self._collision_matrix = np.empty(
(
len(self._sigmas),
num_temp,
num_stored_grid_points,
num_band0,
3,
num_ir_grid_points,
num_band,
3,
),
dtype="double",
order="C",
)
self._collision_matrix[:] = 0
self._collision_eigenvalues = np.zeros(
(len(self._sigmas), num_temp, num_ir_grid_points * num_band * 3),
dtype="double",
order="C",
)
def _set_collision_matrix_at_sigmas(self, i_gp):
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "Calculating collision matrix with "
if sigma is None:
text += "tetrahedron method."
else:
text += "sigma=%s" % sigma
if self._sigma_cutoff is None:
text += "."
else:
text += "(%4.2f SD)." % self._sigma_cutoff
print(text)
self._collision.set_sigma(sigma, sigma_cutoff=self._sigma_cutoff)
self._collision.set_integration_weights()
if self._read_pp:
pp, _g_zero = read_pp_from_hdf5(
self._pp.mesh_numbers,
grid_point=self._grid_points[i_gp],
sigma=sigma,
sigma_cutoff=self._sigma_cutoff,
filename=self._pp_filename,
verbose=(self._log_level > 0),
)
_, g_zero = self._collision.get_integration_weights()
if self._log_level:
if len(self._sigmas) > 1:
print(
"Multiple sigmas or mixing smearing and "
"tetrahedron method is not supported."
)
if _g_zero is not None and (_g_zero != g_zero).any():
raise ValueError("Inconsistency found in g_zero.")
self._collision.set_interaction_strength(pp)
elif j != 0 and (self._is_full_pp or self._sigma_cutoff is None):
if self._log_level:
print("Existing ph-ph interaction is used.")
else:
if self._log_level:
print("Calculating ph-ph interaction...")
self._collision.run_interaction(is_full_pp=self._is_full_pp)
if self._is_full_pp and j == 0:
self._averaged_pp_interaction[
i_gp
] = self._pp.get_averaged_interaction()
for k, t in enumerate(self._temperatures):
self._collision.set_temperature(t)
self._collision.run()
if self._all_grid_points:
if self._is_reducible_collision_matrix:
i_data = self._pp.bz_grid.bzg2grg[self._grid_points[i_gp]]
else:
i_data = i_gp
else:
i_data = 0
self._gamma[j, k, i_data] = self._collision.get_imag_self_energy()
self._collision_matrix[
j, k, i_data
] = self._collision.get_collision_matrix()
|
BSD 3-Clause New or Revised License
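A tiny numpy sketch of the weighting step in _prepare_collision_matrix above: every (i, j) block of the collision matrix is scaled by w_i * w_j. Shapes are shrunk and values made up for illustration.
import numpy as np

num_ir, num_band = 3, 2
collision = np.ones((1, 1, num_ir, num_band, 3, num_ir, num_band, 3))  # (sigma, T, i, band, 3, j, band, 3)
weights = np.array([1, 2, 3])
for i, w_i in enumerate(weights):
    for j, w_j in enumerate(weights):
        collision[:, :, i, :, :, j, :, :] *= w_i * w_j
print(collision[0, 0, 0, 0, 0, :, 0, 0])  # [1. 2. 3.]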
|
irc-sphere/hyperstream
|
hyperstream/tool/tool.py
|
Tool._execute
|
python
|
def _execute(self, sources, alignment_stream, interval):
raise NotImplementedError
|
Tool implementations should override this function to actually perform computations
:param sources: The source streams (possibly None)
:param alignment_stream: The alignment stream
:param interval: The time interval
:type sources: list[Stream] | tuple[Stream] | None
:type alignment_stream: Stream | None
:type interval: TimeInterval
:return: None
|
https://github.com/irc-sphere/hyperstream/blob/35d63962f78cdfaac0383e38d79b16af373f1492/hyperstream/tool/tool.py#L35-L47
|
from . import BaseTool
from ..time_interval import TimeInterval, TimeIntervals
from ..stream import Stream
from ..utils import StreamNotAvailableError
import logging
class Tool(BaseTool):
|
MIT License
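A standalone sketch of the override contract described above; ToolSketch and the tuple interval are stand-ins for illustration, not hyperstream's Tool and TimeInterval classes.
class ToolSketch:
    def _execute(self, sources, alignment_stream, interval):
        raise NotImplementedError

class ConstantTool(ToolSketch):
    def _execute(self, sources, alignment_stream, interval):
        # Yield (time, value) pairs over the interval; real tools read from their sources.
        for t in range(interval[0], interval[1]):
            yield (t, 42)

print(list(ConstantTool()._execute(None, None, (0, 3))))  # [(0, 42), (1, 42), (2, 42)]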
|
chryswoods/acquire
|
Acquire/Client/_resources.py
|
Resources.from_data
|
python
|
def from_data(data):
r = Resources()
if data is None or len(data) == 0:
return r
        r._image = str(data["image"])
r._nodes = int(data["nodes"])
r._cores_per_node = int(data["cores_per_node"])
r._mem_per_core = str(data["mem_per_core"])
        r._gpus_per_node = int(data["gpus_per_node"])
r._shape = str(data["shape"])
r._tmp_disk_per_node = str(data["tmp_disk_per_node"])
r._scratch_disk = str(data["scratch_disk"])
r._campaign_disk = str(data["campaign_disk"])
return r
|
Return Resources constructed from a json-deserialised dictionary
|
https://github.com/chryswoods/acquire/blob/bf8a0465a531f3b485cb2a14c69dc9aea79451fd/Acquire/Client/_resources.py#L51-L68
|
__all__ = ["Resources"]
class Resources:
def __init__(self, image=None, nodes=1, cores_per_node=1,
mem_per_core="100MB", gpus_per_node=0, shape=None,
tmp_disk_per_node="4GB", scratch_disk="10GB",
campaign_disk="5GB"):
self._image = str(image)
self._nodes = int(nodes)
self._cores_per_node = int(cores_per_node)
self._mem_per_core = str(mem_per_core)
self._gpus_per_node = int(gpus_per_node)
if shape is not None:
self._shape = str(shape)
else:
self._shape = None
self._tmp_disk_per_node = str(tmp_disk_per_node)
self._scratch_disk = str(scratch_disk)
self._campaign_disk = str(campaign_disk)
def to_data(self):
data = {}
data["image"] = self._image
data["nodes"] = self._nodes
data["cores_per_node"] = self._cores_per_node
data["mem_per_core"] = self._mem_per_core
data["gpus_per_node"] = self._gpus_per_node
data["shape"] = self._shape
data["tmp_disk_per_node"] = self._tmp_disk_per_node
data["scratch_disk"] = self._scratch_disk
data["campaign_disk"] = self._campaign_disk
return data
@staticmethod
|
Apache License 2.0
|
clovaai/extd_pytorch
|
logger.py
|
Logger.scalar_summary
|
python
|
def scalar_summary(self, tag, value, step, scope=None):
if USE_NSML:
if self.last and self.last['step'] != step:
nsml.report(scope=scope, **self.last)
self.last = None
if self.last is None:
self.last = {'step': step, 'iter': step, 'epoch': 1}
self.last[tag] = value
else:
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
|
Log a scalar variable.
|
https://github.com/clovaai/extd_pytorch/blob/e99af10f282d07054c1cf7c4b8c035084daaff78/logger.py#L21-L33
|
import scipy.misc
import numpy as np
import tensorflow as tf
from io import BytesIO
try:
import nsml
USE_NSML=True
except ImportError:
USE_NSML=False
pass
class Logger(object):
def __init__(self, log_dir):
self.writer = tf.summary.FileWriter(log_dir)
self.last = None
|
MIT License
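A standalone sketch of the NSML branch above: values reported for the same step are batched into one dict and flushed when the step changes. report() is a stand-in for nsml.report.
reports = []
def report(**kwargs):
    reports.append(kwargs)

last = None
def scalar_summary(tag, value, step):
    global last
    if last and last["step"] != step:
        report(**last)    # flush the previous step's batch
        last = None
    if last is None:
        last = {"step": step, "iter": step, "epoch": 1}
    last[tag] = value

scalar_summary("loss", 0.9, step=1)
scalar_summary("acc", 0.5, step=1)
scalar_summary("loss", 0.7, step=2)   # triggers the flush of step 1
print(reports)  # [{'step': 1, 'iter': 1, 'epoch': 1, 'loss': 0.9, 'acc': 0.5}]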
|
ivan-vasilev/atpy
|
atpy/backtesting/environments.py
|
add_postgres_ohlc_1m
|
python
|
def add_postgres_ohlc_1m(dre: DataReplayEvents, bgn_prd: datetime.datetime, historical_depth=300, run_async=False, url: str = None, lmdb_path: str = None):
con = psycopg2.connect(url if url is not None else os.environ['POSTGRESQL_CACHE'])
lmdb_path = os.environ['ATPY_LMDB_PATH'] if lmdb_path is None and 'ATPY_LMDB_PATH' in os.environ else lmdb_path
cache = functools.partial(read_pickle, lmdb_path=lmdb_path) if lmdb_path is not None else None
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=60, interval_type='s', bars_table='bars_1m', bgn_prd=bgn_prd, delta=relativedelta(weeks=1), overlap=relativedelta(microseconds=-1), cache=cache)
if run_async:
bars_in_period = AsyncInPeriodProvider(bars_in_period)
dre.data_replay.add_source(bars_in_period, 'bars_1m', historical_depth=historical_depth, listeners=dre.listeners)
return dre.event_filter_by_source('bars_1m'), dre.event_filter_function('bars_1m')
|
Create DataReplay environment for bar data using PostgreSQL
:param dre: DataReplayEvents
:param bgn_prd: begin period
:param historical_depth: historical depth for source
:param run_async: generate data asynchronously
:param url: postgres url (can be obtained via env variable)
:param lmdb_path: path to lmdb cache file
:return: filter function that only accepts this event
|
https://github.com/ivan-vasilev/atpy/blob/abe72832ae8cec818b0e67989892c25456e9e5f5/atpy/backtesting/environments.py#L25-L48
|
import datetime
import functools
import logging
import os
import typing
import psycopg2
from dateutil.relativedelta import relativedelta
import atpy.data.iqfeed.util as iqutil
from atpy.backtesting.data_replay import DataReplayEvents, DataReplay
from atpy.backtesting.mock_exchange import MockExchange, StaticSlippageLoss, PerShareCommissionLoss
from atpy.backtesting.random_strategy import RandomStrategy
from atpy.data.cache.lmdb_cache import read_pickle
from atpy.data.cache.postgres_cache import BarsInPeriodProvider
from atpy.data.quandl.postgres_cache import SFInPeriodProvider
from atpy.data.ts_util import current_period, current_phase, gaps, rolling_mean, AsyncInPeriodProvider
from atpy.portfolio.portfolio_manager import PortfolioManager
def data_replay_events(listeners):
return DataReplayEvents(listeners, DataReplay(), event_name='data')
|
MIT License
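A small sketch of the weekly windows BarsInPeriodProvider is configured with above (delta of one week, overlap of minus one microsecond), using only dateutil; the start date is made up.
import datetime
from dateutil.relativedelta import relativedelta

bgn_prd = datetime.datetime(2017, 1, 2)
delta, overlap = relativedelta(weeks=1), relativedelta(microseconds=-1)
for i in range(3):
    start = bgn_prd + i * delta
    end = start + delta + overlap
    print(start, "->", end)   # non-overlapping one-week query windows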
|
vlasovskikh/funcparserlib
|
funcparserlib/parser.py
|
a
|
python
|
def a(value):
name = getattr(value, "name", value)
return some(lambda t: t == value).named(repr(name))
|
Return a parser that parses a token if it's equal to `value`.
Type: `(A) -> Parser[A, A]`
Examples:
```pycon
>>> expr = a("x")
>>> expr.parse("x")
'x'
>>> expr.parse("y")
Traceback (most recent call last):
...
parser.NoParseError: got unexpected token: 'y', expected: 'x'
```
Note:
Although `Parser.parse()` can parse sequences of any objects (including
`str` which is a sequence of `str` chars), **the recommended way** is
parsing sequences of `Token` objects.
You **should** use a regexp-based tokenizer `make_tokenizer()` defined in
`funcparserlib.lexer` to convert your text into a sequence of `Token` objects
before parsing it. You will get more readable parsing error messages (as `Token`
objects contain their position in the source file) and good separation of the
lexical and syntactic levels of the grammar.
|
https://github.com/vlasovskikh/funcparserlib/blob/3162777144d0013aad6a0293de57ad2ca63ed4cf/funcparserlib/parser.py#L619-L649
|
from __future__ import unicode_literals
__all__ = [
"some",
"a",
"tok",
"many",
"pure",
"finished",
"maybe",
"skip",
"oneplus",
"forward_decl",
"NoParseError",
"Parser",
]
import sys
import logging
import warnings
from funcparserlib.lexer import Token
log = logging.getLogger("funcparserlib")
debug = False
PY2 = sys.version_info < (3,)
if PY2:
string_types = (str, unicode)
else:
string_types = str
class Parser(object):
def __init__(self, p):
self.name = ""
self.define(p)
def named(self, name):
self.name = name
return self
def define(self, p):
f = getattr(p, "run", p)
if debug:
setattr(self, "_run", f)
else:
setattr(self, "run", f)
self.named(getattr(p, "name", p.__doc__))
def run(self, tokens, s):
if debug:
log.debug("trying %s" % self.name)
return self._run(tokens, s)
def _run(self, tokens, s):
raise NotImplementedError("you must define() a parser")
def parse(self, tokens):
try:
(tree, _) = self.run(tokens, State(0, 0, None))
return tree
except NoParseError as e:
max = e.state.max
if len(tokens) > max:
t = tokens[max]
if isinstance(t, Token):
if t.start is None or t.end is None:
loc = ""
else:
s_line, s_pos = t.start
e_line, e_pos = t.end
loc = "%d,%d-%d,%d: " % (s_line, s_pos, e_line, e_pos)
msg = "%s%s: %r" % (loc, e.msg, t.value)
elif isinstance(t, string_types):
msg = "%s: %r" % (e.msg, t)
else:
msg = "%s: %s" % (e.msg, t)
else:
msg = "got unexpected end of file"
if e.state.parser is not None:
msg = "%s, expected: %s" % (msg, e.state.parser.name)
e.msg = msg
raise
def __add__(self, other):
def magic(v1, v2):
if isinstance(v1, _Tuple):
return _Tuple(v1 + (v2,))
else:
return _Tuple((v1, v2))
@_TupleParser
def _add(tokens, s):
(v1, s2) = self.run(tokens, s)
(v2, s3) = other.run(tokens, s2)
return magic(v1, v2), s3
@Parser
def ignored_right(tokens, s):
v, s2 = self.run(tokens, s)
_, s3 = other.run(tokens, s2)
return v, s3
name = "(%s, %s)" % (self.name, other.name)
if isinstance(other, _IgnoredParser):
return ignored_right.named(name)
else:
return _add.named(name)
def __or__(self, other):
@Parser
def _or(tokens, s):
try:
return self.run(tokens, s)
except NoParseError as e:
state = e.state
try:
return other.run(tokens, State(s.pos, state.max, state.parser))
except NoParseError as e:
if s.max == e.state.max:
e.state = State(e.state.pos, e.state.max, _or)
raise
_or.name = "%s or %s" % (self.name, other.name)
return _or
def __rshift__(self, f):
@Parser
def _shift(tokens, s):
(v, s2) = self.run(tokens, s)
return f(v), s2
return _shift.named(self.name)
def bind(self, f):
@Parser
def _bind(tokens, s):
(v, s2) = self.run(tokens, s)
return f(v).run(tokens, s2)
_bind.name = "(%s >>=)" % (self.name,)
return _bind
def __neg__(self):
return _IgnoredParser(self)
class State(object):
def __init__(self, pos, max, parser=None):
self.pos = pos
self.max = max
self.parser = parser
def __str__(self):
return str((self.pos, self.max))
def __repr__(self):
return "State(%r, %r)" % (self.pos, self.max)
class NoParseError(Exception):
def __init__(self, msg="", state=None):
self.msg = msg
self.state = state
def __str__(self):
return self.msg
class _Tuple(tuple):
pass
class _TupleParser(Parser):
pass
class _Ignored(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "_Ignored(%s)" % repr(self.value)
def __eq__(self, other):
return isinstance(other, _Ignored) and self.value == other.value
@Parser
def finished(tokens, s):
if s.pos >= len(tokens):
return None, s
else:
s2 = State(s.pos, s.max, finished)
raise NoParseError("got unexpected token", s2)
finished.name = "end of file"
def many(p):
@Parser
def _many(tokens, s):
res = []
try:
while True:
(v, s) = p.run(tokens, s)
res.append(v)
except NoParseError as e:
return res, State(s.pos, e.state.max, e.state.parser)
_many.name = "{ %s }" % p.name
return _many
def some(pred):
@Parser
def _some(tokens, s):
if s.pos >= len(tokens):
s2 = State(s.pos, s.max, _some if s.pos == s.max else s.parser)
raise NoParseError("got unexpected end of file", s2)
else:
t = tokens[s.pos]
if pred(t):
pos = s.pos + 1
s2 = State(pos, max(pos, s.max), s.parser)
if debug:
log.debug('*matched* "%s", new state = %s' % (t, s2))
return t, s2
else:
if debug:
log.debug('failed "%s", state = %s' % (t, s))
s2 = State(s.pos, s.max, _some if s.pos == s.max else s.parser)
raise NoParseError("got unexpected token", s2)
_some.name = "some(...)"
return _some
|
MIT License
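A tiny extension of the docstring example above (assumes funcparserlib is installed); parsing a plain string works as the note explains, though real grammars should tokenize first.
from funcparserlib.parser import a, many

x_parser = a("x")
print(x_parser.parse("x"))          # 'x'
print(many(x_parser).parse("xxx"))  # ['x', 'x', 'x']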
|
nikikilbertus/blind-justice
|
python/src/tools.py
|
abs_fp
|
python
|
def abs_fp(x):
if _np_instance(x):
return np.abs(x)
return _apply_vectorized(x, lambda z: (z if z > 0. else -z))
|
Element wise absolute value with fallback to numpy.abs.
|
https://github.com/nikikilbertus/blind-justice/blob/2344609e55a2af20396ec042627ffed368e01e56/python/src/tools.py#L182-L186
|
import collections
from functools import reduce
import numpy as np
from spfpm.FixedPoint import FXnum
def to_fixed(x, family):
if family is None:
return x
xtype = _type_of(x)
if xtype == 'scal':
return FXnum(x, family=family)
if xtype == 'vec':
return [FXnum(a, family=family) for a in x]
else:
return [[FXnum(a, family=family) for a in x_row] for x_row in x]
def to_float(x):
if _np_instance(x):
return x
xtype = _type_of(x)
if xtype == 'scal':
return float(x)
if xtype == 'vec':
return np.array([float(a) for a in x], dtype=float)
else:
return np.array([[float(a) for a in x_row] for x_row in x],
dtype=float)
def stack_fp(x, y):
if _np_instance(x, y):
return np.hstack((x, y))
optype = _operation_type(x, y)
assert optype == 'vec_vec', "Cannot stack {} with {}".format(dimensions(x),
dimensions(y))
return x + y
def sigmoid_pw(x):
positive = True
if x < 0.:
positive = False
x = -x
if x < 1.:
y = 0.23105 * x + 0.50346
elif x < 2.:
y = 0.14973 * x + 0.58714
elif x < 3.:
y = 0.07177 * x + 0.74097
elif x < 4.:
y = 0.02943 * x + 0.86595
elif x < 5.:
y = 0.01129 * x + 0.93751
else:
y = 1.0
return y if positive else 1. - y
def sigmoid_secureml(x):
if x < -0.5:
return 0.0
elif x > 0.5:
return 1.0
else:
return x + 0.5
def d_sigmoid_pw(x):
if x < 0.:
return d_sigmoid_pw(-x)
elif x < 0.5:
return -0.02999 * x + 0.25175
elif x < 1.:
return -0.07678 * x + 0.27442
elif x < 2.:
return -0.09161 * x + 0.28729
elif x < 3.:
return -0.05981 * x + 0.22213
elif x < 4.:
return -0.02751 * x + 0.12623
elif x < 5.:
return -0.01101 * x + 0.06108
else:
return -0.00001 * x + 0.00608
def _apply_vectorized(x, func):
xtype = _type_of(x)
if xtype == 'scal':
return func(x)
if xtype == 'vec':
return [func(a) for a in x]
else:
return [[func(a) for a in x_row] for x_row in x]
def inv_fp(x):
if _np_instance(x):
return 1 / x
return _apply_vectorized(x, lambda z: 1/z)
def neg_fp(x):
if _np_instance(x):
return -x
return _apply_vectorized(x, lambda z: -z)
def div_pow2_fp(x, powof2):
if _np_instance(x):
return x / np.power(2, powof2)
return _apply_vectorized(x, lambda z: z >> powof2)
def mul_pow2_fp(x, powof2):
if _np_instance(x):
return x * np.power(2, powof2)
return _apply_vectorized(x, lambda z: z << powof2)
|
MIT License
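A standalone sketch of the element-wise fallback abs_fp implements; the type dispatch below is simplified for illustration (the repository uses its own _type_of/_np_instance helpers).
import numpy as np

def _apply_vectorized(x, func):
    # Simplified dispatch: scalar, list of scalars, or list of lists.
    if isinstance(x, (int, float)):
        return func(x)
    if x and isinstance(x[0], list):
        return [[func(a) for a in row] for row in x]
    return [func(a) for a in x]

def abs_fp(x):
    if isinstance(x, np.ndarray):
        return np.abs(x)
    return _apply_vectorized(x, lambda z: z if z > 0. else -z)

print(abs_fp(np.array([-1.0, 2.0])))        # [1. 2.]
print(abs_fp([[-1.5, 3.0], [0.5, -2.0]]))   # [[1.5, 3.0], [0.5, 2.0]]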
|
hunterjm/hassio-addons
|
xboxone/xboxone.py
|
XboxOneDevice.media_pause
|
python
|
def media_pause(self):
self._xboxone.media_command('pause')
|
Send pause command.
|
https://github.com/hunterjm/hassio-addons/blob/05bf8b146fa4bf0d47439b9b9870d11626143aac/xboxone/xboxone.py#L638-L640
|
import logging
import functools
import requests
import voluptuous as vol
from urllib.parse import urljoin
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_STEP, SUPPORT_VOLUME_MUTE, SUPPORT_PLAY,
MEDIA_TYPE_MUSIC, MEDIA_TYPE_VIDEO, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_CHANNEL)
from homeassistant.const import (
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, STATE_ON,
CONF_HOST, CONF_PORT, CONF_SSL, CONF_NAME, CONF_DEVICE, CONF_AUTHENTICATION,
CONF_IP_ADDRESS)
import homeassistant.util.dt as dt_util
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
SUPPORT_XBOXONE = SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SELECT_SOURCE | SUPPORT_PLAY | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE
REQUIRED_SERVER_VERSION = '0.9.8'
DEFAULT_SSL = False
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Xbox One SmartGlass'
DEFAULT_PORT = 5557
DEFAULT_AUTHENTICATION = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICE): cv.string,
vol.Optional(CONF_IP_ADDRESS, default=''): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_AUTHENTICATION, default=DEFAULT_AUTHENTICATION): cv.boolean,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
name = config.get(CONF_NAME)
ssl = config.get(CONF_SSL)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
liveid = config.get(CONF_DEVICE)
ip = config.get(CONF_IP_ADDRESS)
auth = config.get(CONF_AUTHENTICATION)
proto = 'https' if ssl else 'http'
base_url = '{0}://{1}:{2}'.format(proto, host, port)
add_devices([XboxOneDevice(base_url, liveid, ip, name, auth)])
class XboxOne:
def __init__(self, base_url, liveid, ip, auth):
self.is_server_up = False
self.is_server_correct_version = True
self.base_url = base_url
self.liveid = liveid
self._ip = ip
self._auth = auth
self._available = False
self._connected = False
self._media_status = None
self._console_status = None
self._volume_controls = None
self._pins = None
def get(self, endpoint, *args, **kwargs):
endpoint = endpoint.replace('<liveid>', self.liveid)
full_url = urljoin(self.base_url, endpoint)
return requests.get(full_url, *args, **kwargs)
@property
def available(self):
return self._available
@property
def connected(self):
return self._connected
@property
def console_status(self):
return self._console_status
@property
def media_status(self):
return self._media_status
@property
def volume_controls(self):
volume_controls = self._volume_controls
if not volume_controls:
return None
controls = volume_controls.get('avr') or volume_controls.get('tv')
if not controls:
return None
return {
'mute': controls['buttons']['btn.vol_mute']['url'],
'up': controls['buttons']['btn.vol_up']['url'],
'down': controls['buttons']['btn.vol_down']['url'],
}
@property
def media_playback_state(self):
if self.media_status:
return self.media_status.get('playback_status')
@property
def media_type(self):
if self.media_status:
return self.media_status.get('media_type')
@property
def media_position(self):
if self.media_status:
position = self.media_status.get('position')
if isinstance(position, int) and position >= 10000000:
return position / 10000000
@property
def media_duration(self):
if self.media_status:
media_end = self.media_status.get('media_end')
if isinstance(media_end, int) and media_end >= 10000000:
return media_end / 10000000
@property
def media_title(self):
if self.media_status:
return self.media_status.get('metadata', {}).get('title')
@property
def active_app(self):
if self.console_status:
active_titles = self.console_status.get('active_titles')
app = [a.get('name') for a in active_titles if a.get('has_focus')]
if len(app):
return app[0]
@property
def active_app_image(self):
if self.console_status:
active_titles = self.console_status.get('active_titles')
app = [a.get('image') for a in active_titles if a.get('has_focus')]
if len(app):
return app[0] or None
@property
def active_app_type(self):
if self.console_status:
active_titles = self.console_status.get('active_titles')
app = [a.get('type') for a in active_titles if a.get('has_focus')]
if len(app):
return app[0]
@property
def all_apps(self):
apps = {
'Home': 'ms-xbox-dashboard://home?view=home',
'TV': 'ms-xbox-livetv://'
}
if not self._pins and self._check_authentication():
self._pins = self.get('/web/pins').json()
if self._pins:
try:
for item in self._pins['ListItems']:
if item['Item']['ContentType'] == 'DApp' and item['Item']['Title'] not in apps.keys():
apps[item['Item']['Title']] = 'appx:{0}!App'.format(item['Item']['ItemId'])
except:
pass
if self.console_status:
active_titles = self.console_status.get('active_titles')
for app in active_titles:
if app.get('has_focus') and app.get('name') not in apps.keys():
apps[app.get('name')] = app.get('aum')
return apps
def _check_authentication(self):
try:
response = self.get('/auth').json()
if response.get('authenticated'):
return True
response = self.get('/auth/refresh').json()
if response.get('success'):
return True
except requests.exceptions.RequestException:
_LOGGER.error('Unreachable /auth endpoint')
return False
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
_LOGGER.error('Refreshing authentication tokens failed!')
return False
def _refresh_devicelist(self):
params = None
if self._ip:
params = {'addr': self._ip}
self.get('/device', params=params)
def _connect(self):
if self._auth and not self._check_authentication():
return False
try:
url = '/device/<liveid>/connect'
params = {}
if not self._auth:
params['anonymous'] = True
response = self.get(url, params=params).json()
if not response.get('success'):
_LOGGER.error('Failed to connect to console {0}: {1}'.format(self.liveid, str(response)))
return False
except requests.exceptions.RequestException:
_LOGGER.error('Unreachable /connect endpoint')
return False
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
return True
def _get_device_info(self):
try:
response = self.get('/device/<liveid>').json()
if not response.get('success'):
_LOGGER.debug('Console {0} not available'.format(self.liveid))
return None
except requests.exceptions.RequestException:
_LOGGER.error('Unreachable device info /<liveid> endpoint')
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
return response['device']
def _update_console_status(self):
try:
response = self.get('/device/<liveid>/console_status').json()
if not response.get('success'):
_LOGGER.error('Console {0} not available'.format(self.liveid))
return None
except requests.exceptions.RequestException:
_LOGGER.error('Unreachable /console_status endpoint')
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
self._console_status = response['console_status']
def _update_media_status(self):
try:
response = self.get('/device/<liveid>/media_status').json()
if not response.get('success'):
_LOGGER.error('Console {0} not available'.format(self.liveid))
return None
except requests.exceptions.RequestException:
_LOGGER.error('Unreachable /media_status endpoint')
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
self._media_status = response['media_status']
def _update_volume_controls(self):
if self._volume_controls:
return
try:
response = self.get('/device/<liveid>/ir').json()
if not response.get('success'):
_LOGGER.error('Console {0} not available'.format(self.liveid))
return None
except requests.exceptions.RequestException:
_LOGGER.error('Unreachable /ir endpoint')
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
self._volume_controls = response
def poweron(self):
try:
url = '/device/<liveid>/poweron'
params = None
if self._ip:
params = { 'addr': self._ip }
response = self.get(url, params=params).json()
if not response.get('success'):
_LOGGER.error('Failed to poweron {0}'.format(self.liveid))
return None
except requests.exceptions.RequestException:
_LOGGER.error('Unreachable /poweron endpoint')
return None
return response
def poweroff(self):
try:
response = self.get('/device/<liveid>/poweroff').json()
if not response.get('success'):
_LOGGER.error('Failed to poweroff {0}'.format(self.liveid))
return None
except requests.exceptions.RequestException:
_LOGGER.error('Failed to call poweroff for {0}'.format(self.liveid))
return None
return response
def ir_command(self, device, command):
try:
response = self.get('/device/<liveid>/ir').json()
if not response.get('success'):
return None
except requests.exceptions.RequestException:
_LOGGER.error('Failed to get enabled media commands for {0}'.format(self.liveid))
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return
enabled_commands = response.get(device).get('buttons')
if command not in enabled_commands:
_LOGGER.error('Provided command {0} not enabled for current ir device'.format(command))
return None
else:
button_url = enabled_commands.get(command).get('url')
try:
response = self.get('{0}'.format(button_url)).json()
if not response.get('success'):
return None
except requests.exceptions.RequestException:
_LOGGER.error('Failed to get enabled ir commands for {0}'.format(self.liveid))
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return response
def media_command(self, command):
try:
response = self.get('/device/<liveid>/media').json()
if not response.get('success'):
return None
except requests.exceptions.RequestException:
_LOGGER.error('Failed to get enabled media commands for {0}'.format(self.liveid))
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
enabled_commands = response.get('commands')
if command not in enabled_commands:
_LOGGER.error('Provided command {0} not enabled for current media'.format(command))
return None
try:
response = self.get('/device/<liveid>/media/{0}'.format(command)).json()
if not response.get('success'):
return None
except requests.exceptions.RequestException:
_LOGGER.error('Failed to get enabled media commands for {0}'.format(self.liveid))
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
return response
def volume_command(self, command):
if not self._volume_controls:
return None
url = self._volume_controls.get(command)
if not url:
return None
try:
response = self.get(url).json()
if not response.get('success'):
return None
except requests.exceptions.RequestException:
_LOGGER.error('Failed to get enabled volume commands for {0}'.format(self.liveid))
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
return response
def launch_title(self, launch_uri):
try:
apps = self.all_apps
if launch_uri in apps.keys():
launch_uri = apps[launch_uri]
response = self.get('/device/<liveid>/launch/{0}'.format(launch_uri)).json()
if not response.get('success'):
return None
except requests.exceptions.RequestException:
_LOGGER.error('Failed to launch title \'{0}\' for {1}'.format(launch_uri, self.liveid))
return None
except Exception as e:
_LOGGER.error('Unknown Error: %s', e)
return None
return response
def _check_server(self):
if not self.is_server_correct_version:
return False
try:
resp = self.get('/versions').json()
version = resp['versions']['xbox-smartglass-rest']
if version != REQUIRED_SERVER_VERSION:
self.is_server_correct_version = False
_LOGGER.error("Invalid xbox-smartglass-rest version: %s. Required: %s",
version, REQUIRED_SERVER_VERSION)
except requests.exceptions.RequestException:
self.is_server_up = False
return False
self.is_server_up = True
return True
def refresh(self):
if not self._check_server():
return
self._check_authentication()
self._refresh_devicelist()
device_info = self._get_device_info()
if not device_info or device_info.get('device_status') == 'Unavailable':
self._available = False
self._connected = False
self._console_status = None
self._media_status = None
self._volume_controls = None
else:
self._available = True
connection_state = device_info.get('connection_state')
if connection_state == 'Connected':
self._connected = True
else:
success = self._connect()
if not success:
_LOGGER.error('Failed to connect to {0}'.format(self.liveid))
self._connected = False
else:
self._connected = True
if self.available and self.connected:
self._update_console_status()
self._update_media_status()
self._update_volume_controls()
class XboxOneDevice(MediaPlayerDevice):
def __init__(self, base_url, liveid, ip, name, auth):
self._xboxone = XboxOne(base_url, liveid, ip, auth)
self._name = name
self._liveid = liveid
self._state = STATE_UNKNOWN
self._running_apps = None
self._current_app = None
@property
def name(self):
return self._name
@property
def unique_id(self):
return self._liveid
@property
def should_poll(self):
return True
@property
def supported_features(self):
active_support = SUPPORT_XBOXONE
if self.state not in [STATE_PLAYING, STATE_PAUSED] and (self._xboxone.active_app_type not in ['Application', 'App'] or self._xboxone.active_app == 'Home'):
active_support &= ~SUPPORT_NEXT_TRACK & ~SUPPORT_PREVIOUS_TRACK
if not self._xboxone.volume_controls:
active_support &= ~SUPPORT_VOLUME_MUTE & ~SUPPORT_VOLUME_STEP
return active_support
@property
def state(self):
playback_state = {
'Closed': STATE_IDLE,
'Changing': STATE_IDLE,
'Stopped': STATE_IDLE,
'Playing': STATE_PLAYING,
'Paused': STATE_PAUSED
}.get(self._xboxone.media_playback_state)
if playback_state:
state = playback_state
elif self._xboxone.connected or self._xboxone.available:
if self._xboxone.active_app_type in ['Application', 'App'] or self._xboxone.active_app == 'Home':
state = STATE_ON
else:
state = STATE_UNKNOWN
else:
state = STATE_OFF
return state
@property
def media_content_type(self):
if self.state in [STATE_PLAYING, STATE_PAUSED]:
return {
'Music': MEDIA_TYPE_MUSIC,
'Video': MEDIA_TYPE_VIDEO
}.get(self._xboxone.media_type)
@property
def media_duration(self):
if self.state in [STATE_PLAYING, STATE_PAUSED]:
return self._xboxone.media_duration
@property
def media_position(self):
if self.state in [STATE_PLAYING, STATE_PAUSED]:
return self._xboxone.media_position
@property
def media_position_updated_at(self):
if self.state in [STATE_PLAYING, STATE_PAUSED]:
return dt_util.utcnow()
@property
def media_image_url(self):
return self._xboxone.active_app_image
@property
def media_title(self):
if self.state in [STATE_PLAYING, STATE_PAUSED]:
return self._xboxone.media_title
return self._xboxone.active_app
@property
def source(self):
return self._xboxone.active_app
@property
def source_list(self):
return list(self._xboxone.all_apps.keys())
def update(self):
self._xboxone.refresh()
def turn_on(self):
self._xboxone.poweron()
def turn_off(self):
self._xboxone.poweroff()
def mute_volume(self, mute):
self._xboxone.volume_command('mute')
def volume_up(self):
self._xboxone.volume_command('up')
def volume_down(self):
self._xboxone.volume_command('down')
def media_play(self):
self._xboxone.media_command('play')
|
MIT License
|
jnulzl/prnet-train
|
utils/estimate_pose.py
|
P2sRt
|
python
|
def P2sRt(P):
t2d = P[:2, 3]
R1 = P[0:1, :3]
R2 = P[1:2, :3]
s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2.0
r1 = R1/np.linalg.norm(R1)
r2 = R2/np.linalg.norm(R2)
r3 = np.cross(r1, r2)
R = np.concatenate((r1, r2, r3), 0)
return s, R, t2d
|
decomposing the camera matrix P.
Args:
P: (3, 4). Affine Camera Matrix.
Returns:
s: scale factor.
R: (3, 3). rotation matrix.
t2d: (2,). 2d translation.
|
https://github.com/jnulzl/prnet-train/blob/4b70a3b48e00cdd0e7e0670706997f3c8427be81/utils/estimate_pose.py#L43-L61
|
import numpy as np
from math import cos, sin, atan2, asin
def isRotationMatrix(R):
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype = R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
def matrix2angle(R):
    if R[2,0] != 1 and R[2,0] != -1:
x = asin(R[2,0])
y = atan2(R[2,1]/cos(x), R[2,2]/cos(x))
z = atan2(R[1,0]/cos(x), R[0,0]/cos(x))
else:
z = 0
if R[2,0] == -1:
x = np.pi/2
y = z + atan2(R[0,1], R[0,2])
else:
x = -np.pi/2
y = -z + atan2(-R[0,1], -R[0,2])
return x, y, z
|
MIT License
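A quick numpy check of the decomposition above (assumes P2sRt from the snippet is in scope): build an affine camera matrix from a known scale and rotation and verify both are recovered.
import numpy as np

angle = 0.3
R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                   [np.sin(angle),  np.cos(angle), 0.0],
                   [0.0,            0.0,           1.0]])
s_true = 2.5
t = np.array([[4.0], [-1.0], [0.5]])
P = np.hstack([s_true * R_true, t])      # 3x4 affine camera matrix

s, R, t2d = P2sRt(P)
print(round(s, 6), np.allclose(R, R_true), t2d)  # 2.5 True [ 4. -1.]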
|
superbird11/ranges
|
ranges/ranges.py
|
Range.isdisjoint
|
python
|
def isdisjoint(self, rng):
if isinstance(rng, RangeSet):
return rng.isdisjoint(self)
try:
if not isinstance(rng, Range):
rng = Range(rng)
except ValueError:
raise TypeError(str(rng) + " is not Range-like")
rng_a, rng_b = (self, rng) if self < rng else (rng, self)
return not (
rng_a == rng_b
or (rng_a.end in rng_b if rng_a.end != rng_b.start else (rng_a.include_end and rng_b.include_start))
or (rng_b.start in rng_a if rng_a.end != rng_b.start else (rng_a.include_end and rng_b.include_start))
)
|
returns `False` if this range overlaps with the given range,
and `True` otherwise.
|
https://github.com/superbird11/ranges/blob/e285da71f3572e7d1c753d3bafd7d0fc07a70f69/ranges/ranges.py#L204-L224
|
import re
from collections.abc import Iterable
from ._helper import _LinkedList, _InfiniteValue, _is_iterable_non_string, Inf, _UnhashableFriendlyDict
class Range:
def __init__(self, *args, **kwargs):
start = kwargs.get('start', _InfiniteValue(negative=True))
end = kwargs.get('end', _InfiniteValue(negative=False))
include_start = kwargs.get('include_start', True)
include_end = kwargs.get('include_end', False)
self.include_start = include_start
self.include_end = include_end
if len(args) == 0:
rng = None
elif len(args) == 1:
if not args[0] and not isinstance(args[0], Range):
raise ValueError("Cannot take a falsey non-Range value as only positional argument")
rng = args[0]
else:
start = args[0]
end = args[1]
rng = None
if rng is not None:
if isinstance(rng, Range):
self.start = rng.start
self.end = rng.end
self.include_start = rng.include_start
self.include_end = rng.include_end
elif isinstance(rng, str):
pattern = r"(\[|\()\s*([^\s,]+)\s*(?:,|\.\.)\s*([^\s,]+)\s*(\]|\))"
match = re.match(pattern, rng)
try:
if match.group(1) == "[":
self.include_start = True
elif match.group(1) == "(":
self.include_start = False
else:
raise AttributeError()
if match.group(4) == "]":
self.include_end = True
elif match.group(4) == ")":
self.include_end = False
else:
raise AttributeError()
self.start = float(match.group(2))
self.end = float(match.group(3))
if self.start.is_integer():
self.start = int(self.start)
if self.end.is_integer():
self.end = int(self.end)
except (AttributeError, IndexError):
raise ValueError(f"Range '{rng}' was given in wrong format. Must be like '(start, end)' " +
"where () means exclusive, [] means inclusive")
except ValueError:
raise ValueError("start and end must be numbers")
else:
raise ValueError(f"cannot construct a new Range from an object of type '{type(rng)}'")
else:
self.start = start
self.end = end
try:
if self.start > self.end:
raise ValueError("start must be less than or equal to end")
except TypeError as _:
raise ValueError("start and end are not comparable types")
|
MIT License
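A usage sketch for isdisjoint (assumes this package is installed and exposes Range at the top level); the ranges below are made up.
from ranges import Range

a = Range(1, 5)        # [1, 5)
b = Range("[5, 9)")    # string form accepted by the constructor above
c = Range(3, 7)
print(a.isdisjoint(b))  # True  - a excludes 5, so the two share no point
print(a.isdisjoint(c))  # False - [3, 5) overlaps [3, 7)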
|
openstack/cinder
|
cinder/volume/flows/manager/create_volume.py
|
OnFailureRescheduleTask._reschedule
|
python
|
def _reschedule(self, context, cause, request_spec, filter_properties,
volume):
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume.id
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s",
{'volume_id': volume.id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, volume, request_spec=request_spec,
filter_properties=filter_properties)
|
Actions that happen during the rescheduling attempt occur here.
|
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/flows/manager/create_volume.py#L145-L171
|
import binascii
import traceback
import typing
from typing import Any, Dict, List, Optional, Tuple
from castellan import key_manager
import os_brick.initiator.connectors
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import netutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import backup as backup_api
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context as cinder_context
from cinder import coordination
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import glance
from cinder.image import image_utils
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
REKEY_SUPPORTED_CONNECTORS = (
os_brick.initiator.connectors.iscsi.ISCSIConnector,
os_brick.initiator.connectors.fibre_channel.FibreChannelConnector,
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
def __init__(self, reschedule_context, db, manager, scheduler_rpcapi,
do_reschedule):
requires = ['filter_properties', 'request_spec', 'volume',
'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.do_reschedule = do_reschedule
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.manager = manager
self.reschedule_context = reschedule_context
self.no_reschedule_types = [
exception.ImageCopyFailure,
exception.MetadataCopyFailure,
exception.MetadataUpdateFailure,
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
exception.ImageTooBig,
exception.InvalidSignatureImage,
exception.ImageSignatureVerificationException
]
def execute(self, **kwargs):
pass
def _pre_reschedule(self, volume):
try:
update = {
'scheduled_at': timeutils.utcnow(),
'host': None,
}
LOG.debug("Updating volume %(volume_id)s with %(update)s.",
{'update': update, 'volume_id': volume.id})
volume.update(update)
volume.save()
except exception.CinderException:
LOG.exception("Volume %s: update volume state failed.",
volume.id)
|
Apache License 2.0
|
morganstanley/testplan
|
testplan/testing/multitest/driver/tcp/server.py
|
TCPServerConfig.get_options
|
python
|
def get_options(cls):
return {
ConfigOption("host", default="localhost"): str,
ConfigOption("port", default=0): Use(int),
}
|
Schema for options validation and assignment of default values.
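For illustration, the same defaulting behaviour can be reproduced with the schema package directly; this is only a sketch, with ConfigOption playing a role similar to schema's Optional in the real driver config:
from schema import Optional, Schema, Use
options = Schema({
    Optional("host", default="localhost"): str,
    Optional("port", default=0): Use(int),
})
print(options.validate({"port": "8080"}))  # host defaults to 'localhost', port is coerced to 8080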
|
https://github.com/morganstanley/testplan/blob/8cb6a0ed0682698b2d6af82382fbb66d8d9e3ff7/testplan/testing/multitest/driver/tcp/server.py#L23-L30
|
import socket
from schema import Use
from testplan.common.config import ConfigOption
from testplan.common.utils.timing import TimeoutException, TimeoutExceptionInfo
from testplan.common.utils.sockets import Server
from ..base import Driver, DriverConfig
class TCPServerConfig(DriverConfig):
@classmethod
|
Apache License 2.0
|
peterdsharpe/aerosandbox
|
aerosandbox/modeling/fitting.py
|
FittedModel.goodness_of_fit
|
python
|
def goodness_of_fit(self,
type="R^2"
):
if type == "R^2":
y_mean = np.mean(self.y_data)
SS_tot = np.sum(
(self.y_data - y_mean) ** 2
)
y_model = self(self.x_data)
SS_res = np.sum(
(self.y_data - y_model) ** 2
)
R_squared = 1 - SS_res / SS_tot
return R_squared
elif type == "deviation" or type == "Linf":
return np.max(np.abs(self.y_data - self(self.x_data)))
else:
raise ValueError("Bad value of `type`!")
|
Returns a metric of the goodness of the fit.
Args:
type: Type of metric to use for goodness of fit. One of:
* "R^2": The coefficient of determination. Strictly speaking only mathematically rigorous to use this
for linear fits.
https://en.wikipedia.org/wiki/Coefficient_of_determination
* "deviation" or "Linf": The maximum deviation of the fit from any of the data points.
Returns: The metric of the goodness of the fit.
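As a quick illustration of the R^2 branch, the same metric computed with plain NumPy arrays outside of FittedModel (the sample data here is made up):
import numpy as np
y_data = np.array([1.0, 2.0, 3.0, 4.0])
y_model = np.array([1.1, 1.9, 3.2, 3.8])
ss_tot = np.sum((y_data - np.mean(y_data)) ** 2)   # total sum of squares
ss_res = np.sum((y_data - y_model) ** 2)           # residual sum of squares
print(1 - ss_res / ss_tot)                         # close to 1.0 for a good fit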
|
https://github.com/peterdsharpe/aerosandbox/blob/8fbf9449cba2f02e14424690ba2e34b438f21c69/aerosandbox/modeling/fitting.py#L328-L370
|
import aerosandbox.numpy as np
from aerosandbox.optimization.opti import Opti
from typing import Union, Dict, Callable, List
from aerosandbox.modeling.surrogate_model import SurrogateModel
import copy
import warnings
class FittedModel(SurrogateModel):
def __init__(self,
model: Callable[
[
Union[np.ndarray, Dict[str, np.ndarray]],
Dict[str, float]
],
np.ndarray
],
x_data: Union[np.ndarray, Dict[str, np.ndarray]],
y_data: np.ndarray,
parameter_guesses: Dict[str, float],
parameter_bounds: Dict[str, tuple] = None,
residual_norm_type: str = "L2",
fit_type: str = "best",
weights: np.ndarray = None,
put_residuals_in_logspace: bool = False,
verbose=True,
):
super().__init__()
def flatten(input):
return np.array(input).flatten()
try:
x_data = {
k: flatten(v)
for k, v in x_data.items()
}
x_data_is_dict = True
except AttributeError:
x_data = flatten(x_data)
x_data_is_dict = False
y_data = flatten(y_data)
n_datapoints = np.length(y_data)
if weights is None:
weights = np.ones(n_datapoints)
else:
weights = flatten(weights)
sum_weights = np.sum(weights)
if sum_weights <= 0:
raise ValueError("The weights must sum to a positive number!")
if np.any(weights < 0):
raise ValueError("No entries of the weights vector are allowed to be negative!")
weights = weights / np.sum(weights)
if parameter_bounds is None:
parameter_bounds = {}
for param_name, v in parameter_bounds.items():
if param_name not in parameter_guesses.keys():
raise ValueError(
f"A parameter name (key = \"{param_name}\") in parameter_bounds was not found in parameter_guesses.")
if not np.length(v) == 2:
raise ValueError(
"Every value in parameter_bounds must be a tuple in the format (lower_bound, upper_bound). "
"For one-sided bounds, use None for the unbounded side.")
if put_residuals_in_logspace:
if not np.all(y_data > 0):
raise ValueError("You can't fit a model with residuals in logspace if y_data is not entirely positive!")
relevant_inputs = {
"y_data" : y_data,
"weights": weights,
}
try:
relevant_inputs.update(x_data)
except TypeError:
relevant_inputs.update({"x_data": x_data})
for key, value in relevant_inputs.items():
series_length = np.length(value)
if not series_length == n_datapoints:
raise ValueError(
f"The supplied data series \"{key}\" has length {series_length}, but y_data has length {n_datapoints}.")
opti = Opti()
params = {}
for param_name, param_initial_guess in parameter_guesses.items():
if param_name in parameter_bounds:
params[param_name] = opti.variable(
init_guess=param_initial_guess,
lower_bound=parameter_bounds[param_name][0],
upper_bound=parameter_bounds[param_name][1],
)
else:
params[param_name] = opti.variable(
init_guess=param_initial_guess,
)
x_data_original = copy.deepcopy(
x_data)
try:
y_model = model(x_data, params)
except Exception:
raise Exception("""
There was an error when evaluating the model you supplied with the x_data you supplied.
Likely possible causes:
* Your model() does not have the call syntax model(x, p), where x is the x_data and p are parameters.
* Your model should take in p as a dict of parameters, but it does not.
* Your model assumes x is an array-like but you provided x_data as a dict, or vice versa.
See the docstring of FittedModel() if you have other usage questions or would like to see examples.
""")
try:
x_data_is_unchanged = np.all(x_data == x_data_original)
except ValueError:
x_data_is_unchanged = np.all([
x_series == x_series_original
for x_series, x_series_original in zip(x_data, x_data_original)
])
if not x_data_is_unchanged:
raise TypeError("model(x_data, parameter_guesses) did in-place operations on x, which is not allowed!")
if y_model is None:
raise TypeError("model(x_data, parameter_guesses) returned None, when it should've returned a 1D ndarray.")
if not put_residuals_in_logspace:
error = y_model - y_data
else:
y_model = np.fmax(y_model, 1e-300)
error = np.log(y_model) - np.log(y_data)
if residual_norm_type.lower() == "l1":
abs_error = opti.variable(init_guess=0,
n_vars=np.length(y_data))
opti.subject_to([
abs_error >= error,
abs_error >= -error,
])
opti.minimize(np.sum(weights * abs_error))
elif residual_norm_type.lower() == "l2":
opti.minimize(np.sum(weights * error ** 2))
elif residual_norm_type.lower() == "linf":
linf_value = opti.variable(init_guess=0)
opti.subject_to([
linf_value >= weights * error,
linf_value >= -weights * error
])
opti.minimize(linf_value)
else:
raise ValueError("Bad input for the 'residual_type' parameter.")
if fit_type == "best":
pass
elif fit_type == "upper bound":
opti.subject_to(y_model >= y_data)
elif fit_type == "lower bound":
opti.subject_to(y_model <= y_data)
else:
raise ValueError("Bad input for the 'fit_type' parameter.")
sol = opti.solve(verbose=verbose)
params_solved = {}
for param_name in params:
try:
params_solved[param_name] = sol.value(params[param_name])
except:
params_solved[param_name] = np.NaN
self.model = model
self.x_data = x_data
self.y_data = y_data
self.parameters = params_solved
self.parameter_guesses = parameter_guesses
self.parameter_bounds = parameter_bounds
self.residual_norm_type = residual_norm_type
self.fit_type = fit_type
self.weights = weights
self.put_residuals_in_logspace = put_residuals_in_logspace
def __call__(self, x):
super().__call__(x)
return self.model(x, self.parameters)
def plot_fit(self):
raise DeprecationWarning(
"Use FittedModel.plot() instead, which generalizes plotting to non-fitted surrogate models")
|
MIT License
|
rroller/dahua
|
custom_components/dahua/light.py
|
DahuaIlluminator.is_on
|
python
|
def is_on(self):
return self._coordinator.is_illuminator_on()
|
Return True if the illuminator light is on.
|
https://github.com/rroller/dahua/blob/83c09300098b6e446d218a60280a7d5babfd2c6f/custom_components/dahua/light.py#L124-L126
|
from homeassistant.core import HomeAssistant
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from . import DahuaDataUpdateCoordinator, dahua_utils
from .const import DOMAIN, SECURITY_LIGHT_ICON, INFRARED_ICON
from .entity import DahuaBaseEntity
from .client import SECURITY_LIGHT_TYPE
DAHUA_SUPPORTED_OPTIONS = SUPPORT_BRIGHTNESS
async def async_setup_entry(hass: HomeAssistant, entry, async_add_entities):
coordinator = hass.data[DOMAIN][entry.entry_id]
entities = []
if coordinator.supports_infrared_light():
entities.append(DahuaInfraredLight(coordinator, entry, "Infrared"))
if coordinator.supports_illuminator():
entities.append(DahuaIlluminator(coordinator, entry, "Illuminator"))
if coordinator.supports_security_light():
entities.append(DahuaSecurityLight(coordinator, entry, "Security Light"))
async_add_entities(entities)
class DahuaInfraredLight(DahuaBaseEntity, LightEntity):
def __init__(self, coordinator: DahuaDataUpdateCoordinator, entry, name):
super().__init__(coordinator, entry)
self._name = name
self._coordinator = coordinator
@property
def name(self):
return self._coordinator.get_device_name() + " " + self._name
@property
def unique_id(self):
return self._coordinator.get_serial_number() + "_infrared"
@property
def is_on(self):
return self._coordinator.is_infrared_light_on()
@property
def brightness(self):
return self._coordinator.get_infrared_brightness()
@property
def supported_features(self):
return DAHUA_SUPPORTED_OPTIONS
@property
def should_poll(self):
return False
async def async_turn_on(self, **kwargs):
hass_brightness = kwargs.get(ATTR_BRIGHTNESS)
dahua_brightness = dahua_utils.hass_brightness_to_dahua_brightness(hass_brightness)
channel = self._coordinator.get_channel()
await self._coordinator.client.async_set_lighting_v1(channel, True, dahua_brightness)
await self.coordinator.async_refresh()
async def async_turn_off(self, **kwargs):
hass_brightness = kwargs.get(ATTR_BRIGHTNESS)
dahua_brightness = dahua_utils.hass_brightness_to_dahua_brightness(hass_brightness)
channel = self._coordinator.get_channel()
await self._coordinator.client.async_set_lighting_v1(channel, False, dahua_brightness)
await self.coordinator.async_refresh()
@property
def icon(self):
return INFRARED_ICON
class DahuaIlluminator(DahuaBaseEntity, LightEntity):
def __init__(self, coordinator: DahuaDataUpdateCoordinator, entry, name):
super().__init__(coordinator, entry)
self._name = name
self._coordinator = coordinator
@property
def name(self):
return self._coordinator.get_device_name() + " " + self._name
@property
def unique_id(self):
return self._coordinator.get_serial_number() + "_illuminator"
@property
|
MIT License
|
thriftrw/thriftrw-python
|
thriftrw/idl/parser.py
|
ParserSpec.p_definition_unit
|
python
|
def p_definition_unit(self, p):
p[0] = p[1]
|
definition_unit : const
| ttype
|
https://github.com/thriftrw/thriftrw-python/blob/22f6ab645f5af48cae2fee0dc1431dfacb971348/thriftrw/idl/parser.py#L100-L104
|
from __future__ import absolute_import, unicode_literals, print_function
from collections import deque
from ply import yacc
from . import ast
from .lexer import Lexer
from ..errors import ThriftParserError
__all__ = ['Parser']
class ParserSpec(object):
tokens = Lexer.tokens
def p_error(self, p):
if p is None:
            raise ThriftParserError('Grammar error at EOF')
raise ThriftParserError(
'Grammar error %r at line %d' % (p.value, p.lineno)
)
def p_start(self, p):
p[0] = ast.Program(headers=p[1], definitions=p[2])
def p_header(self, p):
self._parse_seq(p)
def p_header_unit_(self, p):
p[0] = p[1]
def p_header_unit(self, p):
p[0] = p[1]
def p_include(self, p):
if len(p) == 4:
p[0] = ast.Include(name=p[2], path=p[3], lineno=p.lineno(1))
else:
p[0] = ast.Include(name=None, path=p[2], lineno=p.lineno(1))
def p_namespace(self, p):
p[0] = ast.Namespace(scope=p[2], name=p[3], lineno=p.lineno(1))
def p_namespace_scope(self, p):
p[0] = p[1]
    def p_sep(self, p):
        pass
def p_definition(self, p):
self._parse_seq(p)
def p_definition_unit_(self, p):
p[0] = p[1]
|
MIT License
|
matir/pwnableweb
|
pwntalk/views.py
|
require_login
|
python
|
def require_login(func):
@functools.wraps(func)
def _login_wrapper(*args, **kwargs):
if not _check_login():
return flask.redirect(flask.url_for('home'))
return func(*args, **kwargs)
return _login_wrapper
|
Redirects to the homepage if the user is not logged in.
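A self-contained sketch of the same decorator pattern; is_logged_in() is a hypothetical stand-in for the module's _check_login() helper, and the redirect is represented by a plain string so no Flask app needs to be running:
import functools
def is_logged_in():
    return False  # placeholder for a real session check
def require_login(func):
    @functools.wraps(func)
    def _login_wrapper(*args, **kwargs):
        if not is_logged_in():
            return "redirect to /home"  # flask.redirect(flask.url_for('home')) in the real view
        return func(*args, **kwargs)
    return _login_wrapper
@require_login
def profile():
    return "profile page"
print(profile())  # "redirect to /home" because is_logged_in() is False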
|
https://github.com/matir/pwnableweb/blob/81b2d3dc1da473d8833e2206401ec2808a6660b3/pwntalk/views.py#L29-L36
|
import base64
import binascii
import flask
import functools
import hashlib
import re
from sqlalchemy import exc
from sqlalchemy.orm import exc as orm_exc
import subprocess
from pwntalk import models
from pwntalk.app import app, get_flag
|
Apache License 2.0
|
crypto-toolbox/bitex
|
bitex/api/WSS/bitfinex.py
|
BitfinexWSS.ticker
|
python
|
def ticker(self, pair, **kwargs):
self._subscribe('ticker', symbol=pair, **kwargs)
|
Subscribe to the passed pair's ticker channel.
:param pair: str, Pair to request data for.
:param kwargs:
:return:
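A usage sketch, assuming network access to Bitfinex and the data_q queue provided by the WSSAPI base class:
wss = BitfinexWSS(pairs=['BTCUSD'])
wss.start()                     # connects and subscribes to the default channels
wss.ticker('ETHUSD')            # additionally subscribe to the ETHUSD ticker channel
channel, pair, entry = wss.data_q.get()   # blocks until the first update arrives
print(channel, pair)
wss.stop()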
|
https://github.com/crypto-toolbox/bitex/blob/56d46ea3db6de5219a72dad9b052fbabc921232f/bitex/api/WSS/bitfinex.py#L764-L771
|
import logging
import json
import time
import queue
import threading
from threading import Thread
from websocket import create_connection, WebSocketTimeoutException
from websocket import WebSocketConnectionClosedException
from bitex.api.WSS.base import WSSAPI
from bitex.api.WSS.exceptions import InvalidBookLengthError, GenericSubscriptionError
from bitex.api.WSS.exceptions import NotSubscribedError, AlreadySubscribedError
from bitex.api.WSS.exceptions import InvalidPairError, InvalidChannelError
from bitex.api.WSS.exceptions import InvalidEventError, InvalidBookPrecisionError
from bitex.api.WSS.exceptions import UnknownEventError, UnknownWSSError
from bitex.api.WSS.exceptions import UnknownWSSInfo, AlreadyRegisteredError
from bitex.api.WSS.exceptions import NotRegisteredError, UnknownChannelError
from bitex.api.WSS.exceptions import FaultyPayloadError
log = logging.getLogger(__name__)
class BitfinexWSS(WSSAPI):
def __init__(self, pairs=None):
super(BitfinexWSS, self).__init__('wss://api.bitfinex.com/ws/2', 'Bitfinex')
self.conn = None
if pairs:
self.pairs = pairs
else:
self.pairs = ['ETHBTC', 'BTCUSD', 'ETHUSD', 'ETCUSD', 'ETCBTC',
'ZECUSD', 'ZECBTC', 'XMRUSD', 'XMRBTC', 'LTCUSD',
'LTCBTC', 'DSHUSD', 'DSHBTC']
self._receiver_lock = threading.Lock()
self._processor_lock = threading.Lock()
self.receiver_q = queue.Queue()
self.receiver_thread = None
self.processing_thread = None
self.ping_timer = None
self.timeout = 5
self._heartbeats = {}
self._late_heartbeats = {}
self.api_version = None
self.channels = {}
self.channel_labels = {}
self.channel_states = {}
self.channel_configs = {}
self.wss_config = {}
self._event_handlers = {'error': self._raise_error,
'unsubscribed': self._handle_unsubscribed,
'subscribed': self._handle_subscribed,
'auth': self._handle_subscribed,
'unauth': self._handle_unsubscribed,
'info': self._handle_info,
'pong': self._handle_pong,
'conf': self._handle_conf}
self._data_handlers = {'ticker': self._handle_ticker,
'book': self._handle_book,
'raw_book': self._handle_raw_book,
'candles': self._handle_candles,
'trades': self._handle_trades,
'auth': self._handle_auth}
def restart_client():
self._controller_q.put('restart')
self._code_handlers = {'20051': restart_client,
'20060': self.pause,
'20061': self.unpause,
'10000': InvalidEventError,
'10001': InvalidPairError,
'10300': GenericSubscriptionError,
'10301': AlreadySubscribedError,
'10302': InvalidChannelError,
'10400': GenericSubscriptionError,
'10401': NotSubscribedError,
'10011': InvalidBookPrecisionError,
'10012': InvalidBookLengthError}
def eval_command(self, cmd):
if cmd == 'restart':
self.restart(soft=True)
elif cmd == 'stop':
self.stop()
def _check_heartbeats(self, ts, *args, **kwargs):
for chan_id in self._heartbeats:
if ts - self._heartbeats[chan_id] >= 10:
if chan_id not in self._late_heartbeats:
try:
log.warning("BitfinexWSS.heartbeats: Channel %s hasn't "
"sent a heartbeat in %s seconds!",
self.channel_labels[chan_id],
ts - self._heartbeats[chan_id])
self._late_heartbeats[chan_id] = ts
except KeyError:
log.error("BitfinexWSS.heartbeats: Channel %s is not "
"registered in the connector's registry! "
"Restarting Connection to avoid errors..",
chan_id)
raise UnknownChannelError
else:
continue
else:
try:
self._late_heartbeats.pop(chan_id)
except KeyError:
continue
log.info("BitfinexWSS.heartbeats: Channel %s has sent a "
"heartbeat again!", self.channel_labels[chan_id])
self.ping()
def _check_ping(self):
if time.time() - self.ping_timer > self.timeout:
raise TimeoutError("Ping Command timed out!")
def pause(self):
self._receiver_lock.acquire()
log.info("BitfinexWSS.pause(): Pausing client..")
def unpause(self):
self._receiver_lock.release()
log.info("BitfinexWSS.pause(): Unpausing client..")
def start(self):
super(BitfinexWSS, self).start()
log.info("BitfinexWSS.start(): Initializing Websocket connection..")
while self.conn is None:
try:
self.conn = create_connection(self.addr, timeout=10)
except WebSocketTimeoutException:
self.conn = None
print("Couldn't create websocket connection - retrying!")
log.info("BitfinexWSS.start(): Initializing receiver thread..")
if not self.receiver_thread:
self.receiver_thread = Thread(target=self.receive, name='Receiver Thread')
self.receiver_thread.start()
else:
log.info("BitfinexWSS.start(): Thread not started! "
"self.receiver_thread is populated!")
log.info("BitfinexWSS.start(): Initializing processing thread..")
if not self.processing_thread:
self.processing_thread = Thread(target=self.process, name='Processing Thread')
self.processing_thread.start()
else:
log.info("BitfinexWSS.start(): Thread not started! "
"self.processing_thread is populated!")
self.setup_subscriptions()
def stop(self):
super(BitfinexWSS, self).stop()
log.info("BitfinexWSS.stop(): Stopping client..")
log.info("BitfinexWSS.stop(): Joining receiver thread..")
try:
self.receiver_thread.join()
if self.receiver_thread.is_alive():
                time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Receiver thread was not running!")
log.info("BitfinexWSS.stop(): Joining processing thread..")
try:
self.processing_thread.join()
if self.processing_thread.is_alive():
                time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Processing thread was not running!")
log.info("BitfinexWSS.stop(): Closing websocket conection..")
try:
self.conn.close()
except WebSocketConnectionClosedException:
pass
except AttributeError:
pass
self.conn = None
self.processing_thread = None
self.receiver_thread = None
log.info("BitfinexWSS.stop(): Done!")
def restart(self, soft=False):
log.info("BitfinexWSS.restart(): Restarting client..")
super(BitfinexWSS, self).restart()
channel_labels = [self.channel_labels[k] for k in self.channel_labels] if soft else None
self.channels = {}
self.channel_labels = {}
self.channel_states = {}
if channel_labels:
for channel_name, kwargs in channel_labels:
self._subscribe(channel_name, **kwargs)
def receive(self):
while self.running:
if self._receiver_lock.acquire(blocking=False):
try:
raw = self.conn.recv()
except WebSocketTimeoutException:
self._receiver_lock.release()
continue
except WebSocketConnectionClosedException:
self.conn = None
self._controller_q.put('restart')
except AttributeError:
self._receiver_lock.release()
continue
msg = time.time(), json.loads(raw)
log.debug("receiver Thread: Data Received: %s", msg)
self.receiver_q.put(msg)
self._receiver_lock.release()
else:
time.sleep(0.5)
def process(self):
while self.running:
if self._processor_lock.acquire(blocking=False):
if self.ping_timer:
try:
self._check_ping()
except TimeoutError:
log.exception("BitfinexWSS.ping(): TimedOut! (%ss)" %
self.ping_timer)
except (WebSocketConnectionClosedException,
ConnectionResetError):
log.exception("BitfinexWSS.ping(): Connection Error!")
self.conn = None
if not self.conn:
self._controller_q.put('restart')
skip_processing = False
try:
ts, data = self.receiver_q.get(timeout=0.1)
except queue.Empty:
skip_processing = True
ts = time.time()
data = None
if not skip_processing:
log.debug("Processing Data: %s", data)
if isinstance(data, list):
self.handle_data(ts, data)
else:
try:
self.handle_response(ts, data)
except UnknownEventError:
log.exception("main() - UnknownEventError: %s",
data)
log.info("main() - Shutting Down due to "
"Unknown Error!")
self._controller_q.put('stop')
except ConnectionResetError:
log.info("processor Thread: Connection Was reset, "
"initiating restart")
self._controller_q.put('restart')
self._check_heartbeats(ts)
self._processor_lock.release()
else:
time.sleep(0.5)
def handle_response(self, ts, resp):
log.info("handle_response: Handling response %s", resp)
event = resp['event']
try:
self._event_handlers[event](ts, **resp)
except (InvalidChannelError, InvalidPairError, InvalidBookLengthError,
InvalidBookPrecisionError) as e:
log.exception(e)
print(e)
except (NotSubscribedError, AlreadySubscribedError) as e:
log.exception(e)
print(e)
except GenericSubscriptionError as e:
log.exception(e)
print(e)
except InvalidEventError as e:
log.critical("handle_response(): %s; %s", e, resp)
log.exception(e)
raise SystemError(e)
except KeyError:
raise UnknownEventError("handle_response(): %s" % resp)
def _handle_subscribed(self, *args, chanId=None, channel=None, **kwargs):
log.debug("_handle_subscribed: %s - %s - %s", chanId, channel, kwargs)
if chanId in self.channels:
raise AlreadyRegisteredError()
self._heartbeats[chanId] = time.time()
try:
channel_key = ('raw_'+channel
if kwargs['prec'].startswith('R') and channel == 'book'
else channel)
except KeyError:
channel_key = channel
try:
self.channels[chanId] = self._data_handlers[channel_key]
except KeyError:
raise UnknownChannelError()
try:
kwargs.pop('event')
except KeyError:
pass
try:
kwargs.pop('len')
except KeyError:
pass
try:
kwargs.pop('chanId')
except KeyError:
pass
self.channel_labels[chanId] = (channel_key, kwargs)
def _handle_unsubscribed(self, *args, chanId=None, **kwargs):
log.debug("_handle_unsubscribed: %s - %s", chanId, kwargs)
try:
self.channels.pop(chanId)
except KeyError:
raise NotRegisteredError()
try:
self._heartbeats.pop(chanId)
except KeyError:
pass
try:
self._late_heartbeats.pop(chanId)
except KeyError:
pass
def _raise_error(self, *args, **kwargs):
log.debug("_raise_error(): %s" % kwargs)
try:
error_code = str(kwargs['code'])
except KeyError as e:
raise FaultyPayloadError('_raise_error(): %s' % kwargs)
try:
raise self._code_handlers[error_code]()
except KeyError:
raise UnknownWSSError()
def _handle_info(self, *args, **kwargs):
if 'version' in kwargs:
self.api_version = kwargs['version']
print("Initialized API with version %s" % self.api_version)
return
try:
info_code = str(kwargs['code'])
except KeyError:
raise FaultyPayloadError("_handle_info: %s" % kwargs)
if not info_code.startswith('2'):
raise ValueError("Info Code must start with 2! %s", kwargs)
output_msg = "_handle_info(): %s" % kwargs
log.info(output_msg)
try:
self._code_handlers[info_code]()
except KeyError:
raise UnknownWSSInfo(output_msg)
def _handle_pong(self, ts, *args, **kwargs):
log.info("BitfinexWSS.ping(): Ping received! (%ss)",
ts - self.ping_timer)
self.ping_timer = None
def _handle_conf(self, ts, *args, **kwargs):
pass
def handle_data(self, ts, msg):
try:
chan_id, *data = msg
except ValueError as e:
raise FaultyPayloadError("handle_data(): %s - %s" % (msg, e))
self._heartbeats[chan_id] = ts
if data[0] == 'hb':
self._handle_hearbeat(ts, chan_id)
return
try:
self.channels[chan_id](ts, chan_id, data)
except KeyError:
raise NotRegisteredError("handle_data: %s not registered - "
"Payload: %s" % (chan_id, msg))
@staticmethod
def _handle_hearbeat(*args, **kwargs):
pass
def _handle_ticker(self, ts, chan_id, data):
pair = self.channel_labels[chan_id][1]['pair']
entry = (*data, ts)
self.data_q.put(('ticker', pair, entry))
def _handle_book(self, ts, chan_id, data):
pair = self.channel_labels[chan_id][1]['pair']
entry = data, ts
self.data_q.put(('order_book', pair, entry))
def _handle_raw_book(self, ts, chan_id, data):
pair = self.channel_labels[chan_id][1]['pair']
entry = data, ts
self.data_q.put(('raw_order_book', pair, entry))
def _handle_trades(self, ts, chan_id, data):
pair = self.channel_labels[chan_id][1]['pair']
entry = data, ts
self.data_q.put(('trades', pair, entry))
def _handle_candles(self, ts, chan_id, data):
pair = self.channel_labels[chan_id][1]['key'].split(':')[-1][1:]
entry = data, ts
self.data_q.put(('ohlc', pair, entry))
def _handle_auth(self, ts, chan_id, data):
keys = {'hts': self._handle_auth_trades,
'te': self._handle_auth_trades, 'tu': self._handle_auth_trades,
'ps': self._handle_auth_positions,
'pn': self._handle_auth_positions,
'pu': self._handle_auth_positions,
'pc': self._handle_auth_positions,
'os': self._handle_auth_orders, 'on': self._handle_auth_orders,
'ou': self._handle_auth_orders, 'oc': self._handle_auth_orders,
'hos': self._handle_auth_orders, 'ws': self._handle_auth_wallet,
'wu': self._handle_auth_wallet, 'bs': self._handle_auth_balance,
'bu': self._handle_auth_balance,
'mis': self._handle_auth_margin_info,
'miu': self._handle_auth_margin_info,
'fis': self._handle_auth_funding_info,
'fiu': self._handle_auth_funding_info,
'fos': self._handle_auth_offers, 'fon': self._handle_auth_offers,
'fou': self._handle_auth_offers, 'foc': self._handle_auth_offers,
'hfos': self._handle_auth_offers,
'fcs': self._handle_auth_credits,
'fcn': self._handle_auth_credits,
'fcu': self._handle_auth_credits,
'fcc': self._handle_auth_credits,
'hfcs': self._handle_auth_credits,
'fls': self._handle_auth_loans, 'fln': self._handle_auth_loans,
'flu': self._handle_auth_loans, 'flc': self._handle_auth_loans,
'hfls': self._handle_auth_loans,
'hfts': self._handle_auth_funding_trades,
'fte': self._handle_auth_funding_trades,
'ftu': self._handle_auth_funding_trades}
event, *_ = data
try:
keys[event](ts, data)
except KeyError:
log.exception('%s; %s', chan_id, data)
raise UnknownEventError('The Passed event in data[0] is not '
'associated with any data handler!')
except Exception:
log.exception("_handle_auth: %s - %s, %s", chan_id, event, data)
raise
def _handle_auth_trades(self, ts, data):
entry = data, ts
self.data_q.put(('account_trades', 'NA', entry))
def _handle_auth_positions(self, ts, data):
entry = data, ts
self.data_q.put(('account_positions', 'NA', entry))
def _handle_auth_orders(self, ts, data):
entry = data, ts
self.data_q.put(('account_orders', 'NA', entry))
def _handle_auth_wallet(self, ts, data):
entry = data, ts
self.data_q.put(('account_wallet', 'NA', entry))
def _handle_auth_balance(self, ts, data):
entry = data, ts
self.data_q.put(('account_balance', 'NA', entry))
def _handle_auth_margin_info(self, ts, data):
entry = data, ts
self.data_q.put(('account_margin_info', 'NA', entry))
def _handle_auth_funding_info(self, ts, data):
entry = data, ts
self.data_q.put(('account_funding_info', 'NA', entry))
def _handle_auth_offers(self, ts, data):
entry = data, ts
self.data_q.put(('account_offers', 'NA', entry))
def _handle_auth_credits(self, ts, data):
entry = data, ts
self.data_q.put(('account_credits', 'NA', entry))
def _handle_auth_loans(self, event, data):
entry = data, time.time()
self.data_q.put(('account_loans', 'NA', entry))
def _handle_auth_funding_trades(self, event, data):
entry = data, time.time()
self.data_q.put(('account_funding_trades', 'NA', entry))
def send(self, payload):
self.conn.send(json.dumps(payload))
def ping(self):
self.ping_timer = time.time()
self.send({'event': 'ping'})
def setup_subscriptions(self):
self.config(decimals_as_strings=True)
for pair in self.pairs:
self.ticker(pair)
self.ohlc(pair)
self.order_book(pair)
self.raw_order_book(pair)
self.trades(pair)
def config(self, decimals_as_strings=True, ts_as_dates=False,
sequencing=False, **kwargs):
flags = 0
if decimals_as_strings:
flags += 8
if ts_as_dates:
flags += 32
if sequencing:
flags += 65536
payload = {'event': 'conf', 'flags': flags}
payload.update(kwargs)
self.send(payload)
def _subscribe(self, channel_name, **kwargs):
if not self.conn:
log.error("_subscribe(): Cannot subscribe to channel,"
"since the client has not been started!")
return
payload = {'event': 'subscribe', 'channel': channel_name}
payload.update(**kwargs)
log.debug("_subscribe: %s", payload)
self.send(payload)
|
MIT License
|
allenai/allennlp
|
allennlp/confidence_checks/task_checklists/utils.py
|
random_string
|
python
|
def random_string(n: int) -> str:
return "".join(np.random.choice([x for x in string.ascii_letters + string.digits], n))
|
Returns a random alphanumeric string of length `n`.
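A usage sketch (assumes allennlp is installed); seeding NumPy first makes the helper reproducible in tests:
import numpy as np
from allennlp.confidence_checks.task_checklists.utils import random_string
np.random.seed(0)
print(random_string(8))  # an 8-character alphanumeric string, deterministic under the seed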
|
https://github.com/allenai/allennlp/blob/dcd8d9e9671da5a87de51f2bb42ceb3abdce8b3b/allennlp/confidence_checks/task_checklists/utils.py#L155-L159
|
import string
from typing import Dict, Callable, List, Union
import numpy as np
import spacy
from checklist.editor import Editor
def add_common_lexicons(editor: Editor):
profession = [
"journalist",
"historian",
"secretary",
"nurse",
"waitress",
"accountant",
"engineer",
"attorney",
"artist",
"editor",
"architect",
"model",
"interpreter",
"analyst",
"actor",
"actress",
"assistant",
"intern",
"economist",
"organizer",
"author",
"investigator",
"agent",
"administrator",
"executive",
"educator",
"investor",
"DJ",
"entrepreneur",
"auditor",
"advisor",
"instructor",
"activist",
"consultant",
"apprentice",
"reporter",
"expert",
"psychologist",
"examiner",
"painter",
"manager",
"contractor",
"therapist",
"programmer",
"musician",
"producer",
"associate",
"intermediary",
"designer",
"cook",
"salesperson",
"dentist",
"attorney",
"detective",
"banker",
"researcher",
"cop",
"driver",
"counselor",
"clerk",
"professor",
"tutor",
"coach",
"chemist",
"scientist",
"veterinarian",
"firefighter",
"baker",
"psychiatrist",
"prosecutor",
"director",
"technician",
]
editor.add_lexicon("profession", profession, overwrite=True)
def spacy_wrap(fn: Callable, language: str = "en_core_web_sm", **kwargs) -> Callable:
from allennlp.common.util import get_spacy_model
def new_fn(data: Union[spacy.tokens.doc.Doc, Dict, str]):
if not isinstance(data, spacy.tokens.doc.Doc):
model = get_spacy_model(language, **kwargs)
if isinstance(data, Dict):
for key, val in data.items():
if isinstance(val, str):
data[key] = model(val)
elif isinstance(data, tuple):
data = tuple(model(tup) if isinstance(tup, str) else tup for tup in data)
elif isinstance(data, str):
data = model(data)
else:
pass
return fn(data)
return new_fn
def strip_punctuation(data: Union[str, spacy.tokens.doc.Doc]) -> str:
if isinstance(data, str):
return data.rstrip(string.punctuation)
elif isinstance(data, spacy.tokens.doc.Doc):
while len(data) and data[-1].is_punct:
data = data[:-1]
else:
pass
return str(data)
def toggle_punctuation(data: str) -> List[str]:
s = strip_punctuation(data)
ret = []
if s != data:
ret.append(s)
if s + "." != data:
ret.append(s + ".")
return ret
|
Apache License 2.0
|
encode/httpx
|
httpx/_client.py
|
Client.send
|
python
|
def send(
self,
request: Request,
*,
stream: bool = False,
auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT,
) -> Response:
if self._state == ClientState.CLOSED:
raise RuntimeError("Cannot send a request, as the client has been closed.")
self._state = ClientState.OPENED
follow_redirects = (
self.follow_redirects
if isinstance(follow_redirects, UseClientDefault)
else follow_redirects
)
auth = self._build_request_auth(request, auth)
response = self._send_handling_auth(
request,
auth=auth,
follow_redirects=follow_redirects,
history=[],
)
try:
if not stream:
response.read()
return response
except Exception as exc:
response.close()
raise exc
|
Send a request.
The request is sent as-is, unmodified.
Typically you'll want to build one with `Client.build_request()`
so that any client-level configuration is merged into the request,
but passing an explicit `httpx.Request()` is supported as well.
See also: [Request instances][0]
[0]: /advanced/#request-instances
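A usage sketch following the docstring's recommendation to build the request through the client; the base URL is a placeholder and the call performs a real HTTP request:
import httpx
with httpx.Client(base_url="https://example.org") as client:
    request = client.build_request("GET", "/")   # client-level config is merged in here
    response = client.send(request)
    print(response.status_code)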
|
https://github.com/encode/httpx/blob/deb1a2b921eb4cbf9afb4622539accaf89db477d/httpx/_client.py#L841-L888
|
import datetime
import enum
import typing
import warnings
from contextlib import contextmanager
from types import TracebackType
from .__version__ import __version__
from ._auth import Auth, BasicAuth, FunctionAuth
from ._compat import asynccontextmanager
from ._config import (
DEFAULT_LIMITS,
DEFAULT_MAX_REDIRECTS,
DEFAULT_TIMEOUT_CONFIG,
Limits,
Proxy,
Timeout,
)
from ._decoders import SUPPORTED_DECODERS
from ._exceptions import (
InvalidURL,
RemoteProtocolError,
TooManyRedirects,
request_context,
)
from ._models import URL, Cookies, Headers, QueryParams, Request, Response
from ._status_codes import codes
from ._transports.asgi import ASGITransport
from ._transports.base import AsyncBaseTransport, BaseTransport
from ._transports.default import AsyncHTTPTransport, HTTPTransport
from ._transports.wsgi import WSGITransport
from ._types import (
AsyncByteStream,
AuthTypes,
CertTypes,
CookieTypes,
HeaderTypes,
ProxiesTypes,
QueryParamTypes,
RequestContent,
RequestData,
RequestFiles,
SyncByteStream,
TimeoutTypes,
URLTypes,
VerifyTypes,
)
from ._utils import (
NetRCInfo,
Timer,
URLPattern,
get_environment_proxies,
get_logger,
same_origin,
)
T = typing.TypeVar("T", bound="Client")
U = typing.TypeVar("U", bound="AsyncClient")
class UseClientDefault:
    pass
USE_CLIENT_DEFAULT = UseClientDefault()
logger = get_logger(__name__)
USER_AGENT = f"python-httpx/{__version__}"
ACCEPT_ENCODING = ", ".join(
[key for key in SUPPORTED_DECODERS.keys() if key != "identity"]
)
class ClientState(enum.Enum):
UNOPENED = 1
OPENED = 2
CLOSED = 3
class BoundSyncStream(SyncByteStream):
def __init__(
self, stream: SyncByteStream, response: Response, timer: Timer
) -> None:
self._stream = stream
self._response = response
self._timer = timer
def __iter__(self) -> typing.Iterator[bytes]:
for chunk in self._stream:
yield chunk
def close(self) -> None:
seconds = self._timer.sync_elapsed()
self._response.elapsed = datetime.timedelta(seconds=seconds)
self._stream.close()
class BoundAsyncStream(AsyncByteStream):
def __init__(
self, stream: AsyncByteStream, response: Response, timer: Timer
) -> None:
self._stream = stream
self._response = response
self._timer = timer
async def __aiter__(self) -> typing.AsyncIterator[bytes]:
async for chunk in self._stream:
yield chunk
async def aclose(self) -> None:
seconds = await self._timer.async_elapsed()
self._response.elapsed = datetime.timedelta(seconds=seconds)
await self._stream.aclose()
class BaseClient:
def __init__(
self,
*,
auth: AuthTypes = None,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
follow_redirects: bool = False,
max_redirects: int = DEFAULT_MAX_REDIRECTS,
event_hooks: typing.Mapping[str, typing.List[typing.Callable]] = None,
base_url: URLTypes = "",
trust_env: bool = True,
):
event_hooks = {} if event_hooks is None else event_hooks
self._base_url = self._enforce_trailing_slash(URL(base_url))
self._auth = self._build_auth(auth)
self._params = QueryParams(params)
self.headers = Headers(headers)
self._cookies = Cookies(cookies)
self._timeout = Timeout(timeout)
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects
self._event_hooks = {
"request": list(event_hooks.get("request", [])),
"response": list(event_hooks.get("response", [])),
}
self._trust_env = trust_env
self._netrc = NetRCInfo()
self._state = ClientState.UNOPENED
@property
def is_closed(self) -> bool:
return self._state == ClientState.CLOSED
@property
def trust_env(self) -> bool:
return self._trust_env
def _enforce_trailing_slash(self, url: URL) -> URL:
if url.raw_path.endswith(b"/"):
return url
return url.copy_with(raw_path=url.raw_path + b"/")
def _get_proxy_map(
self, proxies: typing.Optional[ProxiesTypes], allow_env_proxies: bool
) -> typing.Dict[str, typing.Optional[Proxy]]:
if proxies is None:
if allow_env_proxies:
return {
key: None if url is None else Proxy(url=url)
for key, url in get_environment_proxies().items()
}
return {}
if isinstance(proxies, dict):
new_proxies = {}
for key, value in proxies.items():
proxy = Proxy(url=value) if isinstance(value, (str, URL)) else value
new_proxies[str(key)] = proxy
return new_proxies
else:
proxy = Proxy(url=proxies) if isinstance(proxies, (str, URL)) else proxies
return {"all://": proxy}
@property
def timeout(self) -> Timeout:
return self._timeout
@timeout.setter
def timeout(self, timeout: TimeoutTypes) -> None:
self._timeout = Timeout(timeout)
@property
def event_hooks(self) -> typing.Dict[str, typing.List[typing.Callable]]:
return self._event_hooks
@event_hooks.setter
def event_hooks(
self, event_hooks: typing.Dict[str, typing.List[typing.Callable]]
) -> None:
self._event_hooks = {
"request": list(event_hooks.get("request", [])),
"response": list(event_hooks.get("response", [])),
}
@property
def auth(self) -> typing.Optional[Auth]:
return self._auth
@auth.setter
def auth(self, auth: AuthTypes) -> None:
self._auth = self._build_auth(auth)
@property
def base_url(self) -> URL:
return self._base_url
@base_url.setter
def base_url(self, url: URLTypes) -> None:
self._base_url = self._enforce_trailing_slash(URL(url))
@property
def headers(self) -> Headers:
return self._headers
@headers.setter
def headers(self, headers: HeaderTypes) -> None:
client_headers = Headers(
{
b"Accept": b"*/*",
b"Accept-Encoding": ACCEPT_ENCODING.encode("ascii"),
b"Connection": b"keep-alive",
b"User-Agent": USER_AGENT.encode("ascii"),
}
)
client_headers.update(headers)
self._headers = client_headers
@property
def cookies(self) -> Cookies:
return self._cookies
@cookies.setter
def cookies(self, cookies: CookieTypes) -> None:
self._cookies = Cookies(cookies)
@property
def params(self) -> QueryParams:
return self._params
@params.setter
def params(self, params: QueryParamTypes) -> None:
self._params = QueryParams(params)
def build_request(
self,
method: str,
url: URLTypes,
*,
content: RequestContent = None,
data: RequestData = None,
files: RequestFiles = None,
json: typing.Any = None,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
) -> Request:
url = self._merge_url(url)
headers = self._merge_headers(headers)
cookies = self._merge_cookies(cookies)
params = self._merge_queryparams(params)
timeout = (
self.timeout if isinstance(timeout, UseClientDefault) else Timeout(timeout)
)
return Request(
method,
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
extensions={"timeout": timeout.as_dict()},
)
def _merge_url(self, url: URLTypes) -> URL:
merge_url = URL(url)
if merge_url.is_relative_url:
merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/")
return self.base_url.copy_with(raw_path=merge_raw_path)
return merge_url
def _merge_cookies(
self, cookies: CookieTypes = None
) -> typing.Optional[CookieTypes]:
if cookies or self.cookies:
merged_cookies = Cookies(self.cookies)
merged_cookies.update(cookies)
return merged_cookies
return cookies
def _merge_headers(
self, headers: HeaderTypes = None
) -> typing.Optional[HeaderTypes]:
merged_headers = Headers(self.headers)
merged_headers.update(headers)
return merged_headers
def _merge_queryparams(
self, params: QueryParamTypes = None
) -> typing.Optional[QueryParamTypes]:
if params or self.params:
merged_queryparams = QueryParams(self.params)
return merged_queryparams.merge(params)
return params
def _build_auth(self, auth: AuthTypes) -> typing.Optional[Auth]:
if auth is None:
return None
elif isinstance(auth, tuple):
return BasicAuth(username=auth[0], password=auth[1])
elif isinstance(auth, Auth):
return auth
elif callable(auth):
return FunctionAuth(func=auth)
else:
raise TypeError(f'Invalid "auth" argument: {auth!r}')
def _build_request_auth(
self,
request: Request,
auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
) -> Auth:
auth = (
self._auth if isinstance(auth, UseClientDefault) else self._build_auth(auth)
)
if auth is not None:
return auth
username, password = request.url.username, request.url.password
if username or password:
return BasicAuth(username=username, password=password)
if self.trust_env and "Authorization" not in request.headers:
credentials = self._netrc.get_credentials(request.url.host)
if credentials is not None:
return BasicAuth(username=credentials[0], password=credentials[1])
return Auth()
def _build_redirect_request(self, request: Request, response: Response) -> Request:
method = self._redirect_method(request, response)
url = self._redirect_url(request, response)
headers = self._redirect_headers(request, url, method)
stream = self._redirect_stream(request, method)
cookies = Cookies(self.cookies)
return Request(
method=method, url=url, headers=headers, cookies=cookies, stream=stream
)
def _redirect_method(self, request: Request, response: Response) -> str:
method = request.method
if response.status_code == codes.SEE_OTHER and method != "HEAD":
method = "GET"
if response.status_code == codes.FOUND and method != "HEAD":
method = "GET"
if response.status_code == codes.MOVED_PERMANENTLY and method == "POST":
method = "GET"
return method
def _redirect_url(self, request: Request, response: Response) -> URL:
location = response.headers["Location"]
try:
url = URL(location)
except InvalidURL as exc:
raise RemoteProtocolError(
f"Invalid URL in location header: {exc}.", request=request
) from None
if url.scheme and not url.host:
url = url.copy_with(host=request.url.host)
if url.is_relative_url:
url = request.url.join(url)
if request.url.fragment and not url.fragment:
url = url.copy_with(fragment=request.url.fragment)
return url
def _redirect_headers(self, request: Request, url: URL, method: str) -> Headers:
headers = Headers(request.headers)
if not same_origin(url, request.url):
headers.pop("Authorization", None)
headers["Host"] = url.netloc.decode("ascii")
if method != request.method and method == "GET":
headers.pop("Content-Length", None)
headers.pop("Transfer-Encoding", None)
headers.pop("Cookie", None)
return headers
def _redirect_stream(
self, request: Request, method: str
) -> typing.Optional[typing.Union[SyncByteStream, AsyncByteStream]]:
if method != request.method and method == "GET":
return None
return request.stream
class Client(BaseClient):
def __init__(
self,
*,
auth: AuthTypes = None,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
verify: VerifyTypes = True,
cert: CertTypes = None,
http1: bool = True,
http2: bool = False,
proxies: ProxiesTypes = None,
mounts: typing.Mapping[str, BaseTransport] = None,
timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
follow_redirects: bool = False,
limits: Limits = DEFAULT_LIMITS,
max_redirects: int = DEFAULT_MAX_REDIRECTS,
event_hooks: typing.Mapping[str, typing.List[typing.Callable]] = None,
base_url: URLTypes = "",
transport: BaseTransport = None,
app: typing.Callable = None,
trust_env: bool = True,
):
super().__init__(
auth=auth,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
follow_redirects=follow_redirects,
max_redirects=max_redirects,
event_hooks=event_hooks,
base_url=base_url,
trust_env=trust_env,
)
if http2:
try:
import h2
except ImportError:
raise ImportError(
"Using http2=True, but the 'h2' package is not installed. "
"Make sure to install httpx using `pip install httpx[http2]`."
) from None
allow_env_proxies = trust_env and app is None and transport is None
proxy_map = self._get_proxy_map(proxies, allow_env_proxies)
self._transport = self._init_transport(
verify=verify,
cert=cert,
http1=http1,
http2=http2,
limits=limits,
transport=transport,
app=app,
trust_env=trust_env,
)
self._mounts: typing.Dict[URLPattern, typing.Optional[BaseTransport]] = {
URLPattern(key): None
if proxy is None
else self._init_proxy_transport(
proxy,
verify=verify,
cert=cert,
http1=http1,
http2=http2,
limits=limits,
trust_env=trust_env,
)
for key, proxy in proxy_map.items()
}
if mounts is not None:
self._mounts.update(
{URLPattern(key): transport for key, transport in mounts.items()}
)
self._mounts = dict(sorted(self._mounts.items()))
def _init_transport(
self,
verify: VerifyTypes = True,
cert: CertTypes = None,
http1: bool = True,
http2: bool = False,
limits: Limits = DEFAULT_LIMITS,
transport: BaseTransport = None,
app: typing.Callable = None,
trust_env: bool = True,
) -> BaseTransport:
if transport is not None:
return transport
if app is not None:
return WSGITransport(app=app)
return HTTPTransport(
verify=verify,
cert=cert,
http1=http1,
http2=http2,
limits=limits,
trust_env=trust_env,
)
def _init_proxy_transport(
self,
proxy: Proxy,
verify: VerifyTypes = True,
cert: CertTypes = None,
http1: bool = True,
http2: bool = False,
limits: Limits = DEFAULT_LIMITS,
trust_env: bool = True,
) -> BaseTransport:
return HTTPTransport(
verify=verify,
cert=cert,
http1=http1,
http2=http2,
limits=limits,
trust_env=trust_env,
proxy=proxy,
)
def _transport_for_url(self, url: URL) -> BaseTransport:
for pattern, transport in self._mounts.items():
if pattern.matches(url):
return self._transport if transport is None else transport
return self._transport
def request(
self,
method: str,
url: URLTypes,
*,
content: RequestContent = None,
data: RequestData = None,
files: RequestFiles = None,
json: typing.Any = None,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT,
timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
) -> Response:
if cookies is not None:
message = (
"Setting per-request cookies=<...> is being deprecated, because "
"the expected behaviour on cookie persistence is ambiguous. Set "
"cookies directly on the client instance instead."
)
warnings.warn(message, DeprecationWarning)
request = self.build_request(
method=method,
url=url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
)
return self.send(request, auth=auth, follow_redirects=follow_redirects)
@contextmanager
def stream(
self,
method: str,
url: URLTypes,
*,
content: RequestContent = None,
data: RequestData = None,
files: RequestFiles = None,
json: typing.Any = None,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
auth: typing.Union[AuthTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
follow_redirects: typing.Union[bool, UseClientDefault] = USE_CLIENT_DEFAULT,
timeout: typing.Union[TimeoutTypes, UseClientDefault] = USE_CLIENT_DEFAULT,
) -> typing.Iterator[Response]:
request = self.build_request(
method=method,
url=url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
)
response = self.send(
request=request,
auth=auth,
follow_redirects=follow_redirects,
stream=True,
)
try:
yield response
finally:
response.close()
|
BSD 3-Clause New or Revised License
|
elliot79313/tra-tracking-on-gae
|
gaesessions/__init__.py
|
Session.regenerate_id
|
python
|
def regenerate_id(self, expiration_ts=None):
if self.sid or expiration_ts is not None:
self.ensure_data_loaded()
if expiration_ts is None:
expiration_ts = self.get_expiration()
self.__set_sid(self.__make_sid(expiration_ts, self.is_ssl_only()))
self.dirty = True
|
Assigns the session a new session ID (data carries over). This
should be called whenever a user authenticates to prevent session
fixation attacks.
``expiration_ts`` - The UNIX timestamp the session will expire at. If
omitted, the session expiration time will not be changed.
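A usage sketch inside a login handler (App Engine request context and Python 2 assumed, matching the library); on_login_success is a hypothetical view helper:
from gaesessions import get_current_session
def on_login_success():
    session = get_current_session()
    # Issue a fresh session ID while keeping the data, so an attacker-supplied
    # pre-login SID is never promoted to an authenticated session.
    session.regenerate_id()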
|
https://github.com/elliot79313/tra-tracking-on-gae/blob/9f920a6e96b357bccba2d4328a3a7e2dcdebfc0a/gaesessions/__init__.py#L219-L232
|
from Cookie import CookieError, SimpleCookie
from base64 import b64decode, b64encode
import datetime
import hashlib
import hmac
import logging
import pickle
import os
import threading
import time
from google.appengine.api import memcache
from google.appengine.ext import db
COOKIE_NAME_PREFIX = "DgU"
COOKIE_PATH = "/"
DEFAULT_COOKIE_ONLY_THRESH = 10240
DEFAULT_LIFETIME = datetime.timedelta(days=7)
SID_LEN = 43
SIG_LEN = 44
MAX_COOKIE_LEN = 4096
EXPIRE_COOKIE_FMT = ' %s=; expires=Wed, 01-Jan-1970 00:00:00 GMT; Path=' + COOKIE_PATH
COOKIE_FMT = ' ' + COOKIE_NAME_PREFIX + '%02d="%s"; %sPath=' + COOKIE_PATH + '; HttpOnly'
COOKIE_FMT_SECURE = COOKIE_FMT + '; Secure'
COOKIE_DATE_FMT = '%a, %d-%b-%Y %H:%M:%S GMT'
COOKIE_OVERHEAD = len(COOKIE_FMT % (0, '', '')) + len('expires=Xxx, xx XXX XXXX XX:XX:XX GMT; ') + 150
MAX_DATA_PER_COOKIE = MAX_COOKIE_LEN - COOKIE_OVERHEAD
_tls = threading.local()
def get_current_session():
return _tls.current_session
def set_current_session(session):
_tls.current_session = session
def is_gaesessions_key(k):
return k.startswith(COOKIE_NAME_PREFIX)
class SessionModel(db.Model):
pdump = db.BlobProperty()
class Session(object):
DIRTY_BUT_DONT_PERSIST_TO_DB = 1
def __init__(self, sid=None, lifetime=DEFAULT_LIFETIME, no_datastore=False,
cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH, cookie_key=None):
self._accessed = False
self.sid = None
self.cookie_keys = []
self.cookie_data = None
self.data = {}
self.dirty = False
self.lifetime = lifetime
self.no_datastore = no_datastore
self.cookie_only_thresh = cookie_only_threshold
self.base_key = cookie_key
if sid:
self.__set_sid(sid, False)
self.data = None
else:
self.__read_cookie()
@staticmethod
def __compute_hmac(base_key, sid, text):
key = base_key + sid
return b64encode(hmac.new(key, text, hashlib.sha256).digest())
def __read_cookie(self):
try:
cookie = SimpleCookie(os.environ['HTTP_COOKIE'])
self.cookie_keys = filter(is_gaesessions_key, cookie.keys())
if not self.cookie_keys:
return
self.cookie_keys.sort()
data = ''.join(cookie[k].value for k in self.cookie_keys)
i = SIG_LEN + SID_LEN
sig, sid, b64pdump = data[:SIG_LEN], data[SIG_LEN:i], data[i:]
pdump = b64decode(b64pdump)
actual_sig = Session.__compute_hmac(self.base_key, sid, pdump)
if sig == actual_sig:
self.__set_sid(sid, False)
if self.get_expiration() != 0 and time.time() > self.get_expiration():
return self.terminate()
if pdump:
self.data = self.__decode_data(pdump)
else:
self.data = None
else:
logging.warn('cookie with invalid sig received from %s: %s' % (os.environ.get('REMOTE_ADDR'), b64pdump))
except (CookieError, KeyError, IndexError, TypeError):
self.terminate(False)
def make_cookie_headers(self):
if not self.sid:
return [EXPIRE_COOKIE_FMT % k for k in self.cookie_keys]
if self.cookie_data is None:
return []
if self.is_ssl_only():
m = MAX_DATA_PER_COOKIE - 8
fmt = COOKIE_FMT_SECURE
else:
m = MAX_DATA_PER_COOKIE
fmt = COOKIE_FMT
sig = Session.__compute_hmac(self.base_key, self.sid, self.cookie_data)
cv = sig + self.sid + b64encode(self.cookie_data)
num_cookies = 1 + (len(cv) - 1) / m
if self.get_expiration() > 0:
ed = "expires=%s; " % datetime.datetime.fromtimestamp(self.get_expiration()).strftime(COOKIE_DATE_FMT)
else:
ed = ''
cookies = [fmt % (i, cv[i * m:i * m + m], ed) for i in xrange(num_cookies)]
old_cookies = xrange(num_cookies, len(self.cookie_keys))
key = COOKIE_NAME_PREFIX + '%02d'
cookies_to_ax = [EXPIRE_COOKIE_FMT % (key % i) for i in old_cookies]
return cookies + cookies_to_ax
def is_active(self):
return self.sid is not None
def is_ssl_only(self):
return self.sid is not None and self.sid[-33] == 'S'
def is_accessed(self):
return self._accessed
def ensure_data_loaded(self):
self._accessed = True
if self.data is None and self.sid:
self.__retrieve_data()
def get_expiration(self):
try:
return int(self.sid[:-33])
except:
return 0
def __make_sid(self, expire_ts=None, ssl_only=False):
if expire_ts is None:
expire_dt = datetime.datetime.now() + self.lifetime
expire_ts = int(time.mktime((expire_dt).timetuple()))
else:
expire_ts = int(expire_ts)
if ssl_only:
sep = 'S'
else:
sep = '_'
return ('%010d' % expire_ts) + sep + hashlib.md5(os.urandom(16)).hexdigest()
@staticmethod
def __encode_data(d):
eP = {}
eO = {}
for k, v in d.iteritems():
if isinstance(v, db.Model):
eP[k] = db.model_to_protobuf(v)
else:
eO[k] = v
return pickle.dumps((eP, eO), 2)
@staticmethod
def __decode_data(pdump):
try:
eP, eO = pickle.loads(pdump)
for k, v in eP.iteritems():
eO[k] = db.model_from_protobuf(v)
except Exception, e:
logging.warn("failed to decode session data: %s" % e)
eO = {}
return eO
|
MIT License
|
hexrd/hexrd
|
hexrd/imageseries/baseclass.py
|
ImageSeries.__init__
|
python
|
def __init__(self, adapter):
self._adapter = adapter
return
|
Build ImageSeries from adapter instance
*adapter* - object instance based on abstract Sequence class with
properties for image shape, data type and metadata.
|
https://github.com/hexrd/hexrd/blob/90e9b26e5e5091dd5ecf460b3227072e6d90bcd5/hexrd/imageseries/baseclass.py#L12-L20
|
from .imageseriesabc import ImageSeriesABC
class ImageSeries(ImageSeriesABC):
|
BSD 3-Clause New or Revised License
|
react-native-skia/react-native-skia
|
build/config/ios/compile_xcassets.py
|
FilterCompilerOutput
|
python
|
def FilterCompilerOutput(compiler_output, relative_paths):
filtered_output = []
current_section = None
data_in_section = False
for line in compiler_output.splitlines():
match = SECTION_HEADER.search(line)
if match is not None:
data_in_section = False
current_section = match.group(1)
continue
if current_section and current_section != NOTICE_SECTION:
if IsSpuriousMessage(line):
continue
absolute_path = line.split(':')[0]
relative_path = relative_paths.get(absolute_path, absolute_path)
if absolute_path != relative_path:
line = relative_path + line[len(absolute_path):]
if not data_in_section:
data_in_section = True
filtered_output.append('/* %s */\n' % current_section)
filtered_output.append(line + '\n')
return ''.join(filtered_output)
|
Filters actool compilation output.
The compiler output is composed of multiple sections for each different
level of output (error, warning, notices, ...). Each section starts with
the section name on a single line, followed by all the messages from the
section.
The function filters out any lines that are not in the com.apple.actool.errors
or com.apple.actool.document.warnings sections (as spurious messages come
before any section of the output).
See crbug.com/730054, crbug.com/739163 and crbug.com/770634 for some example
messages that pollute the output of actool and cause flaky builds.
Args:
compiler_output: string containing the output generated by the
compiler (contains both stdout and stderr)
relative_paths: mapping from absolute to relative paths used to
convert paths in the warning and error messages (unknown paths
will be left unaltered)
Returns:
The filtered output of the compiler. If the compilation was a
success, the output will be empty; otherwise it will use
relative paths and omit any irrelevant output.
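An illustrative call with a small synthetic actool transcript (assumes FilterCompilerOutput and the module-level constants shown below are in scope; the paths are made up):
raw_output = (
    "/* com.apple.actool.document.warnings */\n"
    "/tmp/src/Assets.xcassets:./AppIcon.appiconset: warning: some warning\n"
    "/* com.apple.actool.compilation-results */\n"
    "/tmp/out/Assets.car\n"
)
relative = {"/tmp/src/Assets.xcassets": "Assets.xcassets"}
print(FilterCompilerOutput(raw_output, relative))
# /* com.apple.actool.document.warnings */
# Assets.xcassets:./AppIcon.appiconset: warning: some warning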
|
https://github.com/react-native-skia/react-native-skia/blob/91ecc74444b163f128541dbc1a42e27a9c0fb40b/build/config/ios/compile_xcassets.py#L65-L114
|
import argparse
import os
import re
import subprocess
import sys
import tempfile
SECTION_HEADER = re.compile('^/\\* ([^ ]*) \\*/$')
NOTICE_SECTION = 'com.apple.actool.compilation-results'
SPURIOUS_PATTERNS = [
re.compile(v) for v in [
r'\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: \(null\)',
r'\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: 76x76@1x app icons'
' only apply to iPad apps targeting releases of iOS prior to 10.0.',
]
]
ACTOOL_FLAG_FOR_ASSET_TYPE = {
'.appiconset': '--app-icon',
'.launchimage': '--launch-image',
}
def IsSpuriousMessage(line):
for pattern in SPURIOUS_PATTERNS:
match = pattern.search(line)
if match is not None:
return True
return False
|
MIT License
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/label_service/transports/grpc.py
|
LabelServiceGrpcTransport.mutate_labels
|
python
|
def mutate_labels(
self,
) -> Callable[
[label_service.MutateLabelsRequest], label_service.MutateLabelsResponse
]:
if "mutate_labels" not in self._stubs:
self._stubs["mutate_labels"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.LabelService/MutateLabels",
request_serializer=label_service.MutateLabelsRequest.serialize,
response_deserializer=label_service.MutateLabelsResponse.deserialize,
)
return self._stubs["mutate_labels"]
|
r"""Return a callable for the mutate labels method over gRPC.
Creates, updates, or removes labels. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `DateError <>`__
`DistinctError <>`__ `FieldError <>`__ `FieldMaskError <>`__
`HeaderError <>`__ `IdError <>`__ `InternalError <>`__
`LabelError <>`__ `MutateError <>`__
`NewResourceCreationError <>`__ `NotEmptyError <>`__
`NullError <>`__ `OperatorError <>`__ `QuotaError <>`__
`RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Returns:
Callable[[~.MutateLabelsRequest],
~.MutateLabelsResponse]:
A function that, when called, will call the underlying RPC
on the server.
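A minimal usage sketch (hedged: assumes an already-configured transport instance
named `transport`; the request fields are illustrative):
>>> rpc = transport.mutate_labels  # cached unary-unary callable
>>> response = rpc(label_service.MutateLabelsRequest(
...     customer_id="1234567890", operations=[]))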
|
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/google/ads/googleads/v8/services/services/label_service/transports/grpc.py#L243-L280
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.ads.googleads.v8.resources.types import label
from google.ads.googleads.v8.services.types import label_service
from .base import LabelServiceTransport, DEFAULT_CLIENT_INFO
class LabelServiceGrpcTransport(LabelServiceTransport):
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {}
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def get_label(
self,
) -> Callable[[label_service.GetLabelRequest], label.Label]:
if "get_label" not in self._stubs:
self._stubs["get_label"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.LabelService/GetLabel",
request_serializer=label_service.GetLabelRequest.serialize,
response_deserializer=label.Label.deserialize,
)
return self._stubs["get_label"]
@property
|
Apache License 2.0
|
chaffelson/whoville
|
whoville/cloudbreak/apis/v1connectors_api.py
|
V1connectorsApi.get_ochestrators_by_type_with_http_info
|
python
|
def get_ochestrators_by_type_with_http_info(self, type, **kwargs):
all_params = ['type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_ochestrators_by_type" % key
)
params[key] = val
del params['kwargs']
if ('type' not in params) or (params['type'] is None):
raise ValueError("Missing the required parameter `type` when calling `get_ochestrators_by_type`")
collection_formats = {}
path_params = {}
if 'type' in params:
path_params['type'] = params['type']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/ochestrators/{type}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
Retrieve orchestrators by type
Each cloud provider has its own specific resources, such as instance types and disk types. These endpoints collect them.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_ochestrators_by_type_with_http_info(type, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str type: (required)
:return: list[str]
If the method is called asynchronously,
returns the request thread.
|
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/apis/v1connectors_api.py#L788-L866
|
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V1connectorsApi(object):
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_recommendation(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_recommendation_with_http_info(**kwargs)
else:
(data) = self.create_recommendation_with_http_info(**kwargs)
return data
def create_recommendation_with_http_info(self, **kwargs):
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_recommendation" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/recommendation', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecommendationResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_access_configs(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_access_configs_with_http_info(**kwargs)
else:
(data) = self.get_access_configs_with_http_info(**kwargs)
return data
def get_access_configs_with_http_info(self, **kwargs):
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_access_configs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/accessconfigs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformAccessConfigsResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_disktype_by_type(self, type, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_disktype_by_type_with_http_info(type, **kwargs)
else:
(data) = self.get_disktype_by_type_with_http_info(type, **kwargs)
return data
def get_disktype_by_type_with_http_info(self, type, **kwargs):
all_params = ['type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_disktype_by_type" % key
)
params[key] = val
del params['kwargs']
if ('type' not in params) or (params['type'] is None):
raise ValueError("Missing the required parameter `type` when calling `get_disktype_by_type`")
collection_formats = {}
path_params = {}
if 'type' in params:
path_params['type'] = params['type']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/disktypes/{type}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_disktypes(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_disktypes_with_http_info(**kwargs)
else:
(data) = self.get_disktypes_with_http_info(**kwargs)
return data
def get_disktypes_with_http_info(self, **kwargs):
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_disktypes" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/disktypes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformDisksJson',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_encryption_keys(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_encryption_keys_with_http_info(**kwargs)
else:
(data) = self.get_encryption_keys_with_http_info(**kwargs)
return data
def get_encryption_keys_with_http_info(self, **kwargs):
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_encryption_keys" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/encryptionkeys', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformEncryptionKeysResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_gateways_credential_id(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_gateways_credential_id_with_http_info(**kwargs)
else:
(data) = self.get_gateways_credential_id_with_http_info(**kwargs)
return data
def get_gateways_credential_id_with_http_info(self, **kwargs):
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_gateways_credential_id" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/gateways', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformGatewaysResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_ip_pools_credential_id(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_ip_pools_credential_id_with_http_info(**kwargs)
else:
(data) = self.get_ip_pools_credential_id_with_http_info(**kwargs)
return data
def get_ip_pools_credential_id_with_http_info(self, **kwargs):
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_ip_pools_credential_id" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(['application/json'])
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/connectors/ippools', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformIpPoolsResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_ochestrators_by_type(self, type, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_ochestrators_by_type_with_http_info(type, **kwargs)
else:
(data) = self.get_ochestrators_by_type_with_http_info(type, **kwargs)
return data
|
Apache License 2.0
|
adafruit/adafruit_python_bluefruitle
|
Adafruit_BluefruitLE/corebluetooth/adapter.py
|
CoreBluetoothAdapter.power_off
|
python
|
def power_off(self, timeout_sec=TIMEOUT_SEC):
self._powered_off.clear()
IOBluetoothPreferenceSetControllerPowerState(0)
if not self._powered_off.wait(timeout_sec):
raise RuntimeError('Exceeded timeout waiting for adapter to power off!')
|
Power off Bluetooth.
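A hedged usage sketch (in practice the adapter comes from the initialized BLE
provider, which wires up the state-change callbacks; `ble` is assumed here):
>>> adapter = ble.get_default_adapter()
>>> adapter.power_off(timeout_sec=10)  # blocks until powered off, RuntimeError on timeout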
|
https://github.com/adafruit/adafruit_python_bluefruitle/blob/a01dec2c88fa38143afb855e1df4f9ac774156b7/Adafruit_BluefruitLE/corebluetooth/adapter.py#L102-L108
|
import logging
import threading
import time
import objc
from ..config import TIMEOUT_SEC
from ..interfaces import Adapter
from ..platform import get_provider
objc.loadBundleFunctions(
objc.loadBundle("IOBluetooth", globals(), bundle_path=objc.pathForFramework(u'/System/Library/Frameworks/IOBluetooth.framework')),
globals(),
[('IOBluetoothPreferenceGetControllerPowerState', b'oI'),('IOBluetoothPreferenceSetControllerPowerState', b'vI')]
)
logger = logging.getLogger(__name__)
class CoreBluetoothAdapter(Adapter):
def __init__(self):
self._is_scanning = False
self._powered_on = threading.Event()
self._powered_off = threading.Event()
def _state_changed(self, state):
logger.debug('Adapter state change: {0}'.format(state))
if state == 5:
self._powered_off.clear()
self._powered_on.set()
elif state == 4:
self._powered_on.clear()
self._powered_off.set()
@property
def name(self):
return "Default Adapter"
def start_scan(self, timeout_sec=TIMEOUT_SEC):
get_provider()._central_manager.scanForPeripheralsWithServices_options_(None, None)
self._is_scanning = True
def stop_scan(self, timeout_sec=TIMEOUT_SEC):
get_provider()._central_manager.stopScan()
self._is_scanning = False
@property
def is_scanning(self):
return self._is_scanning
def power_on(self, timeout_sec=TIMEOUT_SEC):
self._powered_on.clear()
IOBluetoothPreferenceSetControllerPowerState(1)
if not self._powered_on.wait(timeout_sec):
raise RuntimeError('Exceeded timeout waiting for adapter to power on!')
|
MIT License
|
google/flax
|
examples/ogbg_molpcba/input_pipeline.py
|
convert_to_graphs_tuple
|
python
|
def convert_to_graphs_tuple(graph: Dict[str, tf.Tensor],
add_virtual_node: bool,
add_undirected_edges: bool,
add_self_loops: bool) -> jraph.GraphsTuple:
num_nodes = tf.squeeze(graph['num_nodes'])
num_edges = tf.squeeze(graph['num_edges'])
nodes = graph['node_feat']
edges = graph['edge_feat']
edge_feature_dim = edges.shape[-1]
labels = graph['labels']
senders = graph['edge_index'][:, 0]
receivers = graph['edge_index'][:, 1]
if add_virtual_node:
nodes = tf.concat(
[nodes, tf.zeros_like(nodes[0, None])], axis=0)
senders = tf.concat(
[senders, tf.range(num_nodes)], axis=0)
receivers = tf.concat(
[receivers, tf.fill((num_nodes,), num_nodes + 1)], axis=0)
edges = tf.concat(
[edges, tf.zeros((num_nodes, edge_feature_dim))], axis=0)
num_edges += num_nodes
num_nodes += 1
if add_undirected_edges:
new_senders = tf.concat([senders, receivers], axis=0)
new_receivers = tf.concat([receivers, senders], axis=0)
edges = tf.concat([edges, edges], axis=0)
senders, receivers = new_senders, new_receivers
num_edges *= 2
if add_self_loops:
senders = tf.concat([senders, tf.range(num_nodes)], axis=0)
receivers = tf.concat([receivers, tf.range(num_nodes)], axis=0)
edges = tf.concat([edges, tf.zeros((num_nodes, edge_feature_dim))], axis=0)
num_edges += num_nodes
return jraph.GraphsTuple(
n_node=tf.expand_dims(num_nodes, 0),
n_edge=tf.expand_dims(num_edges, 0),
nodes=nodes,
edges=edges,
senders=senders,
receivers=receivers,
globals=tf.expand_dims(labels, axis=0),
)
|
Converts a dictionary of tf.Tensors to a GraphsTuple.
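A minimal usage sketch (hedged: mirrors how get_datasets in the same module applies
the conversion; the dataset loading call is an assumption):
>>> ds = tfds.load('ogbg_molpcba', split='train')
>>> ds = ds.map(
...     functools.partial(
...         convert_to_graphs_tuple,
...         add_virtual_node=True,
...         add_undirected_edges=True,
...         add_self_loops=True),
...     num_parallel_calls=tf.data.AUTOTUNE)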
|
https://github.com/google/flax/blob/48b34ab87c7d20afc567f6e0fe5d67e423cf08bc/examples/ogbg_molpcba/input_pipeline.py#L108-L162
|
import functools
from typing import Dict, NamedTuple
import jraph
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
class GraphsTupleSize(NamedTuple):
n_node: int
n_edge: int
n_graph: int
def get_raw_datasets() -> Dict[str, tf.data.Dataset]:
ds_builder = tfds.builder('ogbg_molpcba')
ds_builder.download_and_prepare()
ds_splits = ['train', 'validation', 'test']
datasets = {
split: ds_builder.as_dataset(split=split) for split in ds_splits
}
return datasets
def get_datasets(batch_size: int,
add_virtual_node: bool = True,
add_undirected_edges: bool = True,
add_self_loops: bool = True) -> Dict[str, tf.data.Dataset]:
if batch_size <= 1:
raise ValueError('Batch size must be > 1 to account for padding graphs.')
datasets = get_raw_datasets()
convert_to_graphs_tuple_fn = functools.partial(
convert_to_graphs_tuple,
      add_virtual_node=add_virtual_node,
      add_undirected_edges=add_undirected_edges,
      add_self_loops=add_self_loops,
)
for split_name in datasets:
datasets[split_name] = datasets[split_name].map(
convert_to_graphs_tuple_fn,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=True)
budget = estimate_padding_budget_for_batch_size(datasets['train'], batch_size,
num_estimation_graphs=100)
example_graph = next(datasets['train'].as_numpy_iterator())
example_padded_graph = jraph.pad_with_graphs(example_graph, *budget)
padded_graphs_spec = specs_from_graphs_tuple(example_padded_graph)
for split_name, dataset_split in datasets.items():
if split_name == 'train':
dataset_split = dataset_split.shuffle(100, reshuffle_each_iteration=True)
dataset_split = dataset_split.repeat()
batching_fn = functools.partial(
jraph.dynamically_batch,
graphs_tuple_iterator=iter(dataset_split),
n_node=budget.n_node,
n_edge=budget.n_edge,
n_graph=budget.n_graph)
dataset_split = tf.data.Dataset.from_generator(
batching_fn,
output_signature=padded_graphs_spec)
if split_name in ['validation', 'test']:
dataset_split = dataset_split.cache()
datasets[split_name] = dataset_split
return datasets
|
Apache License 2.0
|
stilljake/openvpn-azure-ad-auth
|
openvpn-azure-ad-auth.py
|
success
|
python
|
def success():
sys.exit(0)
|
The user has authenticated and is authorized
|
https://github.com/stilljake/openvpn-azure-ad-auth/blob/ba62d62d61c481f419e125eb4191a359da1325eb/openvpn-azure-ad-auth.py#L28-L30
|
import binascii
from backports.pbkdf2 import pbkdf2_hmac, compare_digest
import logging
import os
import sys
import adal
import requests
import yaml
loggerName = __name__
logging.basicConfig(
format='%(asctime) 25s openvpn-azure-aad-auth %(levelname) 7s %(pathname)s %(module)s: %(message)s'
)
logger = logging.getLogger(loggerName)
|
MIT License
|
matijakolaric-com/django-music-publisher
|
music_publisher/templatetags/dmp_dashboard.py
|
yield_sections
|
python
|
def yield_sections(model_dict, sections):
for name, object_names in sections.items():
models = []
for object_name in object_names:
if object_name in model_dict:
models.append(model_dict[object_name])
del model_dict[object_name]
if models:
yield {
'name': name,
'models': models}
|
Convert model dictionary according to section structure
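A small illustrative call (hypothetical model data, not from this record):
>>> model_dict = {'Work': {'name': 'Works'}, 'Writer': {'name': 'Writers'}}
>>> sections = OrderedDict([('Music', ['Work', 'Writer']), ('Other', ['Missing'])])
>>> list(yield_sections(model_dict, sections))
[{'name': 'Music', 'models': [{'name': 'Works'}, {'name': 'Writers'}]}]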
|
https://github.com/matijakolaric-com/django-music-publisher/blob/8c73696673cf1a2928a6b66a5bb027f12cbcb6b1/music_publisher/templatetags/dmp_dashboard.py#L9-L20
|
from django import template
from collections import OrderedDict
register = template.Library()
|
MIT License
|
prometheus/client_python
|
prometheus_client/metrics.py
|
Counter.inc
|
python
|
def inc(self, amount=1):
self._raise_if_not_observable()
if amount < 0:
raise ValueError('Counters can only be incremented by non-negative amounts.')
self._value.inc(amount)
|
Increment counter by the given amount.
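A standard usage sketch (public prometheus_client API):
>>> from prometheus_client import Counter
>>> c = Counter('http_requests', 'Total HTTP requests handled')
>>> c.inc()     # increment by 1
>>> c.inc(2.5)  # any non-negative amount is allowed
>>> c.inc(-1)   # would raise ValueError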
|
https://github.com/prometheus/client_python/blob/09fb45953bac018a90e89f0b1e7bcd1d5d81c01b/prometheus_client/metrics.py#L259-L264
|
import sys
from threading import Lock
import time
import types
from . import values
from .context_managers import ExceptionCounter, InprogressTracker, Timer
from .metrics_core import (
Metric, METRIC_LABEL_NAME_RE, METRIC_NAME_RE,
RESERVED_METRIC_LABEL_NAME_RE,
)
from .registry import REGISTRY
from .utils import floatToGoString, INF
if sys.version_info > (3,):
unicode = str
create_bound_method = types.MethodType
else:
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def _build_full_name(metric_type, name, namespace, subsystem, unit):
full_name = ''
if namespace:
full_name += namespace + '_'
if subsystem:
full_name += subsystem + '_'
full_name += name
if metric_type == 'counter' and full_name.endswith('_total'):
full_name = full_name[:-6]
if unit and not full_name.endswith("_" + unit):
full_name += "_" + unit
if unit and metric_type in ('info', 'stateset'):
raise ValueError('Metric name is of a type that cannot have a unit: ' + full_name)
return full_name
def _validate_labelnames(cls, labelnames):
labelnames = tuple(labelnames)
for l in labelnames:
if not METRIC_LABEL_NAME_RE.match(l):
raise ValueError('Invalid label metric name: ' + l)
if RESERVED_METRIC_LABEL_NAME_RE.match(l):
raise ValueError('Reserved label metric name: ' + l)
if l in cls._reserved_labelnames:
raise ValueError('Reserved label metric name: ' + l)
return labelnames
class MetricWrapperBase(object):
_type = None
_reserved_labelnames = ()
def _is_observable(self):
return not self._labelnames or (self._labelnames and self._labelvalues)
def _raise_if_not_observable(self):
if not self._is_observable():
raise ValueError('%s metric is missing label values' % str(self._type))
def _is_parent(self):
return self._labelnames and not self._labelvalues
def _get_metric(self):
return Metric(self._name, self._documentation, self._type, self._unit)
def describe(self):
return [self._get_metric()]
def collect(self):
metric = self._get_metric()
for suffix, labels, value in self._samples():
metric.add_sample(self._name + suffix, labels, value)
return [metric]
def __str__(self):
return "{0}:{1}".format(self._type, self._name)
def __repr__(self):
metric_type = type(self)
return "{0}.{1}({2})".format(metric_type.__module__, metric_type.__name__, self._name)
def __init__(self,
name,
documentation,
labelnames=(),
namespace='',
subsystem='',
unit='',
registry=REGISTRY,
_labelvalues=None,
):
self._name = _build_full_name(self._type, name, namespace, subsystem, unit)
self._labelnames = _validate_labelnames(self, labelnames)
self._labelvalues = tuple(_labelvalues or ())
self._kwargs = {}
self._documentation = documentation
self._unit = unit
if not METRIC_NAME_RE.match(self._name):
raise ValueError('Invalid metric name: ' + self._name)
if self._is_parent():
self._lock = Lock()
self._metrics = {}
if self._is_observable():
self._metric_init()
if not self._labelvalues:
if registry:
registry.register(self)
def labels(self, *labelvalues, **labelkwargs):
if not self._labelnames:
raise ValueError('No label names were set when constructing %s' % self)
if self._labelvalues:
raise ValueError('%s already has labels set (%s); can not chain calls to .labels()' % (
self,
dict(zip(self._labelnames, self._labelvalues))
))
if labelvalues and labelkwargs:
raise ValueError("Can't pass both *args and **kwargs")
if labelkwargs:
if sorted(labelkwargs) != sorted(self._labelnames):
raise ValueError('Incorrect label names')
labelvalues = tuple(unicode(labelkwargs[l]) for l in self._labelnames)
else:
if len(labelvalues) != len(self._labelnames):
raise ValueError('Incorrect label count')
labelvalues = tuple(unicode(l) for l in labelvalues)
with self._lock:
if labelvalues not in self._metrics:
self._metrics[labelvalues] = self.__class__(
self._name,
documentation=self._documentation,
labelnames=self._labelnames,
unit=self._unit,
_labelvalues=labelvalues,
**self._kwargs
)
return self._metrics[labelvalues]
def remove(self, *labelvalues):
if not self._labelnames:
raise ValueError('No label names were set when constructing %s' % self)
"""Remove the given labelset from the metric."""
if len(labelvalues) != len(self._labelnames):
raise ValueError('Incorrect label count (expected %d, got %s)' % (len(self._labelnames), labelvalues))
labelvalues = tuple(unicode(l) for l in labelvalues)
with self._lock:
del self._metrics[labelvalues]
def clear(self):
with self._lock:
self._metrics = {}
def _samples(self):
if self._is_parent():
return self._multi_samples()
else:
return self._child_samples()
def _multi_samples(self):
with self._lock:
metrics = self._metrics.copy()
for labels, metric in metrics.items():
series_labels = list(zip(self._labelnames, labels))
for suffix, sample_labels, value in metric._samples():
yield (suffix, dict(series_labels + list(sample_labels.items())), value)
def _child_samples(self):
raise NotImplementedError('_child_samples() must be implemented by %r' % self)
def _metric_init(self):
raise NotImplementedError('_metric_init() must be implemented by %r' % self)
class Counter(MetricWrapperBase):
_type = 'counter'
def _metric_init(self):
self._value = values.ValueClass(self._type, self._name, self._name + '_total', self._labelnames,
self._labelvalues)
self._created = time.time()
|
Apache License 2.0
|
pyqode/pyqode.core
|
pyqode/core/widgets/interactive.py
|
InteractiveConsole.merge_outputs
|
python
|
def merge_outputs(self):
return self._merge_outputs
|
Merge stderr with stdout. Default is False.
If set to True, stderr and stdout will use the same color: stdout_color.
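A hedged usage sketch (requires a running Qt application and pyqode's translation
hook; the matching setter exists in the full class but is not shown in this record):
>>> app = QApplication([])
>>> console = InteractiveConsole()
>>> console.merge_outputs  # defaults to False
False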
|
https://github.com/pyqode/pyqode.core/blob/0ffabebe4f0397d53429024f6f44db3fe97b0828/pyqode/core/widgets/interactive.py#L229-L236
|
import locale
import logging
import sys
from pyqode.core.api.client import PROCESS_ERROR_STRING
from pyqode.core.managers.decorations import TextDecorationsManager
from pyqode.core.managers.panels import PanelsManager
from pyqode.qt.QtCore import Qt, Signal, QProcess, QProcessEnvironment
from pyqode.qt.QtWidgets import QTextEdit, QAction, QApplication
from pyqode.qt.QtGui import QColor, QTextCursor, QFont, QKeySequence
def _logger():
return logging.getLogger(__name__)
class InteractiveConsole(QTextEdit):
process_finished = Signal(int)
process_started = Signal()
def __init__(self, parent=None):
super(InteractiveConsole, self).__init__(parent)
self.panels = PanelsManager(self)
self.decorations = TextDecorationsManager(self)
from pyqode.core.panels import SearchAndReplacePanel
self.panels.append(SearchAndReplacePanel(),
SearchAndReplacePanel.Position.TOP)
self._stdout_col = QColor("#404040")
self._app_msg_col = QColor("#4040FF")
self._stdin_col = QColor("#22AA22")
self._stderr_col = QColor("#FF0000")
self._write_app_messages = True
self._process_name = ''
self.process = None
self._args = None
self._usr_buffer = ""
self._clear_on_start = True
self._merge_outputs = False
self._running = False
self._writer = self.write
self._user_stop = False
font = "monospace"
if sys.platform == "win32":
font = "Consolas"
elif sys.platform == "darwin":
font = 'Monaco'
self._font_family = font
self.setFont(QFont(font, 10))
self.setReadOnly(True)
self._mask_user_input = False
action = QAction(_('Copy'), self)
action.setShortcut(QKeySequence.Copy)
action.triggered.connect(self.copy)
self.add_action(action)
action = QAction(_('Paste'), self)
action.setShortcut(QKeySequence.Paste)
action.triggered.connect(self.paste)
self.add_action(action)
def showEvent(self, event):
super(InteractiveConsole, self).showEvent(event)
self.panels.refresh()
def resizeEvent(self, e):
super(InteractiveConsole, self).resizeEvent(e)
self.panels.resize()
def add_action(self, action):
self.addAction(action)
action.setShortcutContext(Qt.WidgetShortcut)
def set_writer(self, writer):
if self._writer != writer and self._writer:
self._writer = None
if writer:
self._writer = writer
def _on_stdout(self):
raw = bytes(self.process.readAllStandardOutput())
try:
txt = raw.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
txt = str(raw).replace("b'", '')[:-1].replace(
'\\r\\n', '\n').replace('\\\\', '\\')
_logger().debug('stdout: %s', txt)
self._writer(self, txt, self.stdout_color)
def _on_stderr(self):
raw = bytes(self.process.readAllStandardError())
try:
txt = raw.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
txt = str(raw).replace("b'", '')[:-1].replace(
'\\r\\n', '\n').replace('\\\\', '\\')
_logger().debug('stderr: %s', txt)
self._writer(self, txt, self.stderr_color)
@property
def exit_code(self):
if self.is_running:
return None
exit_status = self.process.exitStatus()
if exit_status == self.process.Crashed:
exit_code = 139
else:
exit_code = self.process.exitCode()
return exit_code
@property
def write_app_messages(self):
return self._write_app_messages
@write_app_messages.setter
def write_app_messages(self, value):
self._write_app_messages = value
@property
def background_color(self):
pal = self.palette()
return pal.color(pal.Base)
@background_color.setter
def background_color(self, color):
pal = self.palette()
pal.setColor(pal.Base, color)
pal.setColor(pal.Text, self.stdout_color)
self.setPalette(pal)
@property
def stdout_color(self):
return self._stdout_col
@stdout_color.setter
def stdout_color(self, color):
self._stdout_col = color
pal = self.palette()
pal.setColor(pal.Text, self._stdout_col)
self.setPalette(pal)
@property
def stderr_color(self):
return self._stderr_col
@stderr_color.setter
def stderr_color(self, color):
self._stderr_col = color
@property
def stdin_color(self):
return self._stdin_col
@stdin_color.setter
def stdin_color(self, color):
self._stdin_col = color
@property
def app_msg_color(self):
return self._app_msg_col
@app_msg_color.setter
def app_msg_color(self, color):
self._app_msg_col = color
@property
def clear_on_start(self):
return self._clear_on_start
@clear_on_start.setter
def clear_on_start(self, value):
self._clear_on_start = value
@property
|
MIT License
|
wuziyi616/if-defense
|
baselines/defense/drop_points/SRS.py
|
SRSDefense.__init__
|
python
|
def __init__(self, drop_num=500):
super(SRSDefense, self).__init__()
self.drop_num = drop_num
|
SRS defense method.
Args:
drop_num (int, optional): number of points to drop.
Defaults to 500.
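A minimal usage sketch (hedged: relies on the forward pass defined elsewhere in
the repository, which is not shown in this record; the input tensor is hypothetical):
>>> defense = SRSDefense(drop_num=500)
>>> clean_pc = defense(adv_pc)  # adv_pc: (B, K, 3) torch.Tensor of attacked point clouds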
|
https://github.com/wuziyi616/if-defense/blob/4b1d69d03d76e8d5ca1b4d45f81a8c9c60791263/baselines/defense/drop_points/SRS.py#L12-L21
|
import numpy as np
import torch
import torch.nn as nn
class SRSDefense(nn.Module):
|
MIT License
|
amzn/metalearn-leap
|
src/omniglot/utils.py
|
convert_arg
|
python
|
def convert_arg(arg):
if arg.lower() == 'none':
arg = None
elif arg.lower() == 'false':
arg = False
elif arg.lower() == 'true':
arg = True
elif '.' in arg:
try:
arg = float(arg)
except Exception:
pass
else:
try:
arg = int(arg)
except Exception:
pass
return arg
|
Convert a string to its inferred Python type (None, bool, float, or int); strings that match none of these are returned unchanged.
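A few illustrative conversions (hypothetical inputs):
>>> convert_arg('None') is None
True
>>> convert_arg('TRUE')
True
>>> convert_arg('0.5')
0.5
>>> convert_arg('42')
42
>>> convert_arg('adam')
'adam'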
|
https://github.com/amzn/metalearn-leap/blob/9d6fa0c1c27fa7812cb9510ab0b23d5f25f575f0/src/omniglot/utils.py#L22-L41
|
import os
from os.path import join
import numpy as np
|
Apache License 2.0
|
stackstorm-exchange/stackstorm-napalm
|
actions/lib/action.py
|
NapalmBaseAction.find_device_from_config
|
python
|
def find_device_from_config(self, search, driver=None, credentials=None, port=None):
try:
devices = self.config['devices']
except KeyError:
message = ("Configuration Error: Please provide a pack config and ensure it's loaded "
" with `st2ctl reload --register-configs`")
raise Exception(message)
search = search.lower()
host_result = search
for d in devices:
hostname = d['hostname'].lower()
if hostname.startswith(search):
if not driver:
driver = d['driver']
if not credentials:
credentials = d['credentials']
if not port:
port = d.get('port')
host_result = hostname
break
if not driver:
raise ValueError('Can not find driver for host {}, try with '
'driver parameter.'.format(host_result))
if not credentials:
raise ValueError(('Can not find credential group for host {}, try with credentials '
'parameter.').format(host_result))
if driver not in ["ios", "iosxr", "junos", "eos", "fortios", "ibm", "nxos",
"pluribus", "panos", "ros", "vyos", "nxos_ssh"]:
raise ValueError('Driver "{}" is not a valid NAPALM Driver.'.format(driver))
return {
"hostname": host_result,
"port": port,
"driver": driver,
"credentials": credentials,
}
|
Locates device in configuration based on search parameters
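A hedged usage sketch (the pack configuration, hostname and credential group
names are hypothetical):
>>> action.find_device_from_config('edge01')
{'hostname': 'edge01.example.net', 'port': None, 'driver': 'ios', 'credentials': 'core-devices'}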
|
https://github.com/stackstorm-exchange/stackstorm-napalm/blob/64fedfbaa5ec519c4a9af3d1c254582f75dbcd2c/actions/lib/action.py#L85-L158
|
from json2table import convert
from st2common.runners.base_action import Action
from napalm import get_network_driver
__all__ = [
'NapalmBaseAction'
]
class NapalmBaseAction(Action):
def __init__(self, config):
super(NapalmBaseAction, self).__init__(config)
def get_driver(self, **std_kwargs):
hostname = std_kwargs['hostname']
credentials = std_kwargs.get('credentials')
driver = std_kwargs.get('driver')
port = std_kwargs.get('port')
htmlout = std_kwargs.get('htmlout', False)
found_device = self.find_device_from_config(hostname, driver, credentials, port)
login = self.get_credentials(found_device['credentials'])
optional_args = {}
if not found_device['port']:
pass
else:
optional_args = {'port': int(found_device['port'])}
if 'secret' in login:
optional_args = {'secret': login['secret']}
if 'key_file' in login:
optional_args = {'key_file': str(login['key_file'])}
login['password'] = None
self.hostname = found_device['hostname']
self.driver = found_device['driver']
self.htmlout = htmlout
return get_network_driver(self.driver)(
hostname=str(found_device['hostname']),
username=login['username'],
password=login['password'],
optional_args=optional_args
)
def get_credentials(self, credentials):
authconfig = self.config['credentials'].get(credentials, None)
if not authconfig:
raise ValueError('Can not find credentials group {}.'.format(credentials))
if 'password' not in authconfig and 'key_file' not in authconfig:
raise ValueError("Missing password or SSH key in credentials.")
if authconfig['username'] is None:
raise ValueError("Missing username in credentials.")
return authconfig
|
Apache License 2.0
|
allegro/django-powerdns-dnssec
|
powerdns/models/requests.py
|
ChangeCreateRequest.copy_records_data
|
python
|
def copy_records_data(self, fields_to_copy):
all_fields = self._meta.get_all_field_names()
for field_name, value in fields_to_copy:
if field_name in all_fields:
setattr(self, field_name, value)
elif 'target_' + field_name in all_fields:
setattr(self, 'target_' + field_name, value)
else:
log.warning("Unknown field {}:{}".format(field_name, value))
|
Sets data from `fields_to_copy` on self
args:
fields_to_copy: [(key, value), ..]
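A minimal sketch (the field names are hypothetical):
>>> request.copy_records_data([('content', '192.0.2.1'), ('ttl', 3600)])
>>> # 'content' and 'ttl' resolve to target_content / target_ttl on the request;
>>> # names matching neither a direct nor a 'target_'-prefixed field are logged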
|
https://github.com/allegro/django-powerdns-dnssec/blob/333bdc668b6cda1a2ff240efd814a1896a1a8e07/powerdns/models/requests.py#L217-L230
|
import logging
from django.db import models, transaction
from django.conf import settings
from django_extensions.db.fields.json import JSONField
from dj.choices import Choices
from dj.choices.fields import ChoiceField
from django.contrib.contenttypes.fields import ContentType, GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from threadlocals.threadlocals import get_current_user
from .powerdns import (
Domain,
Owned,
Record,
validate_domain_name,
)
from .ownership import Service
from ..utils import AutoPtrOptions, RecordLike, TimeTrackable, flat_dict_diff
log = logging.getLogger(__name__)
def can_auto_accept_record_request(user_request, user, action):
def _validate_domain(domain):
if not domain:
raise Exception(
"Can't check auto acceptance without domain set"
)
can_auto_accept = False
domain = (
user_request.domain
if action != 'delete' else user_request.target.domain
)
_validate_domain(domain)
if action == 'create':
can_auto_accept = (
user_request.domain.can_auto_accept(user) and
not user_request.is_sec_acceptance_required()
)
elif action == 'update':
can_auto_accept = (
user_request.domain.can_auto_accept(user) and
user_request.record.can_auto_accept(user) and
not user_request.is_sec_acceptance_required()
)
elif action == 'delete':
can_auto_accept = (
user_request.target.domain.can_auto_accept(user) and
user_request.target.can_auto_accept(user) and
not user_request.is_seo_acceptance_required()
)
return can_auto_accept
class RequestStates(Choices):
_ = Choices.Choice
OPEN = _('Open')
ACCEPTED = _('Accepted')
REJECTED = _('Rejected')
class Request(Owned, TimeTrackable):
class Meta:
abstract = True
state = ChoiceField(
choices=RequestStates,
default=RequestStates.OPEN,
)
key = models.CharField(
max_length=255,
null=True,
blank=True
)
last_change_json = JSONField(null=True, blank=True)
def save(self, *args, **kwargs):
if self.owner is None:
self.owner = get_current_user()
super().save(*args, **kwargs)
def _log_processed_request_message(self):
log.warning('{} (id:{}) already {}'.format(
self._meta.object_name,
self.id,
RequestStates.DescFromID(self.state).lower(),
))
class DeleteRequest(Request):
content_type = models.ForeignKey(ContentType)
target_id = models.PositiveIntegerField()
target = GenericForeignKey('content_type', 'target_id')
@transaction.atomic
def accept(self):
if self.state != RequestStates.OPEN:
self._log_processed_request_message()
return
old_dict = self.target.as_history_dump()
new_dict = self.target.as_empty_history()
result = flat_dict_diff(old_dict, new_dict)
result['_request_type'] = 'delete'
self.last_change_json = result
self.target.delete()
self.state = RequestStates.ACCEPTED
self.save()
@transaction.atomic
def reject(self):
if self.state != RequestStates.OPEN:
self._log_processed_request_message()
return
self.state = RequestStates.REJECTED
self.save()
def __str__(self):
return 'Delete {}'.format(self.target)
def is_seo_acceptance_required(self):
if self.owner and self.owner.is_superuser:
return False
return (
isinstance(self.target, Record) and
self.target.type in settings.SEO_ACCEPTANCE_FOR_RECORD_TYPE and
(
not self.target.domain.template or
self.target.domain.template.is_public_domain
) and
self.target.domain.require_seo_acceptance
)
class ChangeCreateRequest(Request):
ignore_fields = {'created', 'modified'}
prefix = 'target_'
class Meta:
abstract = True
def _get_json_history(self, object_):
if object_.id:
old_dict = object_.as_history_dump()
else:
old_dict = object_.as_empty_history()
new_dict = self.as_history_dump()
result = flat_dict_diff(old_dict, new_dict)
result['_request_type'] = 'update' if object_.id else 'create'
return result
def _set_json_history(self, object_):
self.last_change_json = self._get_json_history(object_)
@transaction.atomic
def accept(self):
object_ = self.get_object()
if self.state != RequestStates.OPEN:
self._log_processed_request_message()
return object_
self._set_json_history(object_)
for field_name in type(self).copy_fields:
if field_name in self.ignore_fields:
continue
if field_name == 'target_owner' and not getattr(self, field_name):
continue
setattr(
object_,
field_name[len(self.prefix):],
getattr(self, field_name)
)
object_.save()
self.assign_object(object_)
self.state = RequestStates.ACCEPTED
self.save()
return object_
@transaction.atomic
def reject(self):
if self.state != RequestStates.OPEN:
self._log_processed_request_message()
return
object_ = self.get_object()
self._set_json_history(object_)
self.state = RequestStates.REJECTED
self.save()
|
BSD 2-Clause Simplified License
|
wagtail/wagtail-live
|
src/wagtail_live/webapp/receiver.py
|
WebAppReceiver.get_message_id_from_message
|
python
|
def get_message_id_from_message(self, message):
return message["id"]
|
See base class.
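A small illustrative call (assumes a constructed receiver; the payload is hypothetical):
>>> receiver.get_message_id_from_message({"id": 42, "channel": "liveblog"})
42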
|
https://github.com/wagtail/wagtail-live/blob/3395f473c3c34f8932d8b0ea6de56745d446e3bd/src/wagtail_live/webapp/receiver.py#L33-L36
|
from wagtail_live.receivers.base import BaseMessageReceiver
from .models import Image
MESSAGE_CREATED = 1
MESSAGE_EDITED = 2
MESSAGE_DELETED = 3
class WebAppReceiver(BaseMessageReceiver):
def dispatch_event(self, event):
message = event
if message["update_type"] == MESSAGE_EDITED:
self.change_message(message=message)
return
elif message["update_type"] == MESSAGE_DELETED:
self.delete_message(message=message)
return
else:
self.add_message(message=message)
def get_channel_id_from_message(self, message):
return message["channel"]
|
BSD 3-Clause New or Revised License
|
carlos-jenkins/plantweb
|
lib/plantweb/defaults.py
|
_read_defaults_git
|
python
|
def _read_defaults_git(path):
git = find_executable('git')
if not git:
log.debug(
'Unable to read defaults from repository. '
'git executable not found.'
)
return {}
proc = Popen(
[git, 'rev-parse', '--show-toplevel'],
stdout=PIPE, stderr=PIPE
)
stdout_raw, stderr_raw = proc.communicate()
stdout = stdout_raw.decode(getdefaultencoding())
stderr = stderr_raw.decode(getdefaultencoding())
if proc.returncode != 0:
if 'not a git repository' in stderr.lower():
log.debug(
'Not in a git repository: {}'.format(getcwd())
)
else:
log.error(
'Unable to determine git root directory:\n{}'.format(stderr)
)
return {}
repo_root = stdout.strip()
return _read_defaults_file(
join(repo_root, path)
)
|
Read defaults from given path in current git repository.
See :data:`DEFAULTS_PROVIDERS` for inner workings.
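A hedged usage sketch (run from inside a git working tree whose root contains a
.plantwebrc file; the result depends entirely on that file's JSON content):
>>> _read_defaults_git('.plantwebrc')  # returns {} outside a git repository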
|
https://github.com/carlos-jenkins/plantweb/blob/6898b08b5377d70272e507bc668e02d2cd323d2e/lib/plantweb/defaults.py#L91-L132
|
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
import logging
from os import getcwd
from json import loads
from copy import deepcopy
from inspect import isfunction
from traceback import format_exc
from sys import getdefaultencoding
from subprocess import Popen, PIPE
from importlib import import_module
from distutils.spawn import find_executable
from os.path import isfile, expanduser, join
log = logging.getLogger(__name__)
DEFAULT_CONFIG = {
'engine': 'plantuml',
'format': 'svg',
'server': 'http://plantuml.com/plantuml/',
'use_cache': True,
'cache_dir': '~/.cache/plantweb'
}
DEFAULTS_PROVIDERS = [
'python://plantweb.defaults.DEFAULT_CONFIG',
'file://~/.plantwebrc',
'git://.plantwebrc'
]
|
Apache License 2.0
|
urinieto/msaf
|
msaf/eval.py
|
get_results_file_name
|
python
|
def get_results_file_name(boundaries_id, labels_id, config,
annotator_id):
utils.ensure_dir(msaf.config.results_dir)
file_name = os.path.join(msaf.config.results_dir, "results")
file_name += "_boundsE%s_labelsE%s" % (boundaries_id, labels_id)
file_name += "_annotatorE%d" % (annotator_id)
sorted_keys = sorted(config.keys(), key=str.lower)
for key in sorted_keys:
file_name += "_%sE%s" % (key, str(config[key]).replace("/", "_"))
if len(file_name) > 255 - len(msaf.config.results_ext):
file_name = file_name[:255 - len(msaf.config.results_ext)]
return file_name + msaf.config.results_ext
|
Based on the config and the dataset, get the file name to store the
results.
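A hedged sketch (the config keys, results directory and extension are illustrative):
>>> get_results_file_name('sf', 'fmc2d', {'hier': False, 'feature': 'pcp'}, annotator_id=0)
'.../results_boundsEsf_labelsEfmc2d_annotatorE0_featureEpcp_hierEFalse.csv'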
|
https://github.com/urinieto/msaf/blob/17db5b698e06d662dfa5c7442d826022746454b7/msaf/eval.py#L281-L297
|
import jams
from joblib import Parallel, delayed
import logging
import mir_eval
import numpy as np
import os
import pandas as pd
import six
import msaf
from msaf.exceptions import NoReferencesError
import msaf.input_output as io
from msaf import utils
def print_results(results):
if len(results) == 0:
logging.warning("No results to print!")
return
res = results.mean()
logging.info("Results:\n%s" % res)
def compute_results(ann_inter, est_inter, ann_labels, est_labels, bins,
est_file, weight=0.58):
res = {}
res["HitRate_3P"], res["HitRate_3R"], res["HitRate_3F"] = mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=False)
res["HitRate_0.5P"], res["HitRate_0.5R"], res["HitRate_0.5F"] = mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=False)
res["HitRate_t3P"], res["HitRate_t3R"], res["HitRate_t3F"] = mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=True)
res["HitRate_t0.5P"], res["HitRate_t0.5R"], res["HitRate_t0.5F"] = mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=True)
_, _, res["HitRate_w3F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=3, trim=False, beta=weight)
_, _, res["HitRate_w0.5F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=.5, trim=False, beta=weight)
_, _, res["HitRate_wt3F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=3, trim=True, beta=weight)
_, _, res["HitRate_wt0.5F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=.5, trim=True, beta=weight)
res["D"] = compute_information_gain(ann_inter, est_inter, est_file,
bins=bins)
res["DevR2E"], res["DevE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=False)
res["DevtR2E"], res["DevtE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=True)
if est_labels is not None and ("-1" in est_labels or "@" in est_labels):
est_labels = None
if est_labels is not None and len(est_labels) != 0:
ann_labels = list(ann_labels)
est_labels = list(est_labels)
ann_inter, ann_labels = mir_eval.util.adjust_intervals(ann_inter,
ann_labels)
est_inter, est_labels = mir_eval.util.adjust_intervals(
est_inter, est_labels, t_min=0.0, t_max=ann_inter.max())
res["PWP"], res["PWR"], res["PWF"] = mir_eval.segment.pairwise(
ann_inter, ann_labels, est_inter, est_labels)
res["So"], res["Su"], res["Sf"] = mir_eval.segment.nce(
ann_inter, ann_labels, est_inter, est_labels)
base = os.path.basename(est_file)
res["track_id"] = base[:-5]
res["ds_name"] = base.split("_")[0]
return res
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
bins=251, annotator_id=0):
if config["hier"]:
ref_times, ref_labels, ref_levels = msaf.io.read_hier_references(
ref_file, annotation_id=annotator_id,
exclude_levels=["segment_salami_function"])
else:
jam = jams.load(ref_file, validate=False)
ann = jam.search(namespace='segment_.*')[annotator_id]
ref_inter, ref_labels = ann.to_interval_values()
est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
labels_id, **config)
logging.info("Evaluating %s" % os.path.basename(est_file))
if config["hier"]:
assert len(est_inter) == len(est_labels), "Same number of levels " "are required in the boundaries and labels for the hierarchical " "evaluation."
est_times = []
est_labels = []
est_inter = sorted(est_inter, key=lambda level: len(level))
for inter in est_inter:
est_times.append(msaf.utils.intervals_to_times(inter))
est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)
utils.align_end_hierarchies(est_times, ref_times, thres=1)
est_hier = [utils.times_to_intervals(times) for times in est_times]
ref_hier = [utils.times_to_intervals(times) for times in ref_times]
res = {}
res["t_recall10"], res["t_precision10"], res["t_measure10"] = mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
res["t_recall15"], res["t_precision15"], res["t_measure15"] = mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)
res["track_id"] = os.path.basename(est_file)[:-5]
return res
else:
return compute_results(ref_inter, est_inter, ref_labels, est_labels,
bins, est_file)
def compute_information_gain(ann_inter, est_inter, est_file, bins):
ann_times = utils.intervals_to_times(ann_inter)
est_times = utils.intervals_to_times(est_inter)
return mir_eval.beat.information_gain(ann_times, est_times, bins=bins)
def process_track(file_struct, boundaries_id, labels_id, config,
annotator_id=0):
if isinstance(file_struct, six.string_types):
file_struct = io.FileStruct(file_struct)
est_file = file_struct.est_file
ref_file = file_struct.ref_file
assert os.path.basename(est_file)[:-4] == os.path.basename(ref_file)[:-4], "File names are different %s --- %s" % (os.path.basename(est_file)[:-4], os.path.basename(ref_file)[:-4])
if not os.path.isfile(ref_file):
raise NoReferencesError("Reference file %s does not exist. You must "
"have annotated references to run "
"evaluations." % ref_file)
one_res = compute_gt_results(est_file, ref_file, boundaries_id, labels_id,
config, annotator_id=annotator_id)
return one_res
|
MIT License
|
ljvmiranda921/gym-lattice
|
gym_lattice/envs/lattice2d_env.py
|
Lattice2DEnv.reset
|
python
|
def reset(self):
self.state = OrderedDict({(0, 0) : self.seq[0]})
self.actions = []
self.collisions = 0
self.trapped = 0
self.done = len(self.seq) == 1
self.grid = np.zeros(shape=(self.grid_length, self.grid_length), dtype=int)
self.grid[self.midpoint] = POLY_TO_INT[self.seq[0]]
self.last_action = None
return self.grid
|
Resets the environment
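A minimal usage sketch:
>>> env = Lattice2DEnv('HHPH')
>>> grid = env.reset()
>>> grid.shape  # (2 * len(seq) + 1, 2 * len(seq) + 1)
(9, 9)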
|
https://github.com/ljvmiranda921/gym-lattice/blob/8e94972751de96bbd3768d42ab54aa22c1c98822/gym_lattice/envs/lattice2d_env.py#L228-L241
|
import sys
from math import floor
from collections import OrderedDict
import gym
from gym import (spaces, utils, logger)
import numpy as np
from six import StringIO
ACTION_TO_STR = {
0 : 'L', 1 : 'D',
2 : 'U', 3 : 'R'}
POLY_TO_INT = {
'H' : 1, 'P' : -1
}
class Lattice2DEnv(gym.Env):
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, seq, collision_penalty=-2, trap_penalty=0.5):
try:
if not set(seq.upper()) <= set('HP'):
raise ValueError("%r (%s) is an invalid sequence" % (seq, type(seq)))
self.seq = seq.upper()
except AttributeError:
logger.error("%r (%s) must be of type 'str'" % (seq, type(seq)))
raise
try:
if collision_penalty >= 0:
raise ValueError("%r (%s) must be negative" %
(collision_penalty, type(collision_penalty)))
if not isinstance(collision_penalty, int):
raise ValueError("%r (%s) must be of type 'int'" %
(collision_penalty, type(collision_penalty)))
self.collision_penalty = collision_penalty
except TypeError:
logger.error("%r (%s) must be of type 'int'" %
(collision_penalty, type(collision_penalty)))
raise
try:
if not 0 < trap_penalty < 1:
raise ValueError("%r (%s) must be between 0 and 1" %
(trap_penalty, type(trap_penalty)))
self.trap_penalty = trap_penalty
except TypeError:
logger.error("%r (%s) must be of type 'float'" %
(trap_penalty, type(trap_penalty)))
raise
self.grid_length = 2 * len(seq) + 1
self.midpoint = (len(self.seq), len(self.seq))
self.action_space = spaces.Discrete(4)
self.observation_space = spaces.Box(low=-2, high=1,
shape=(self.grid_length, self.grid_length),
dtype=int)
self.reset()
def step(self, action):
if not self.action_space.contains(action):
raise ValueError("%r (%s) invalid" % (action, type(action)))
self.last_action = action
is_trapped = False
collision = False
x, y = next(reversed(self.state))
adj_coords = self._get_adjacent_coords((x, y))
next_move = adj_coords[action]
idx = len(self.state)
if next_move in self.state:
self.collisions += 1
collision = True
else:
self.actions.append(action)
try:
self.state.update({next_move : self.seq[idx]})
except IndexError:
logger.error('All molecules have been placed! Nothing can be added to the protein chain.')
raise
if set(self._get_adjacent_coords(next_move).values()).issubset(self.state.keys()):
logger.warn('Your agent was trapped! Ending the episode.')
self.trapped += 1
is_trapped = True
grid = self._draw_grid(self.state)
self.done = True if (len(self.state) == len(self.seq) or is_trapped) else False
reward = self._compute_reward(is_trapped, collision)
info = {
'chain_length' : len(self.state),
'seq_length' : len(self.seq),
'collisions' : self.collisions,
'actions' : [ACTION_TO_STR[i] for i in self.actions],
'is_trapped' : is_trapped,
'state_chain' : self.state
}
return (grid, reward, self.done, info)
|
MIT License
|
sciter-sdk/pysciter
|
sciter/value.py
|
value.__bool__
|
python
|
def __bool__(self):
return bool(self.get_value())
|
Value to bool conversion.
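A hedged sketch (requires the Sciter runtime, since the module binds SciterAPI at
import time):
>>> bool(value(0))
False
>>> bool(value("text"))
True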
|
https://github.com/sciter-sdk/pysciter/blob/f5f00112d635f9d1ee98661eeebb2afcaa56c1f4/sciter/value.py#L162-L165
|
import inspect
import ctypes
import sciter
import sciter.error
import sciter.capi.scdef
import sciter.capi.sctypes
from sciter.capi.scvalue import *
_api = sciter.SciterAPI()
byref = ctypes.byref
_python_types = {VALUE_TYPE.T_UNDEFINED: type(None),
VALUE_TYPE.T_NULL: type(None),
VALUE_TYPE.T_BOOL: bool,
VALUE_TYPE.T_INT: int,
VALUE_TYPE.T_FLOAT: float,
VALUE_TYPE.T_STRING: str,
VALUE_TYPE.T_ARRAY: list,
VALUE_TYPE.T_MAP: dict,
VALUE_TYPE.T_BYTES: bytes,
VALUE_TYPE.T_DURATION: float,
VALUE_TYPE.T_ANGLE: float,
VALUE_TYPE.T_COLOR: int,
}
_value_type_names = {val: name.lower()[2:] for name, val in VALUE_TYPE.__members__.items()}
def _subtype_name(subtype):
return {val: name.split('_')[-1].lower() for name, val in subtype.__members__.items()}
_value_subtypes = {VALUE_TYPE.T_LENGTH: _subtype_name(VALUE_UNIT_TYPE),
VALUE_TYPE.T_DATE: _subtype_name(VALUE_UNIT_TYPE_DATE),
VALUE_TYPE.T_OBJECT: _subtype_name(VALUE_UNIT_TYPE_OBJECT),
VALUE_TYPE.T_STRING: _subtype_name(VALUE_UNIT_TYPE_STRING),
}
class ValueError(sciter.error.SciterError):
def __init__(self, hv_code, script=None):
msg = "Incompatible type" if hv_code == 2 else "Bad parameter"
if script:
msg = msg + " at " + script
super().__init__(msg)
pass
class value():
@classmethod
def parse(cls, json: str, how=VALUE_STRING_CVT_TYPE.CVT_JSON_LITERAL, throw=True):
rv = value()
ok = _api.ValueFromString(rv, json, len(json), how)
if ok != 0 and throw:
raise sciter.value.ValueError(VALUE_RESULT.HV_BAD_PARAMETER, "value.parse")
return rv
@classmethod
def null(cls):
rv = value()
rv.data.t = VALUE_TYPE.T_NULL
return rv
@classmethod
def symbol(cls, name):
rv = value()
rv._assign_str(name, VALUE_UNIT_TYPE_STRING.UT_STRING_SYMBOL)
return rv
@classmethod
def secure_string(cls, val):
rv = value()
rv._assign_str(val, VALUE_UNIT_TYPE_STRING.UT_STRING_SECURE)
return rv
@classmethod
def color(cls, val):
rv = value()
ok = _api.ValueIntDataSet(rv, val, VALUE_TYPE.T_COLOR, 0)
rv._throw_if(ok)
return rv
@classmethod
def duration(cls, val):
rv = value()
ok = _api.ValueFloatDataSet(rv, val, VALUE_TYPE.T_DURATION, 0)
rv._throw_if(ok)
return rv
@classmethod
def angle(cls, val):
rv = value()
ok = _api.ValueFloatDataSet(rv, val, VALUE_TYPE.T_ANGLE, 0)
rv._throw_if(ok)
return rv
def __init__(self, val=None):
super().__init__()
self.data = SCITER_VALUE()
self.ptr = ctypes.pointer(self.data)
self._as_parameter_ = self.ptr
_api.ValueInit(self.ptr)
if val is not None:
self.set_value(val)
pass
def __del__(self):
self.clear()
pass
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
def __repr__(self):
t = VALUE_TYPE(self.data.t)
tname = _value_type_names.get(self.data.t, hex(self.data.t))
if t in (VALUE_TYPE.T_UNDEFINED, VALUE_TYPE.T_NULL):
return "<%s>" % (tname)
if self.data.u != 0:
subtypes = _value_subtypes.get(t)
if subtypes:
tname = tname + ':' + subtypes.get(self.data.u, hex(self.data.u))
return "<%s: %s>" % (tname, str(self))
def __str__(self):
copy = self.copy()
ok = _api.ValueToString(copy, VALUE_STRING_CVT_TYPE.CVT_JSON_LITERAL)
self._throw_if(ok)
return copy.get_value()
|
MIT License
|
adeelmufti/cryptobot
|
app/model/model.py
|
cross_validate
|
python
|
def cross_validate(X, y, model, window):
in_sample_score = []
out_sample_score = []
    for i in range(1, len(y) // window):
train_index = np.arange(0, i*window)
test_index = np.arange(i*window, (i+1)*window)
        print('Train index', train_index)
        print('Test index', test_index)
y_train = y.take(train_index)
y_test = y.take(test_index)
X_train = X.take(train_index, axis=0)
X_test = X.take(test_index, axis=0)
model.fit(X_train, y_train)
in_sample_score.append(model.score(X_train, y_train))
out_sample_score.append(model.score(X_test, y_test))
        print('Window', i)
        print('in-sample score', in_sample_score[-1])
        print('out-sample score:', out_sample_score[-1])
        print('---')
return model, np.mean(in_sample_score), np.mean(out_sample_score)
|
Cross-validates time series data using a shifting window, where training data
always precedes test data
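A hedged usage sketch with synthetic data (not part of the original source); the window size and model settings are arbitrary:

import numpy as np
from sklearn.ensemble import RandomForestRegressor

X = np.random.rand(500, 5)          # 500 time-ordered samples, 5 features
y = np.random.rand(500)

model = RandomForestRegressor(n_estimators=50)
model, in_score, out_score = cross_validate(X, y, model, window=100)
print(in_score, out_score)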
|
https://github.com/adeelmufti/cryptobot/blob/6cbdfea43af9690d289f92db6b1b3b371abbd2fb/app/model/model.py#L8-L31
|
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
import pickle
import pandas as pd
|
Apache License 2.0
|
learningequality/studio
|
contentcuration/contentcuration/views/nodes.py
|
get_channel_details
|
python
|
def get_channel_details(request, channel_id):
channel = get_object_or_404(Channel.filter_view_queryset(Channel.objects.all(), request.user), id=channel_id)
if not channel.main_tree:
raise Http404
data = get_node_details_cached(channel.main_tree, channel_id=channel_id)
return HttpResponse(json.dumps(data))
|
Generates data for channel contents. Used for look-inside previews
Keyword arguments:
channel_id (str): id of channel to get details from
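A hedged sketch of how such a view could be wired into a URLconf; the route pattern below is hypothetical and not taken from the Studio codebase:

# urls.py (hypothetical route, for illustration only)
from django.urls import path
from contentcuration.views.nodes import get_channel_details

urlpatterns = [
    path("api/channels/<str:channel_id>/details/", get_channel_details),
]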
|
https://github.com/learningequality/studio/blob/92ef5585d9dd4f61d060c01437e233674258cad6/contentcuration/contentcuration/views/nodes.py#L32-L42
|
import json
from datetime import datetime
from django.conf import settings
from django.core.cache import cache
from django.db.models import Max
from django.db.models import Q
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseNotFound
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import Task
from contentcuration.tasks import create_async_task
from contentcuration.tasks import getnodedetails_task
from contentcuration.utils.nodes import get_diff
@api_view(["GET"])
@permission_classes((AllowAny,))
|
MIT License
|
kthyng/tracpy
|
tracpy/tracpy_class.py
|
Tracpy.prepare_for_model_step
|
python
|
def prepare_for_model_step(self, tind, nc, flag, xend, yend, zend, j,
nsubstep, T0):
xstart = xend[:, j*self.N]
ystart = yend[:, j*self.N]
zstart = zend[:, j*self.N]
xstart = np.ma.masked_where(flag[:] == 1, xstart)
ystart = np.ma.masked_where(flag[:] == 1, ystart)
zstart = np.ma.masked_where(flag[:] == 1, zstart)
if T0 is not None:
T0 = np.ma.masked_where(flag[:] == 1, T0)
self.uf[0, :, :, :] = self.uf[1, :, :, :].copy()
self.vf[0, :, :, :] = self.vf[1, :, :, :].copy()
self.dzt[0, :, :, :] = self.dzt[1, :, :, :].copy()
self.zrt[0, :, :, :] = self.zrt[1, :, :, :].copy()
self.zwt[0, :, :, :] = self.zwt[1, :, :, :].copy()
if isinstance(self.z0, str):
self.uf[1, :, :, :], self.vf[1, :, :, :], self.dzt[1, :, :, :], self.zrt[1, :, :, :], self.zwt[1, :, :, :] = tracpy.inout.readfields(tind, self.grid, nc, self.z0,
self.zpar, zparuv=self.zparuv)
else:
self.uf[1, :, :, :], self.vf[1, :, :, :], self.dzt[1, :, :, :], self.zrt[1, :, :, :], self.zwt[1, :, :, :] = tracpy.inout.readfields(tind, self.grid, nc)
ufsub = np.ones(self.uf.shape)*np.nan
vfsub = np.ones(self.vf.shape)*np.nan
rp = nsubstep/self.nsubsteps
rm = 1 - rp
ufsub[0, :, :, :] = rm*self.uf[0, :, :, :] + rp*self.uf[1, :, :, :]
vfsub[0, :, :, :] = rm*self.vf[0, :, :, :] + rp*self.vf[1, :, :, :]
rp = (nsubstep+1)/self.nsubsteps
rm = 1 - rp
ufsub[1, :, :, :] = rm*self.uf[0, :, :, :] + rp*self.uf[1, :, :, :]
vfsub[1, :, :, :] = rm*self.vf[0, :, :, :] + rp*self.vf[1, :, :, :]
xstart, ystart = tracpy.tools.convert_indices('py2f', xstart, ystart)
ufsub = np.ma.masked_where(ufsub>1e30, ufsub)
vfsub = np.ma.masked_where(vfsub>1e30, vfsub)
return xstart, ystart, zstart, ufsub, vfsub, T0
|
Already within a time step; prepare to actually take the model step
|
https://github.com/kthyng/tracpy/blob/6cc53006cba58381daa543089787e1e77625f3c6/tracpy/tracpy_class.py#L373-L432
|
import tracpy
import numpy as np
from . import tracmass
from matplotlib.mlab import find
class Tracpy(object):
def __init__(self, currents_filename, grid, nsteps=1, ndays=1, ff=1,
tseas=3600., ah=0., av=0., z0='s', zpar=1, do3d=0, doturb=0,
name='test', dostream=0, N=1,
time_units='seconds since 1970-01-01', dtFromTracmass=None,
zparuv=None, tseas_use=None, savell=True, doperiodic=0,
usespherical=True, ellps='WGS84'):
self.currents_filename = currents_filename
self.grid = grid
self.nsteps = nsteps
self.ndays = ndays
self.ff = ff
self.tseas = float(tseas)
self.ah = ah
self.av = av
self.z0 = z0
self.zpar = zpar
self.do3d = do3d
self.doturb = doturb
self.name = name
self.dostream = dostream
self.N = N
self.time_units = time_units
self.savell = savell
self.doperiodic = doperiodic
self.usespherical = usespherical
if dtFromTracmass is None:
self.dtFromTracmass = tseas
else:
self.N = 1
self.dtFromTracmass = dtFromTracmass
self.nsubsteps = int(self.tseas/self.dtFromTracmass)
if zparuv is None:
self.zparuv = zpar
else:
self.zparuv = zparuv
if tseas_use is None:
self.tseas_use = tseas
self.tout = np.int(np.ceil((ndays*(24*3600))/tseas + 1))
self.tstride = int(self.tseas_use/self.tseas)
self.uf = None
self.vf = None
self.dzt = None
self.zrt = None
self.zwt = None
def prepare_for_model_run(self, date, lon0, lat0):
nc, tinds = tracpy.inout.setupROMSfiles(self.currents_filename, date,
self.ff, self.tout,
self.time_units,
tstride=self.tstride)
if self.usespherical:
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0,
self.grid,
'd_ll2ij')
else:
xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0,
self.grid,
'd_xy2ij')
ia = np.ceil(xstart0).astype(int)
ja = np.ceil(ystart0).astype(int)
ind2 = ~np.isnan(ia) * ~np.isnan(ja)
ia = ia[ind2]
ja = ja[ind2]
xstart0 = xstart0[ind2]
ystart0 = ystart0[ind2]
unmasked = np.where(self.grid.mask_rho[ja, ia] == 1)[0]
ia = ia[unmasked]
ja = ja[unmasked]
xstart0 = xstart0[unmasked]
ystart0 = ystart0[unmasked]
if 'ocean_time' in nc.variables:
dates = nc.variables['ocean_time'][:]
elif 'time' in nc.variables:
dates = nc.variables['time'][:]
t0save = dates[tinds[0]]
xend = np.ones((ia.size, (len(tinds)-1)*self.N+1))*np.nan
yend = np.ones((ia.size, (len(tinds)-1)*self.N+1))*np.nan
zend = np.ones((ia.size, (len(tinds)-1)*self.N+1))*np.nan
zp = np.ones((ia.size, (len(tinds)-1)*self.N+1))*np.nan
ttend = np.ones((ia.size, (len(tinds)-1)*self.N+1))
flag = np.zeros((ia.size), dtype=np.int)
lx = self.grid.imt
ly = self.grid.jmt
try:
lk = self.grid.sc_r.size
except:
lk = 2
self.uf = np.ones((2, lk-1, ly, lx-1))*np.nan
self.vf = np.ones((2, lk-1, ly-1, lx))*np.nan
self.dzt = np.ones((2, lk-1, ly, lx))*np.nan
self.zrt = np.ones((2, lk-1, ly, lx))*np.nan
self.zwt = np.ones((2, lk, ly, lx))*np.nan
if isinstance(self.z0, str):
self.uf[1, :, :, :], self.vf[1, :, :, :], self.dzt[1, :, :, :], self.zrt[1, :, :, :], self.zwt[1, :, :, :] = tracpy.inout.readfields(tinds[0], self.grid, nc, self.z0,
self.zpar, zparuv=self.zparuv)
else:
self.uf[1, :, :, :], self.vf[1, :, :, :], self.dzt[1, :, :, :], self.zrt[1, :, :, :], self.zwt[1, :, :, :] = tracpy.inout.readfields(tinds[0], self.grid, nc)
if isinstance(self.z0, str):
ka = np.ones(ia.size)
zstart0 = np.ones(ia.size)*0.5
else:
ka = np.ones(ia.size, dtype=int)*-999
zstart0 = np.ones(ia.size)*np.nan
if self.zpar == 'fromMSL':
raise NotImplementedError("zpar==''fromMSL'' not implemented\
yet...")
elif self.zpar == 'fromZeta':
for i in range(ia.size):
ind = (self.zwt[1, :, ja[i], ia[i]] <= self.z0[i])
ka[i] = find(ind)[-1]
if (self.z0[i] != self.zwt[1, ka[i], ja[i], ia[i]]) and (ka[i] != self.grid.km):
ka[i] = ka[i]+1
zstart0[i] = ka[i] - abs(self.z0[i]-self.zwt[1, ka[i], ja[i], ia[i]]) / abs(self.zwt[1, ka[i]-1, ja[i], ia[i]] -
self.zwt[1, ka[i], ja[i], ia[i]])
xend[:, 0] = xstart0
yend[:, 0] = ystart0
zend[:, 0] = zstart0
return tinds, nc, t0save, xend, yend, zend, zp, ttend, flag
|
MIT License
|
necaris/python3-openid
|
openid/yadis/xri.py
|
rootAuthority
|
python
|
def rootAuthority(xri):
if xri.startswith('xri://'):
xri = xri[6:]
authority = xri.split('/', 1)[0]
if authority[0] == '(':
root = authority[:authority.index(')') + 1]
elif authority[0] in XRI_AUTHORITIES:
root = authority[0]
else:
segments = authority.split('!')
segments = reduce(list.__add__, [s.split('*') for s in segments])
root = segments[0]
return XRI(root)
|
Return the root authority for an XRI.
Example::
rootAuthority("xri://@example") == "xri://@"
@type xri: unicode
@returntype: unicode
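A few additional hedged examples covering the three branches (authority character, cross-reference, segmented fallback); the expected results follow the docstring's convention of an "xri://"-prefixed root:

rootAuthority("xri://@example*work/path")   # -> "xri://@"            (authority character)
rootAuthority("(tel:+1-234)!foo/bar")       # -> "xri://(tel:+1-234)" (cross-reference)
rootAuthority("example.com*user!1/docs")    # -> "xri://example.com"  (segmented fallback)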
|
https://github.com/necaris/python3-openid/blob/5c7f8f8fa4d2a0124516046ab2f84130eb8c10cb/openid/yadis/xri.py#L78-L107
|
import re
from functools import reduce
from openid import codecutil
XRI_AUTHORITIES = ['!', '=', '@', '+', '$', '(']
def identifierScheme(identifier):
if identifier.startswith('xri://') or (identifier and
identifier[0] in XRI_AUTHORITIES):
return "XRI"
else:
return "URI"
def toIRINormal(xri):
if not xri.startswith('xri://'):
xri = 'xri://' + xri
return escapeForIRI(xri)
_xref_re = re.compile(r'\((.*?)\)')
def _escape_xref(xref_match):
xref = xref_match.group()
xref = xref.replace('/', '%2F')
xref = xref.replace('?', '%3F')
xref = xref.replace('#', '%23')
return xref
def escapeForIRI(xri):
xri = xri.replace('%', '%25')
xri = _xref_re.sub(_escape_xref, xri)
return xri
def toURINormal(xri):
return iriToURI(toIRINormal(xri))
def iriToURI(iri):
if isinstance(iri, bytes):
iri = str(iri, encoding="utf-8")
return iri.encode('ascii', errors='oid_percent_escape').decode()
def providerIsAuthoritative(providerID, canonicalID):
lastbang = canonicalID.rindex('!')
parent = canonicalID[:lastbang]
return parent == providerID
|
Apache License 2.0
|
ambron60/l-system-drawing
|
venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/appdirs.py
|
_get_win_folder_from_registry
|
python
|
def _get_win_folder_from_registry(csidl_name):
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
|
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
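A hedged, Windows-only sketch (not part of the original source); note the function imports the Python 2 _winreg module, which is named winreg on Python 3:

# Windows-only; the supported CSIDL names are limited to the three keys mapped above
appdata = _get_win_folder_from_registry("CSIDL_APPDATA")
print(appdata)   # e.g. C:\Users\<user>\AppData\Roaming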
|
https://github.com/ambron60/l-system-drawing/blob/3a4ecface1d862b87acd58ff2d5303cd4475370b/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/appdirs.py#L408-L426
|
__version_info__ = (1, 4, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'):
system = 'win32'
elif os_name.startswith('Mac'):
system = 'darwin'
else:
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
|
MIT License
|
mosdef-hub/gmso
|
gmso/abc/abstract_site.py
|
Site.name
|
python
|
def name(self) -> str:
return self.__dict__.get("name_")
|
Return the name of the site.
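A hedged sketch; Site is an abstract base, so the example uses gmso's concrete Atom subclass, whose top-level import path and constructor are assumed here:

from gmso import Atom          # concrete Site subclass (assumed top-level export)

atom = Atom(name="opls_140")   # the name_ field backs the .name property
print(atom.name)               # -> "opls_140"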
|
https://github.com/mosdef-hub/gmso/blob/0f2ef1e79d43579da70e472c008e7fd25a9b2dee/gmso/abc/abstract_site.py#L68-L70
|
import warnings
from typing import Any, ClassVar, Optional, Sequence, TypeVar, Union
import numpy as np
import unyt as u
from pydantic import Field, StrictInt, StrictStr, validator
from unyt.exceptions import InvalidUnitOperation
from gmso.abc.gmso_base import GMSOBase
from gmso.exceptions import GMSOError
PositionType = Union[Sequence[float], np.ndarray, u.unyt_array]
SiteT = TypeVar("SiteT", bound="Site")
BASE_DOC_ATTR = "__base_doc__"
FIELDS_IN_DOCSTRING = "alias_to_fields"
def default_position():
return u.unyt_array([np.nan] * 3, u.nm)
class Site(GMSOBase):
__iterable_attributes__: ClassVar[set] = {
"label",
"residue_name",
"residue_number",
}
__base_doc__: ClassVar[
str
] = """An interaction site object in the topology hierarchy.
Site is the object that represents any general interaction site in a molecular simulation.
Sites have been designed to be as general as possible, making no assumptions about representing atoms or beads, or
having mass or charge. That is, a Site can represent an atom in an atomistic system,
a bead in a coarse-grained system, and much more.
Notes
-----
The label attribute for a site takes its meaning when used with some sort of container (like topology)
such that a label for a site can then be used to group sites together. The rules for defining a site label
and their meaning the responsibility of the container where the sites will reside.
"""
name_: str = Field(
"",
description="Name of the site, defaults to class name",
)
label_: str = Field("", description="Label to be assigned to the site")
residue_number_: Optional[StrictInt] = Field(
None, description="Residue number for the site"
)
residue_name_: Optional[StrictStr] = Field(
None, description="Residue label for the site"
)
position_: PositionType = Field(
default_factory=default_position,
description="The 3D Cartesian coordinates of the position of the site",
)
@property
|
MIT License
|
yaqiangcao/cloops
|
cLoops/io.py
|
parseRawBedpe2
|
python
|
def parseRawBedpe2(fs, fout, cs, cut, logger):
chroms = {}
cfs = []
ds = []
i, j, = 0, 0
for f in fs:
r = "Parsing PETs from %s, requiring initial distance cutoff > %s" % (
f, cut)
logger.info(r)
if f.endswith(".gz"):
of = gzip.open(f, "rb")
else:
of = open(f)
for line in of:
i += 1
if i % 100000 == 0:
cFlush("%s PETs processed from %s" % (i, f))
line = line.split("\n")[0].split("\t")
if "*" in line and "-1" in line:
continue
if len(line) < 6:
continue
try:
pet = PET(line)
except:
continue
if pet.chromA != pet.chromB:
continue
if len(cs) > 0 and (not (pet.chromA in cs and pet.chromB in cs)):
continue
if cut > 0 and pet.distance < cut:
continue
if pet.chromA not in chroms:
cf = os.path.join(fout,
"%s-%s" % (pet.chromA, pet.chromB) + ".txt")
chroms[pet.chromA] = {"f": open(cf, "w"), "c": 0}
cfs.append(cf)
nline = [chroms[pet.chromA]["c"], pet.cA, pet.cB]
chroms[pet.chromA]["f"].write("\t".join(map(str, nline)) + "\n")
chroms[pet.chromA]["c"] += 1
j += 1
print()
del(chroms)
r = "Totaly %s PETs from %s, in which %s cis PETs" % (i, ",".join(fs), j)
logger.info(r)
return cfs
|
Get the cis-PETs, organized by chromosome. Input can be mixed PETs in .bedpe or .bedpe.gz. Read ids are also changed to numbers to minimize memory usage.
@param fs: bedpe files of replicates, could be .bedpe or .bedpe.gz
@param fout: output prefix, the name for the output directory
@param cs: chromosomes wanted, a list like ["chr1","chr2"]
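A hedged usage sketch; the replicate files and output directory are hypothetical, and a standard logging.Logger stands in for the cLoops logger:

import os, logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("cLoops")

fout = "example_out"                                      # hypothetical output directory
os.makedirs(fout, exist_ok=True)
cfs = parseRawBedpe2(["rep1.bedpe.gz", "rep2.bedpe.gz"],  # hypothetical replicate files
                     fout, ["chr1", "chr2"], 1000, logger)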
|
https://github.com/yaqiangcao/cloops/blob/12472a63d6ecaa190f79c22140e860d2aae7c827/cLoops/io.py#L132-L189
|
__author__ = "CAO Yaqiang"
__date__ = ""
__modified__ = ""
__email__ = "caoyaqiang0410@gmail.com"
import os, random, gzip
import joblib
import numpy as np
from cLoops.utils import callSys
from cLoops.utils import cFlush
class PET(object):
__slots__ = [
"chromA", "chromB", "startA", "startB", "endA", "endB", "strandA",
"strandB", "cA", "cB", "distance"
]
def __init__(self, d):
self.chromA = d[0]
self.startA = int(d[1])
self.endA = int(d[2])
self.strandA = d[8]
self.chromB = d[3]
self.startB = int(d[4])
self.endB = int(d[5])
self.strandB = d[9]
if self.chromA == self.chromB:
if self.startA + self.endA > self.startB + self.endB:
self.startA, self.startB = self.startB, self.startA
self.endA, self.endB = self.endB, self.endA
self.strandA, self.strandB = self.strandB, self.strandA
self.cA = (self.startA + self.endA) / 2
self.cB = (self.startB + self.endB) / 2
self.distance = self.cB - self.cA
else:
self.cA, self.cB, self.distance = None, None, None
def parseRawBedpe(fs, fout, cs, cut, logger):
chroms = {}
cfs = []
ds = []
i, j, = 0, 0
for f in fs:
r = "Parsing PETs from %s, requiring initial distance cutoff > %s" % (
f, cut)
logger.info(r)
if f.endswith(".gz"):
of = gzip.open(f, "rb")
else:
of = open(f)
for line in of:
i += 1
if i % 100000 == 0:
cFlush("%s PETs processed from %s" % (i, f))
line = line.split("\n")[0].split("\t")
if "*" in line and "-1" in line:
continue
if len(line) < 6:
continue
try:
pet = PET(line)
except:
continue
if pet.chromA != pet.chromB:
continue
if len(cs) > 0 and (not (pet.chromA in cs)):
continue
if cut > 0 and pet.distance < cut:
continue
if pet.chromA not in chroms:
cf = os.path.join(fout,
"%s-%s" % (pet.chromA, pet.chromB) + ".txt")
chroms[pet.chromA] = {"f": open(cf, "w"), "c": 0, "r": set()}
cfs.append(cf)
if (pet.cA, pet.cB) in chroms[pet.chromA]["r"]:
continue
nline = [chroms[pet.chromA]["c"], pet.cA, pet.cB]
chroms[pet.chromA]["f"].write("\t".join(map(str, nline)) + "\n")
chroms[pet.chromA]["c"] += 1
chroms[pet.chromA]["r"].add((pet.cA, pet.cB))
j += 1
if pet.strandA != pet.strandB:
ds.append(pet.distance)
print()
del chroms
r = "Totaly %s PETs from %s, in which %s cis PETs" % (i, ",".join(fs), j)
logger.info(r)
return cfs, ds
|
MIT License
|
kyrus/pymyo
|
modules/geoip/command.py
|
parse_record
|
python
|
def parse_record(pymyo, record):
for key, value in record.items():
pymyo.output("%s :\t%s"%(key, value))
|
Pretty print the record data
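A hedged sketch; the pymyo object only needs an output method here, so a minimal stand-in and a pygeoip-style record dict are used:

class _FakePymyo:
    # minimal stand-in exposing the single method parse_record uses
    def output(self, msg):
        print(msg)

record = {"city": "Reston", "country_name": "United States", "latitude": 38.96}
parse_record(_FakePymyo(), record)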
|
https://github.com/kyrus/pymyo/blob/0c5c63813b176484083cc50dc0d072264bf7a9ef/modules/geoip/command.py#L69-L74
|
import os
import socket
import pygeoip
MODULE_LOCATION = os.path.abspath(os.path.dirname(__file__))
__author__ = "rich@kyr.us"
__version__ = 1.0
__updated__ = "26/09/2012"
__help__ = "Module for looking up the geoip location of a host"
__alias__ = ["geo"]
def Command(pymyo, name, *args):
target = args[0]
if target == "update":
return update(pymyo)
try:
socket.inet_pton(socket.AF_INET, target)
gi = pygeoip.GeoIP(os.path.join(MODULE_LOCATION, "GeoLiteCity.dat"))
record = gi.record_by_addr(args[0])
pymyo.output("ipv4 :\t%s"%(args[0]))
parse_record(pymyo, record)
return None
    except Exception as err:
pass
try:
gi = pygeoip.GeoIP(os.path.join(MODULE_LOCATION, "GeoLiteCity.dat"))
record = gi.record_by_name(args[0])
pymyo.output("hostname :\t%s"%(args[0]) )
parse_record(pymyo,record)
return None
    except Exception as err:
pymyo.error("Unable to use geoip lookup on %s"%(target))
|
BSD 3-Clause New or Revised License
|
pypperoni/pypperoni
|
cmake.py
|
CMakeFileGenerator.add_directory
|
python
|
def add_directory(self, path):
cwd = os.getcwd()
path = os.path.abspath(path)
os.chdir(path)
try:
for root, _, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.py'):
self.add_file(os.path.join(root, filename))
finally:
os.chdir(cwd)
|
Adds all Python files (.py) in a directory to modules.
For example,
dir1/
    file1.py
    file2.py
will add the modules "file1" and "file2"
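A hedged usage sketch; the project name and directory are hypothetical, and constructing CMakeFileGenerator also walks the bundled Python Lib directory per its __init__, which is assumed to be present:

gen = CMakeFileGenerator("myproject")   # hypothetical project name
gen.add_directory("src")                # adds every src/**/*.py as a module
print(sorted(gen.modules)[:5])          # module names derived from relative paths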
|
https://github.com/pypperoni/pypperoni/blob/d3bee7a5eecbc4b94227b39f0ec8a0d07bf4e09c/cmake.py#L76-L97
|
from .files import ConditionalFile, FileContainer
from .module import Module, PackageModule, write_modules_file
from .modulereducer import reduce_modules
from .util import safePrint
from threading import Thread, Lock
from queue import Queue, Empty
import traceback
import hashlib
import math
import sys
import os
PYPPERONI_ROOT = os.path.abspath(os.path.dirname(__file__))
PYTHON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'python'))
class CMakeFileGenerator:
def __init__(self, project, outputdir='build', nthreads=4):
self.project = project
self.outputdir = outputdir
self.nthreads = nthreads
self.modules = {}
self.__files = []
self.cmake_in_file = os.path.join(PYPPERONI_ROOT, 'cmake.in')
self.add_directory(os.path.join(PYTHON_ROOT, 'Lib'))
self.generate_codecs_index()
def add_file(self, filename, name=None):
with open(filename, 'rb') as f:
data = f.read()
is_pkg = False
if name is None:
name = os.path.normpath(filename.rsplit('.', 1)[0]).replace(os.sep, '.')
if name.endswith('.__init__'):
name = name[:-9]
is_pkg = True
self.add_module(name, data, is_pkg)
def add_module(self, name, data, is_pkg=False):
if is_pkg:
obj = PackageModule(name, data)
else:
obj = Module(name, data)
self.modules[name] = obj
|
MIT License
|
bloodaxe/pytorch-toolbelt
|
pytorch_toolbelt/utils/catalyst/visualization.py
|
ShowPolarBatchesCallback.__init__
|
python
|
def __init__(
self,
visualize_batch: Callable,
metric: str = "loss",
minimize: bool = True,
min_delta: float = 1e-6,
targets="tensorboard",
):
super().__init__(CallbackOrder.Logging)
assert isinstance(targets, (list, str))
self.best_score = None
self.best_input = None
self.best_output = None
self.worst_score = None
self.worst_input = None
self.worst_output = None
self.target_metric = metric
self.num_bad_epochs = 0
self.is_better = None
self.visualize_batch = visualize_batch
self.targets = [targets] if isinstance(targets, str) else targets
if minimize:
self.is_better = lambda score, best: score <= (best - min_delta)
self.is_worse = lambda score, worst: score >= (worst + min_delta)
else:
self.is_better = lambda score, best: score >= (best + min_delta)
self.is_worse = lambda score, worst: score <= (worst - min_delta)
|
:param visualize_batch: Visualization function that must return list of images.
It takes two arguments: (batch input, predicted output).
:param metric:
:param minimize:
:param min_delta:
:param targets: Str 'tensorboard' or 'matplotlib', or a list ['tensorboard', 'matplotlib']
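A hedged construction sketch; the visualization function is a hypothetical stub returning a list of HWC images, as the docstring requires:

import numpy as np

def visualize_batch(batch_input, batch_output):
    # hypothetical visualizer: must return a list of images
    return [np.zeros((64, 64, 3), dtype=np.uint8)]

callback = ShowPolarBatchesCallback(
    visualize_batch,
    metric="loss",
    minimize=True,
    targets=["tensorboard", "matplotlib"],
)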
|
https://github.com/bloodaxe/pytorch-toolbelt/blob/1728059f9d2a799cc91cb6e8bf230e5421b6888f/pytorch_toolbelt/utils/catalyst/visualization.py#L39-L78
|
import warnings
from typing import Callable, Optional, List, Union, Dict, Iterable
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from catalyst.dl import Callback, CallbackOrder, IRunner, CallbackNode
from catalyst.dl.callbacks import TensorboardLogger
from catalyst.contrib.tools.tensorboard import SummaryWriter
from pytorch_toolbelt.utils import render_figure_to_tensor
from pytorch_toolbelt.utils.distributed import all_gather
from ..torch_utils import rgb_image_from_tensor, to_numpy, image_to_tensor
__all__ = [
"get_tensorboard_logger",
"ShowPolarBatchesCallback",
"ShowEmbeddingsCallback",
"UMAPCallback",
"draw_binary_segmentation_predictions",
"draw_semantic_segmentation_predictions",
]
def get_tensorboard_logger(runner: IRunner, tensorboard_callback_name: str = "_tensorboard") -> SummaryWriter:
tb_callback: TensorboardLogger = runner.callbacks[tensorboard_callback_name]
if runner.loader_name not in tb_callback.loggers:
raise RuntimeError(f"Cannot find Tensorboard logger for loader {runner.loader_name}")
return tb_callback.loggers[runner.loader_name]
class ShowPolarBatchesCallback(Callback):
|
MIT License
|
geoscienceaustralia/agdc
|
src/abstract_ingester/collection.py
|
Lock.__exit__
|
python
|
def __exit__(self, exc_type, exc_value, exc_traceback):
for object_to_unlock in self.lock_list:
self.datacube.unlock_object(object_to_unlock)
|
Auto-called on 'with' statement exit.
Releases the locks whether or not there has been an
exception. Implicitly returns None which causes any
exception to be re-raised.
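A hedged sketch of the intended usage pattern through Collection.lock_datasets (see the context below), which returns a Lock for a 'with' block; 'collection' is an assumed, already-constructed Collection instance, so this is an illustration rather than a standalone script:

# 'collection' is an existing Collection instance (assumed)
with collection.lock_datasets([101, 102]):
    # the dataset locks are held here and released on exit,
    # even if an exception is raised inside the block
    ...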
|
https://github.com/geoscienceaustralia/agdc/blob/2e22c6bdd9305555db3615305ff6a5df6219cd51/src/abstract_ingester/collection.py#L484-L493
|
import logging
import os
import time
import shutil
from agdc.cube_util import DatasetError, create_directory
from tile_contents import TileContents
from acquisition_record import AcquisitionRecord
from ingest_db_wrapper import IngestDBWrapper
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class Collection(object):
def __init__(self, datacube):
self.datacube = datacube
self.db = IngestDBWrapper(datacube.db_connection)
self.new_bands = self.__reindex_bands(datacube.bands)
self.transaction_stack = []
self.temp_tile_directory = os.path.join(self.datacube.tile_root,
'ingest_temp',
self.datacube.process_id)
create_directory(self.temp_tile_directory)
def cleanup(self):
shutil.rmtree(self.temp_tile_directory, ignore_errors=True)
@staticmethod
def get_dataset_key(dataset):
derived_levels = {'PQA', 'FC'}
satellite = dataset.get_satellite_tag()
sensor = dataset.get_sensor_name()
level = dataset.get_processing_level()
if level in derived_levels:
satellite = 'DERIVED'
sensor = level
return (satellite, sensor, level)
def get_temp_tile_directory(self):
return self.temp_tile_directory
def check_metadata(self, dataset):
self.__check_satellite_and_sensor(dataset)
self.__check_processing_level(dataset)
self.__check_bands(dataset)
def transaction(self, db=None):
return Transaction(self.db if db is None else db,
self.transaction_stack)
def lock_datasets(self, dataset_list):
lock_list = ['Dataset-' + str(dataset_id)
for dataset_id in dataset_list]
return Lock(self.datacube, lock_list)
def create_acquisition_record(self, dataset):
return AcquisitionRecord(self, dataset)
def create_tile_contents(self, tile_type_id, tile_footprint,
band_stack):
tile_type_info = self.datacube.tile_type_dict[tile_type_id]
tile_contents = TileContents(self.datacube.tile_root, tile_type_info,
tile_footprint, band_stack)
return tile_contents
def current_transaction(self):
return self.transaction_stack[-1]
def mark_tile_for_removal(self, tile_pathname):
self.current_transaction().mark_tile_for_removal(tile_pathname)
def mark_tile_for_creation(self, tile_contents):
self.current_transaction().mark_tile_for_creation(tile_contents)
@staticmethod
def __reindex_bands(bands):
new_bands = {}
for (tile_type, band_dict) in bands.items():
for ((satellite, sensor), sensor_dict) in band_dict.items():
for (file_number, band_info) in sensor_dict.items():
dataset_key = (satellite, sensor, band_info['level_name'])
new_bands.setdefault(dataset_key, {})
new_bands[dataset_key].setdefault(tile_type, {})
new_bands[dataset_key][tile_type][file_number] = band_info
return new_bands
def __check_satellite_and_sensor(self, dataset):
satellite_id = self.db.get_satellite_id(dataset.get_satellite_tag())
if satellite_id is None:
raise DatasetError("Unknown satellite tag: '%s'" %
dataset.get_satellite_tag())
sensor_id = self.db.get_sensor_id(satellite_id,
dataset.get_sensor_name())
if sensor_id is None:
msg = ("Unknown satellite and sensor pair: '%s', '%s'" %
(dataset.get_satellite_tag(), dataset.get_sensor_name()))
raise DatasetError(msg)
def __check_processing_level(self, dataset):
level_id = self.db.get_level_id(dataset.get_processing_level())
if level_id is None:
raise DatasetError("Unknown processing level: '%s'" %
dataset.get_processing_level())
def __check_bands(self, dataset):
try:
dataset_bands = self.new_bands[self.get_dataset_key(dataset)]
except KeyError:
raise DatasetError('No tile types for this dataset.')
for tile_type_bands in dataset_bands.values():
for band_info in tile_type_bands.values():
dataset.find_band_file(band_info['file_pattern'])
class Transaction(object):
def __init__(self, db, tr_stack=None):
self.db = db
self.tr_stack = tr_stack
self.tile_remove_list = None
self.tile_create_list = None
self.previous_commit_mode = None
def __enter__(self):
self.tile_remove_list = []
self.tile_create_list = []
self.previous_commit_mode = self.db.turn_off_autocommit()
if self.tr_stack is not None:
self.tr_stack.append(self)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type is None:
self.__commit()
else:
self.__rollback()
self.tile_remove_list = None
self.tile_create_list = None
self.db.restore_commit_mode(self.previous_commit_mode)
if self.tr_stack is not None:
tr = self.tr_stack.pop()
assert tr is self, "Unexpected value on transaction stack."
def __commit(self):
for tile_contents in self.tile_create_list:
tile_contents.make_permanent()
self.db.commit()
tile_create_set = {t.get_output_path()
for t in self.tile_create_list}
for tile_pathname in self.tile_remove_list:
if tile_pathname not in tile_create_set:
if os.path.isfile(tile_pathname):
os.remove(tile_pathname)
def __rollback(self):
for tile_contents in self.tile_create_list:
tile_contents.remove()
self.db.rollback()
def mark_tile_for_removal(self, tile_pathname):
if tile_pathname not in self.tile_remove_list:
self.tile_remove_list.append(tile_pathname)
def mark_tile_for_creation(self, tile_contents):
self.tile_create_list.append(tile_contents)
class Lock(object):
DEFAULT_WAIT = 10
DEFAULT_RETRIES = 6
def __init__(self,
datacube,
lock_list,
wait=DEFAULT_WAIT,
retries=DEFAULT_RETRIES):
self.datacube = datacube
self.lock_list = sorted(lock_list)
self.wait = wait
self.retries = retries
def __enter__(self):
for dummy_tries in range(self.retries + 1):
try:
self.__acquire_locks(self.lock_list)
break
except LockError:
time.sleep(self.wait)
else:
raise LockError(("Unable to lock objects after %s tries: " %
self.retries) +
                            str(self.lock_list))
return self
|
BSD 3-Clause New or Revised License
|
pwwang/datar
|
datar/dplyr/group_data.py
|
group_size
|
python
|
def group_size(_data: DataFrame) -> List[int]:
return [_data.shape[0]]
|
Gives the size of each group
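A hedged sketch on an ungrouped frame; inside datar pipelines the verb is normally piped, but a direct call is shown here for brevity:

from pandas import DataFrame

df = DataFrame({"x": [1, 2, 3], "g": ["a", "a", "b"]})
print(group_size(df))   # -> [3]: an ungrouped frame counts as a single group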
|
https://github.com/pwwang/datar/blob/4e2b5db026ad35918954576badef9951928c0cb1/datar/dplyr/group_data.py#L117-L119
|
from typing import List
from pandas import DataFrame
from pipda import register_verb
from pipda.utils import CallingEnvs
from ..core.grouped import DataFrameGroupBy
from ..base import setdiff
@register_verb(DataFrame)
def group_data(_data: DataFrame) -> DataFrame:
rows = list(range(_data.shape[0]))
return DataFrame({"_rows": [rows]})
@group_data.register(DataFrameGroupBy)
def _(_data: DataFrameGroupBy) -> DataFrame:
return _data._group_data
@register_verb(DataFrame)
def group_keys(_data: DataFrame) -> DataFrame:
return DataFrame(index=[0])
@group_keys.register(DataFrameGroupBy)
def _(_data: DataFrameGroupBy) -> DataFrame:
return (
group_data(_data, __calling_env=CallingEnvs.REGULAR).iloc[:, :-1].copy()
)
@register_verb(DataFrame)
def group_rows(_data: DataFrame) -> List[List[int]]:
rows = list(range(_data.shape[0]))
return [rows]
@group_rows.register(DataFrameGroupBy)
def _(_data: DataFrame) -> List[List[int]]:
return group_data(_data, __calling_env=CallingEnvs.REGULAR)[
"_rows"
].tolist()
@register_verb(DataFrame)
def group_indices(_data: DataFrame) -> List[int]:
return [0] * _data.shape[0]
@group_indices.register(DataFrameGroupBy)
def _(_data: DataFrameGroupBy) -> List[int]:
ret = {}
for row in group_data(
_data, __calling_env=CallingEnvs.REGULAR
).itertuples():
for index in row[-1]:
ret[index] = row.Index
return [ret[key] for key in sorted(ret)]
@register_verb(DataFrame)
def group_vars(_data: DataFrame) -> List[str]:
index = _data.attrs.get("_group_index", None)
if index is None:
return []
gdata = _data.attrs["_group_data"]
return setdiff(gdata.columns, ["_rows"], __calling_env=CallingEnvs.REGULAR)
@group_vars.register(DataFrameGroupBy)
def _(_data: DataFrameGroupBy) -> List[str]:
return _data.attrs["_group_vars"]
groups = group_vars
group_cols = group_vars
@register_verb(DataFrame)
|
MIT License
|
aakhundov/tf-example-models
|
models/tf_cnn.py
|
conv_pool_layer
|
python
|
def conv_pool_layer(x, in_channels, out_channels, layer_name):
with tf.name_scope(layer_name):
weights = tf.Variable(tf.truncated_normal([5, 5, in_channels, out_channels], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[out_channels]))
conv = tf.nn.relu(tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding='SAME') + biases)
pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
return pool
|
Creates a single convolution + max-pooling (conv-pool) layer of a neural network
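A hedged sketch in the TF 1.x graph style used by the surrounding file; the shapes follow the NHWC MNIST layout:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])       # NHWC MNIST batch
pool1 = conv_pool_layer(x, 1, 32, "convpool1")          # -> [None, 14, 14, 32]
pool2 = conv_pool_layer(pool1, 32, 64, "convpool2")     # -> [None, 7, 7, 64]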
|
https://github.com/aakhundov/tf-example-models/blob/40b32991a76cb8d7201f9a5851789847db310b79/models/tf_cnn.py#L41-L56
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
LOCAL_FOLDER = "MNIST_data/"
IMAGE_SIZE = 28
IMAGE_PIXELS = 784
NUM_CLASSES = 10
CONVPOOL1_CHANNELS = 32
CONVPOOL2_CHANNELS = 64
DENSE_HIDDEN_UNITS = 1024
LEARNING_RATE = 1e-4
TRAINING_STEPS = 2000
BATCH_SIZE = 100
def dense_layer(x, in_dim, out_dim, layer_name, act):
with tf.name_scope(layer_name):
weights = tf.Variable(
tf.truncated_normal(
[in_dim, out_dim],
stddev=1.0 / tf.sqrt(float(out_dim))
), name="weights"
)
biases = tf.Variable(tf.zeros([out_dim]), name="biases")
layer = act(tf.matmul(x, weights) + biases, name="activations")
return layer
|
Apache License 2.0
|
openstack/networking-hyperv
|
networking_hyperv/neutron/agent/layer2.py
|
Layer2Agent._load_physical_network_mappings
|
python
|
def _load_physical_network_mappings(self, phys_net_vswitch_mappings):
for mapping in phys_net_vswitch_mappings:
parts = mapping.split(':')
if len(parts) != 2:
LOG.debug('Invalid physical network mapping: %s', mapping)
else:
pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
pattern = pattern + '$'
vswitch = parts[1].strip()
self._physical_network_mappings[pattern] = vswitch
|
Load all the information regarding the physical network.
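A hedged, standalone sketch of the wildcard-to-regex conversion this method performs; the mapping strings are hypothetical config values of the form 'physical_network:vswitch_name':

import re

mappings = ["physnet1:vSwitch0", "physnet*:external"]   # hypothetical config values
compiled = {}
for mapping in mappings:
    phys, vswitch = (part.strip() for part in mapping.split(":"))
    pattern = re.escape(phys).replace("\\*", ".*") + "$"
    compiled[pattern] = vswitch

print(compiled)   # {'physnet1$': 'vSwitch0', 'physnet.*$': 'external'}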
|
https://github.com/openstack/networking-hyperv/blob/7619f8c9974dcceeb4f04b43c30564a753c357ec/networking_hyperv/neutron/agent/layer2.py#L134-L144
|
import abc
import collections
import re
import eventlet
from eventlet import tpool
from neutron.agent import rpc as agent_rpc
from neutron_lib.agent import topics
from neutron_lib import constants as n_const
from neutron_lib import rpc as n_rpc
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_service import loopingcall
import six
from networking_hyperv.common.i18n import _, _LI, _LE
from networking_hyperv.neutron import _common_utils as c_util
from networking_hyperv.neutron.agent import base as base_agent
from networking_hyperv.neutron import config
from networking_hyperv.neutron import constants
from networking_hyperv.neutron import exception
LOG = logging.getLogger(__name__)
CONF = config.CONF
_synchronized = lockutils.synchronized_with_prefix('n-hv-agent-')
_port_synchronized = c_util.get_port_synchronized_decorator('n-hv-agent-')
class Layer2Agent(base_agent.BaseAgent):
_AGENT_TOPIC = n_const.L2_AGENT_TOPIC
_OVS_EXT_NAME_RE = re.compile(r'.*((open.?v.?switch)|(ovs)).*',
re.IGNORECASE)
def __init__(self):
super(Layer2Agent, self).__init__()
self._network_vswitch_map = {}
self._added_ports = set()
self._removed_ports = set()
self._bound_ports = set()
self._unbound_ports = set()
self._physical_network_mappings = collections.OrderedDict()
self._consumers = []
self._event_callback_pairs = []
self._setup()
self._set_agent_state()
self._setup_rpc()
def _setup(self):
agent_config = CONF.get("AGENT", {})
self._worker_count = agent_config.get('worker_count')
self._phys_net_map = agent_config.get(
'physical_network_vswitch_mappings', [])
self._local_network_vswitch = agent_config.get(
'local_network_vswitch')
self._load_physical_network_mappings(self._phys_net_map)
self._validate_vswitches()
self._endpoints.append(self)
self._event_callback_pairs.extend([
(self._utils.EVENT_TYPE_CREATE, self._process_added_port_event),
(self._utils.EVENT_TYPE_DELETE, self._process_removed_port_event)
])
tpool.set_num_threads(self._worker_count)
def _setup_qos_extension(self):
pass
def _setup_rpc(self):
self._plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self._state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self._client = n_rpc.get_client(self.target)
self._consumers.extend([
[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE],
[topics.PORT, topics.DELETE]
])
self._connection = agent_rpc.create_consumers(
self._endpoints, self._topic, self._consumers,
start_listening=False
)
self._setup_qos_extension()
self._connection.consume_in_threads()
report_interval = CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _process_added_port_event(self, port_name):
LOG.info("Hyper-V VM vNIC added: %s", port_name)
self._added_ports.add(port_name)
def _process_removed_port_event(self, port_name):
LOG.info("Hyper-V VM vNIC removed: %s", port_name)
self._removed_ports.add(port_name)
|
Apache License 2.0
|
purestorage-openconnect/py-pure-client
|
pypureclient/flasharray/FA_2_0/models/volume_snapshot_transfer_get_response.py
|
VolumeSnapshotTransferGetResponse.__init__
|
python
|
def __init__(
self,
more_items_remaining=None,
total_item_count=None,
items=None,
total=None,
):
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
if total is not None:
self.total = total
|
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
items (list[VolumeSnapshotTransfer]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
total (list[VolumeSnapshotTransfer]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
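A hedged construction sketch; an empty items list stands in for real VolumeSnapshotTransfer models:

response = VolumeSnapshotTransferGetResponse(
    more_items_remaining=False,
    total_item_count=0,
    items=[],                 # would normally hold VolumeSnapshotTransfer models
)
print(response.total_item_count)   # -> 0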
|
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_0/models/volume_snapshot_transfer_get_response.py#L49-L70
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_0 import models
class VolumeSnapshotTransferGetResponse(object):
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'items': 'list[VolumeSnapshotTransfer]',
'total': 'list[VolumeSnapshotTransfer]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'items': 'items',
'total': 'total'
}
required_args = {
}
|
BSD 2-Clause Simplified License
|
alecwangcq/grasp
|
utils/prune_utils.py
|
ComputeMatGrad.linear
|
python
|
def linear(input, grad_output, layer):
with torch.no_grad():
if layer.bias is not None:
input = torch.cat([input, input.new(input.size(0), 1).fill_(1)], 1)
input = input.unsqueeze(1)
grad_output = grad_output.unsqueeze(2)
grad = torch.bmm(grad_output, input)
return grad
|
:param input: batch_size * input_dim
:param grad_output: batch_size * output_dim
:param layer: [nn.module] output_dim * input_dim
:return: batch_size * output_dim * (input_dim + [1 if with bias])
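A hedged sketch verifying the documented per-sample gradient shape for a bias-carrying linear layer:

import torch
import torch.nn as nn

layer = nn.Linear(4, 3)                     # input_dim=4, output_dim=3, with bias
inp = torch.randn(8, 4)                     # batch_size=8
grad_out = torch.randn(8, 3)                # gradient w.r.t. the layer output
grad = ComputeMatGrad.linear(inp, grad_out, layer)
print(grad.shape)                           # torch.Size([8, 3, 5]) -- bias column appended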
|
https://github.com/alecwangcq/grasp/blob/f17d87ac537bfce476ac2424171a8b6eb56499db/utils/prune_utils.py#L89-L102
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common_utils import try_contiguous
def _fetch_weights_collections(scores, _prev_masks):
weights = []
eps = 1e-10
if _prev_masks is None:
for m in scores.keys():
if isinstance(m, (nn.Linear, nn.Conv2d)):
w = scores[m].view(-1).data.cpu().numpy()
weights.extend(w.tolist())
else:
for m in scores.keys():
if isinstance(m, (nn.Linear, nn.Conv2d)):
w = scores[m]
w = filter_weights(w, _prev_masks[m])
weights.extend(w)
return weights
def _extract_patches(x, kernel_size, stride, padding):
if padding[0] + padding[1] > 0:
x = F.pad(x, (padding[1], padding[1], padding[0],
padding[0])).data
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(
x.size(0), x.size(1), x.size(2),
x.size(3) * x.size(4) * x.size(5))
return x
def filter_weights(weights, mask):
w = weights.view(-1).tolist()
m = mask.view(-1).tolist()
res = []
for idx in range(len(m)):
if m[idx] > 0.5:
res.append(w[idx])
return res
def _extract_patches(x, kernel_size, stride, padding):
if padding[0] + padding[1] > 0:
x = F.pad(x, (padding[1], padding[1], padding[0],
padding[0])).data
x = x.unfold(2, kernel_size[0], stride[0])
x = x.unfold(3, kernel_size[1], stride[1])
x = x.transpose_(1, 2).transpose_(2, 3).contiguous()
x = x.view(
x.size(0), x.size(1), x.size(2),
x.size(3) * x.size(4) * x.size(5))
return x
class ComputeMatGrad:
@classmethod
def __call__(cls, input, grad_output, layer):
if isinstance(layer, nn.Linear):
grad = cls.linear(input, grad_output, layer)
elif isinstance(layer, nn.Conv2d):
grad = cls.conv2d(input, grad_output, layer)
else:
raise NotImplementedError
return grad
@staticmethod
|
MIT License
|
vishal-v/stackgan
|
model.py
|
load_images
|
python
|
def load_images(image_path, bounding_box, size):
image = Image.open(image_path).convert('RGB')
w, h = image.size
if bounding_box is not None:
r = int(np.maximum(bounding_box[2], bounding_box[3]) * 0.75)
c_x = int((bounding_box[0] + bounding_box[2]) / 2)
c_y = int((bounding_box[1] + bounding_box[3]) / 2)
y1 = np.maximum(0, c_y - r)
y2 = np.minimum(h, c_y + r)
x1 = np.maximum(0, c_x - r)
x2 = np.minimum(w, c_x + r)
image = image.crop([x1, y1, x2, y2])
image = image.resize(size, PIL.Image.BILINEAR)
return image
|
Crops the image to the bounding box and then resizes it.
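A hedged usage sketch; the image path and bounding box are hypothetical stand-ins for the CUB-style values produced by load_bbox in the same file:

# hypothetical path and [x, y, width, height] bounding box
image = load_images("CUB_200_2011/images/001.Black_footed_Albatross/img_0001.jpg",
                    bounding_box=[60, 27, 325, 304], size=(64, 64))
image.save("cropped_64.png")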
|
https://github.com/vishal-v/stackgan/blob/f83587e5f73f9fa05f31e14dcf4b67daa8e6e118/model.py#L461-L477
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import random
import time
import numpy as np
import pandas as pd
import tensorflow as tf
assert tf.__version__.startswith('2')
import PIL
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import LeakyReLU, BatchNormalization, ReLU, Activation
from tensorflow.keras.layers import UpSampling2D, Conv2D, Concatenate, Dense, concatenate
from tensorflow.keras.layers import Flatten, Lambda, Reshape, ZeroPadding2D, add
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def conditioning_augmentation(x):
mean = x[:, :128]
log_sigma = x[:, 128:]
stddev = tf.math.exp(log_sigma)
epsilon = K.random_normal(shape=K.constant((mean.shape[1], ), dtype='int32'))
c = mean + stddev * epsilon
return c
def build_ca_network():
input_layer1 = Input(shape=(1024,))
mls = Dense(256)(input_layer1)
mls = LeakyReLU(alpha=0.2)(mls)
ca = Lambda(conditioning_augmentation)(mls)
return Model(inputs=[input_layer1], outputs=[ca])
def UpSamplingBlock(x, num_kernels):
x = UpSampling2D(size=(2,2))(x)
x = Conv2D(num_kernels, kernel_size=(3,3), padding='same', strides=1, use_bias=False,
kernel_initializer='he_uniform')(x)
x = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x)
x = ReLU()(x)
return x
def build_stage1_generator():
input_layer1 = Input(shape=(1024,))
ca = Dense(256)(input_layer1)
ca = LeakyReLU(alpha=0.2)(ca)
c = Lambda(conditioning_augmentation)(ca)
input_layer2 = Input(shape=(100,))
concat = Concatenate(axis=1)([c, input_layer2])
x = Dense(16384, use_bias=False)(concat)
x = ReLU()(x)
x = Reshape((4, 4, 1024), input_shape=(16384,))(x)
x = UpSamplingBlock(x, 512)
x = UpSamplingBlock(x, 256)
x = UpSamplingBlock(x, 128)
x = UpSamplingBlock(x, 64)
x = Conv2D(3, kernel_size=3, padding='same', strides=1, use_bias=False,
kernel_initializer='he_uniform')(x)
x = Activation('tanh')(x)
stage1_gen = Model(inputs=[input_layer1, input_layer2], outputs=[x, ca])
return stage1_gen
def ConvBlock(x, num_kernels, kernel_size=(4,4), strides=2, activation=True):
x = Conv2D(num_kernels, kernel_size=kernel_size, padding='same', strides=strides, use_bias=False,
kernel_initializer='he_uniform')(x)
x = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x)
if activation:
x = LeakyReLU(alpha=0.2)(x)
return x
def build_embedding_compressor():
input_layer1 = Input(shape=(1024,))
x = Dense(128)(input_layer1)
x = ReLU()(x)
model = Model(inputs=[input_layer1], outputs=[x])
return model
def build_stage1_discriminator():
input_layer1 = Input(shape=(64, 64, 3))
x = Conv2D(64, kernel_size=(4,4), strides=2, padding='same', use_bias=False,
kernel_initializer='he_uniform')(input_layer1)
x = LeakyReLU(alpha=0.2)(x)
x = ConvBlock(x, 128)
x = ConvBlock(x, 256)
x = ConvBlock(x, 512)
input_layer2 = Input(shape=(4, 4, 128))
concat = concatenate([x, input_layer2])
x1 = Conv2D(512, kernel_size=(1,1), padding='same', strides=1, use_bias=False,
kernel_initializer='he_uniform')(concat)
    x1 = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x1)
    x1 = LeakyReLU(alpha=0.2)(x1)
x1 = Flatten()(x1)
x1 = Dense(1)(x1)
x1 = Activation('sigmoid')(x1)
stage1_dis = Model(inputs=[input_layer1, input_layer2], outputs=[x1])
return stage1_dis
def build_adversarial(generator_model, discriminator_model):
input_layer1 = Input(shape=(1024,))
input_layer2 = Input(shape=(100,))
input_layer3 = Input(shape=(4, 4, 128))
x, ca = generator_model([input_layer1, input_layer2])
discriminator_model.trainable = False
probabilities = discriminator_model([x, input_layer3])
adversarial_model = Model(inputs=[input_layer1, input_layer2, input_layer3], outputs=[probabilities, ca])
return adversarial_model
def concat_along_dims(inputs):
c = inputs[0]
x = inputs[1]
c = K.expand_dims(c, axis=1)
c = K.expand_dims(c, axis=1)
c = K.tile(c, [1, 16, 16, 1])
return K.concatenate([c, x], axis = 3)
def residual_block(input):
x = Conv2D(512, kernel_size=(3,3), padding='same', use_bias=False,
kernel_initializer='he_uniform')(input)
x = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x)
x = ReLU()(x)
x = Conv2D(512, kernel_size=(3,3), padding='same', use_bias=False,
kernel_initializer='he_uniform')(x)
x = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x)
x = add([x, input])
x = ReLU()(x)
return x
def build_stage2_generator():
input_layer1 = Input(shape=(1024,))
input_images = Input(shape=(64, 64, 3))
ca = Dense(256)(input_layer1)
mls = LeakyReLU(alpha=0.2)(ca)
c = Lambda(conditioning_augmentation)(mls)
x = ZeroPadding2D(padding=(1,1))(input_images)
x = Conv2D(128, kernel_size=(3,3), strides=1, use_bias=False,
kernel_initializer='he_uniform')(x)
x = ReLU()(x)
x = ZeroPadding2D(padding=(1,1))(x)
x = Conv2D(256, kernel_size=(4,4), strides=2, use_bias=False,
kernel_initializer='he_uniform')(x)
x = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x)
x = ReLU()(x)
x = ZeroPadding2D(padding=(1,1))(x)
x = Conv2D(512, kernel_size=(4,4), strides=2, use_bias=False,
kernel_initializer='he_uniform')(x)
x = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x)
x = ReLU()(x)
concat = concat_along_dims([c, x])
x = ZeroPadding2D(padding=(1,1))(concat)
x = Conv2D(512, kernel_size=(3,3), use_bias=False, kernel_initializer='he_uniform')(x)
x = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x)
x = ReLU()(x)
x = residual_block(x)
x = residual_block(x)
x = residual_block(x)
x = residual_block(x)
x = UpSamplingBlock(x, 512)
x = UpSamplingBlock(x, 256)
x = UpSamplingBlock(x, 128)
x = UpSamplingBlock(x, 64)
x = Conv2D(3, kernel_size=(3,3), padding='same', use_bias=False, kernel_initializer='he_uniform')(x)
x = Activation('tanh')(x)
stage2_gen = Model(inputs=[input_layer1, input_images], outputs=[x, mls])
return stage2_gen
def build_stage2_discriminator():
input_layer1 = Input(shape=(256, 256, 3))
x = Conv2D(64, kernel_size=(4,4), padding='same', strides=2, use_bias=False,
kernel_initializer='he_uniform')(input_layer1)
x = LeakyReLU(alpha=0.2)(x)
x = ConvBlock(x, 128)
x = ConvBlock(x, 256)
x = ConvBlock(x, 512)
x = ConvBlock(x, 1024)
x = ConvBlock(x, 2048)
x = ConvBlock(x, 1024, (1,1), 1)
x = ConvBlock(x, 512, (1,1), 1, False)
x1 = ConvBlock(x, 128, (1,1), 1)
x1 = ConvBlock(x1, 128, (3,3), 1)
x1 = ConvBlock(x1, 512, (3,3), 1, False)
x2 = add([x, x1])
x2 = LeakyReLU(alpha=0.2)(x2)
input_layer2 = Input(shape=(4, 4, 128))
concat = concatenate([x2, input_layer2])
x3 = Conv2D(512, kernel_size=(1,1), strides=1, padding='same', kernel_initializer='he_uniform')(concat)
x3 = BatchNormalization(gamma_initializer='ones', beta_initializer='zeros')(x3)
x3 = LeakyReLU(alpha=0.2)(x3)
x3 = Flatten()(x3)
x3 = Dense(1)(x3)
x3 = Activation('sigmoid')(x3)
stage2_dis = Model(inputs=[input_layer1, input_layer2], outputs=[x3])
return stage2_dis
def stage2_adversarial_network(stage2_disc, stage2_gen, stage1_gen):
conditioned_embedding = Input(shape=(1024, ))
latent_space = Input(shape=(100, ))
compressed_replicated = Input(shape=(4, 4, 128))
input_images, ca = stage1_gen([conditioned_embedding, latent_space])
stage2_disc.trainable = False
stage1_gen.trainable = False
images, ca2 = stage2_gen([conditioned_embedding, input_images])
probability = stage2_disc([images, compressed_replicated])
return Model(inputs=[conditioned_embedding, latent_space, compressed_replicated],
outputs=[probability, ca2])
def checkpoint_prefix():
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
return checkpoint_prefix
def adversarial_loss(y_true, y_pred):
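    # Hedged reading (added note): with ls taken as log(sigma), each term below equals the
    # per-dimension KL divergence between N(mean, exp(ls)**2) and the standard normal N(0, 1).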
mean = y_pred[:, :128]
ls = y_pred[:, 128:]
loss = -ls + 0.5 * (-1 + tf.math.exp(2.0 * ls) + tf.math.square(mean))
loss = K.mean(loss)
return loss
def normalize(input_image, real_image):
input_image = (input_image / 127.5) - 1
real_image = (real_image / 127.5) - 1
return input_image, real_image
def load_class_ids_filenames(class_id_path, filename_path):
with open(class_id_path, 'rb') as file:
class_id = pickle.load(file, encoding='latin1')
with open(filename_path, 'rb') as file:
filename = pickle.load(file, encoding='latin1')
return class_id, filename
def load_text_embeddings(text_embeddings):
with open(text_embeddings, 'rb') as file:
embeds = pickle.load(file, encoding='latin1')
embeds = np.array(embeds)
return embeds
def load_bbox(data_path):
bbox_path = data_path + '/bounding_boxes.txt'
image_path = data_path + '/images.txt'
bbox_df = pd.read_csv(bbox_path, delim_whitespace=True, header=None).astype(int)
filename_df = pd.read_csv(image_path, delim_whitespace=True, header=None)
filenames = filename_df[1].tolist()
bbox_dict = {i[:-4]:[] for i in filenames[:2]}
for i in range(0, len(filenames)):
bbox = bbox_df.iloc[i][1:].tolist()
dict_key = filenames[i][:-4]
bbox_dict[dict_key] = bbox
return bbox_dict
|
MIT License
|
tarsqi/ttk
|
components/merging/sputlink/graph.py
|
Graph._remove_derived_relations
|
python
|
def _remove_derived_relations(self):
for edge in self.get_edges():
if edge.is_derived():
edge.remove_constraint()
|
Remove all derived relations from the graph.
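A minimal standalone sketch of the same pruning idea, using hypothetical stand-in edges that expose the is_derived() and remove_constraint() methods assumed above:
class _StubEdge(object):
    # Hypothetical stand-in for Edge: records whether its constraint was derived by closure.
    def __init__(self, derived):
        self.derived = derived
        self.constraint = 'asserted-or-derived-constraint'
    def is_derived(self):
        return self.derived
    def remove_constraint(self):
        self.constraint = None
def remove_derived_relations(edges):
    # Same loop as the method above: drop constraints that were inferred rather than asserted.
    for edge in edges:
        if edge.is_derived():
            edge.remove_constraint()
edges = [_StubEdge(True), _StubEdge(False)]
remove_derived_relations(edges)
print([e.constraint is None for e in edges])  # [True, False]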
|
https://github.com/tarsqi/ttk/blob/773273098475e1631084aca5264f5732724cd8dd/components/merging/sputlink/graph.py#L318-L322
|
from __future__ import absolute_import
from __future__ import print_function
from .objects import Node, Edge, Constraint
from .utils import intersect_relations
from .utils import compare_id
from .utils import html_graph_prefix
from .mappings import invert_interval_relation
from .mappings import abbreviate_convex_relation
from utilities import logger
from library.main import LIBRARY
from io import open
DEBUG = True
DEBUG = False
TIMEX = LIBRARY.timeml.TIMEX
TID = LIBRARY.timeml.TID
EVENT = LIBRARY.timeml.EVENT
EID = LIBRARY.timeml.EID
EIID = LIBRARY.timeml.EIID
EVENTID = LIBRARY.timeml.EVENTID
FORM = LIBRARY.timeml.FORM
VALUE = LIBRARY.timeml.VALUE
class Graph(object):
def __init__(self, compositions):
self.compositions = compositions
self.cycle = 0
self.queue = []
self.nodes = {}
self.edges = {}
def add_nodes(self, events, timexes):
for timex in timexes:
node = Node(timex=timex)
self.nodes[node.id] = node
for event in events:
node = Node(event=event)
self.nodes[node.id] = node
for n1 in self.nodes.keys():
self.edges[n1] = {}
for n2 in self.nodes.keys():
self.edges[n1][n2] = Edge(n1, n2, self)
def add_nodes(self, sources, source_type):
for source in sources:
if source_type == 'IDENTIFIER':
identifier = source
text = ''
elif source_type == TIMEX:
identifier = source.attrs[TID]
text = source.attrs[VALUE]
elif source_type == EVENT:
identifier = source.attrs[EIID]
text = source.attrs[FORM]
node = Node(source, identifier, source_type, text)
self.nodes[node.id] = node
for n1 in self.nodes.keys():
self.edges[n1] = {}
for n2 in self.nodes.keys():
self.edges[n1][n2] = Edge(n1, n2, self)
def propagate(self, constraint):
self.cycle += 1
if constraint.is_garbage():
return
self.added = []
self.queue.append(constraint)
debug(str="\n%d %s\n" % (self.cycle, constraint))
while self.queue:
constraint_i_j = self.queue.pop(0)
constraint_i_j.cycle = self.cycle
debug(1, "POP QUEUE: %s" % (constraint_i_j))
edge_i_j = self.edges[constraint_i_j.node1][constraint_i_j.node2]
(status, intersection) = self._intersect_constraints(edge_i_j,
constraint_i_j)
if status == 'INTERSECTION-IS-MORE-SPECIFIC':
self.added.append(constraint_i_j)
self._update_constraint(edge_i_j, constraint_i_j, intersection)
def reduce(self):
self.cycle += 1
self.added = []
self._remove_derived_relations()
def remove_node(self, node_id):
node = self.nodes[node_id]
for node_in_id in node.edges_in.keys():
del self.nodes[node_in_id].edges_out[node_id]
for node_out_id in node.edges_out.keys():
del self.nodes[node_out_id].edges_in[node_id]
del self.nodes[node_id]
del self.edges[node_id]
for other_node_id in self.edges.keys():
del self.edges[other_node_id][node_id]
def _update_constraint(self, edge_i_j, constraint_i_j, intersection):
constraint_i_j.relset = intersection
self._add_constraint_to_edge(constraint_i_j, edge_i_j)
node_i = constraint_i_j.get_node1()
node_j = constraint_i_j.get_node2()
node_i.edges_out[constraint_i_j.node2] = edge_i_j
node_j.edges_in[constraint_i_j.node1] = edge_i_j
self._check_all_k_i_j(node_i, node_j, edge_i_j)
self._check_all_i_j_k(node_i, node_j, edge_i_j)
def _check_all_k_i_j(self, node_i, node_j, edge_i_j):
debug(1, "CHECKING: X --> %s --> %s" % (node_i.id, node_j.id))
for edge_k_i in node_i.edges_in.values():
debug(2, "%s * %s" % (edge_k_i, edge_i_j))
self._check_k_i_j(edge_k_i, edge_i_j, node_i, node_j)
def _check_all_i_j_k(self, node_i, node_j, edge_i_j):
debug(1, "CHECKING: %s --> %s --> X" % (node_i.id, node_j.id))
for edge_j_k in node_j.edges_out.values():
debug(2, "%s * %s" % (edge_i_j, edge_j_k))
self._check_i_j_k(edge_i_j, edge_j_k, node_i, node_j)
def _check_k_i_j(self, edge_k_i, edge_i_j, node_i, node_j):
node_k = edge_k_i.get_node1()
if node_k.id == node_j.id:
return
edge_k_j = self._get_edge(node_k, node_j)
relset_k_j = self._compose(edge_k_i, edge_i_j.constraint)
debug(3, "{%s} * {%s} --> {%s} || %s "
% (edge_k_i.constraint.relset, edge_i_j.constraint.relset,
relset_k_j, edge_k_j.constraint))
if relset_k_j is not None:
self._combine(edge_k_j, relset_k_j,
edge_k_i.constraint, edge_i_j.constraint)
def _check_i_j_k(self, edge_i_j, edge_j_k, node_i, node_j):
node_k = edge_j_k.get_node2()
if node_k.id == node_i.id:
return
edge_i_k = self._get_edge(node_i, node_k)
relset_i_k = self._compose(edge_i_j.constraint, edge_j_k)
debug(3, "{%s} * {%s} --> {%s} || %s "
% (edge_i_j.constraint.relset, edge_j_k.constraint.relset,
relset_i_k, edge_i_k.constraint))
if relset_i_k is not None:
self._combine(edge_i_k, relset_i_k,
edge_i_j.constraint, edge_j_k.constraint)
def _combine(self, edge, relset, c1, c2):
edge_relset = edge.relset
intersection = intersect_relations(edge_relset, relset)
if intersection == '':
debug(4, "WARNING: found an inconsistency where it shouldn't be")
pass
elif intersection is None:
debug(4, "WARNING: intersection is None, this should not happen")
pass
elif edge_relset is None:
self._add_constraint_to_queue(edge, intersection, c1, c2)
elif len(intersection) < len(edge_relset):
self._add_constraint_to_queue(edge, intersection, c1, c2)
def _add_constraint_to_queue(self, edge, relset, c1, c2):
new_constraint = Constraint(edge.node1, relset, edge.node2,
cycle=self.cycle, source='closure',
history=(c1, c2))
self.queue.append(new_constraint)
debug(3, "ADD QUEUE %s " % new_constraint)
add_inverted = False
if add_inverted:
relset = invert_interval_relation(relset)
new_constraint2 = Constraint(edge.node2, relset, edge.node1,
cycle=self.cycle,
source='closure-inverted',
history=(c1, c2))
self.queue.append(new_constraint2)
debug(3, "ADD QUEUE %s " % new_constraint2)
def _intersect_constraints(self, edge, constraint):
edge = self.edges[constraint.node1][constraint.node2]
new_relset = constraint.relset
existing_relset = edge.relset
intersection = intersect_relations(new_relset, existing_relset)
debug(2, "INTERSECT NEW {%s} WITH EXISTING {%s} --> {%s}"
% (constraint.relset, edge.relset, intersection))
if intersection == '':
status = 'INCONSISTENT'
logger.warn("Inconsistent new constraint: %s" % constraint)
logger.warn("Clashes with: [%s] (derived from %s)"
% (edge.constraint, edge.constraint.history_string()))
elif new_relset == existing_relset:
status = 'NEW=EXISTING'
elif intersection == existing_relset:
status = 'INTERSECTION=EXISTING'
else:
status = 'INTERSECTION-IS-MORE-SPECIFIC'
debug(2, "STATUS: %s" % status)
return (status, intersection)
def _compose(self, object1, object2):
rels1 = object1.relset
rels2 = object2.relset
return self.compositions.compose_rels(rels1, rels2)
def _add_constraint_to_edge(self, constraint, edge):
edge.add_constraint(constraint)
constraint.edge = edge
def _get_edge(self, node1, node2):
return self.edges[node1.id][node2.id]
def get_edges(self):
edges = []
for n1 in self.edges.keys():
for n2 in self.edges[n1].keys():
edge = self.edges[n1][n2]
if n1 != n2 and edge.constraint:
edges.append(edge)
return edges
def _remove_disjunctions(self):
for edge in self.get_edges():
if edge.constraint:
if edge.constraint.is_disjunction():
edge.remove_constraint()
|
Apache License 2.0
|
databricks/koalas
|
databricks/koalas/frame.py
|
_create_tuple_for_frame_type
|
python
|
def _create_tuple_for_frame_type(params):
from databricks.koalas.typedef import NameTypeHolder
if isinstance(params, zip):
params = [slice(name, tpe) for name, tpe in params]
if isinstance(params, slice):
params = (params,)
if (
hasattr(params, "__len__")
and isinstance(params, Iterable)
and all(isinstance(param, slice) for param in params)
):
for param in params:
if isinstance(param.start, str) and param.step is not None:
raise TypeError(
"Type hints should be specified as "
"DataFrame['name': type]; however, got %s" % param
)
name_classes = []
for param in params:
new_class = type("NameType", (NameTypeHolder,), {})
new_class.name = param.start
new_class.tpe = param.stop.type if isinstance(param.stop, np.dtype) else param.stop
name_classes.append(new_class)
return Tuple[tuple(name_classes)]
if not isinstance(params, Iterable):
params = [params]
new_params = []
for param in params:
if isinstance(param, ExtensionDtype):
new_class = type("NameType", (NameTypeHolder,), {})
new_class.tpe = param
new_params.append(new_class)
else:
new_params.append(param.type if isinstance(param, np.dtype) else param)
return Tuple[tuple(new_params)]
|
This is a workaround to support variadic generics in DataFrame.
See https://github.com/python/typing/issues/193;
we always wrap the given type hints in a tuple to mimic a variadic generic.
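For illustration, a hedged sketch of how the helper above normalizes DataFrame['name': type] hints, assuming koalas is installed and the private helper is importable from databricks.koalas.frame:
import numpy as np
from databricks.koalas.frame import _create_tuple_for_frame_type  # private helper shown above
# Mimic DataFrame["id": int, "value": np.dtype("float64")]: each column hint arrives as slice(name, type).
hinted = _create_tuple_for_frame_type([slice("id", int), slice("value", np.dtype("float64"))])
print(hinted)  # typing.Tuple[NameType, NameType], each class carrying .name and .tpe for one column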
|
https://github.com/databricks/koalas/blob/e971d6f37ede45297bbf9d509ae2a7b51717f322/databricks/koalas/frame.py#L338-L386
|
from collections import OrderedDict, defaultdict, namedtuple
from collections.abc import Mapping
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
import types
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import (
Any,
Optional,
List,
Tuple,
Union,
Generic,
TypeVar,
Iterable,
Iterator,
Dict,
Callable,
cast,
TYPE_CHECKING,
)
import datetime
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like, is_scalar
from pandas.api.extensions import ExtensionDtype
from pandas.tseries.frequencies import DateOffset, to_offset
if TYPE_CHECKING:
from pandas.io.formats.style import Styler
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
import pyspark
from pyspark import StorageLevel
from pyspark import sql as spark
from pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (
BooleanType,
DoubleType,
FloatType,
NumericType,
StringType,
StructType,
StructField,
ArrayType,
)
from pyspark.sql.window import Window
from databricks import koalas as ks
from databricks.koalas.accessors import KoalasFrameMethods
from databricks.koalas.config import option_context, get_option
from databricks.koalas.spark import functions as SF
from databricks.koalas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods
from databricks.koalas.utils import (
align_diff_frames,
column_labels_level,
combine_frames,
default_session,
is_name_like_tuple,
is_name_like_value,
is_testing,
name_like_string,
same_anchor,
scol_for,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
validate_how,
verify_temp_column_name,
)
from databricks.koalas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from databricks.koalas.generic import Frame
from databricks.koalas.internal import (
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.typedef import (
as_spark_type,
infer_return_type,
spark_type_to_pandas_dtype,
DataFrameType,
SeriesType,
Scalar,
ScalarType,
)
from databricks.koalas.plot import KoalasPlotAccessor
if TYPE_CHECKING:
from databricks.koalas.indexes import Index
from databricks.koalas.series import Series
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$"
)
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with the operator version, which returns the same
results. Also the reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(df)
angles degrees
circle 0 720
triangle 6 360
rectangle 8 720
>>> df + df + df
angles degrees
circle 0 1080
triangle 9 540
rectangle 12 1080
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.floordiv(10)
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.rfloordiv(10) # doctest: +SKIP
angles degrees
circle inf 0.0
triangle 3.0 0.0
rectangle 2.0 0.0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
T = TypeVar("T")
|
Apache License 2.0
|
docusign/docusign-python-client
|
docusign_esign/models/document_template.py
|
DocumentTemplate.document_end_page
|
python
|
def document_end_page(self, document_end_page):
self._document_end_page = document_end_page
|
Sets the document_end_page of this DocumentTemplate.
:param document_end_page: The document_end_page of this DocumentTemplate.
:type: str
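A short usage sketch, assuming the docusign_esign package is installed; the page value is illustrative:
from docusign_esign.models.document_template import DocumentTemplate
template = DocumentTemplate()
template.document_end_page = "5"   # exercises the setter shown above
print(template.document_end_page)  # "5"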
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/document_template.py#L82-L91
|
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class DocumentTemplate(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'document_end_page': 'str',
'document_id': 'str',
'document_start_page': 'str',
'error_details': 'ErrorDetails',
'template_id': 'str'
}
attribute_map = {
'document_end_page': 'documentEndPage',
'document_id': 'documentId',
'document_start_page': 'documentStartPage',
'error_details': 'errorDetails',
'template_id': 'templateId'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._document_end_page = None
self._document_id = None
self._document_start_page = None
self._error_details = None
self._template_id = None
self.discriminator = None
setattr(self, "_{}".format('document_end_page'), kwargs.get('document_end_page', None))
setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None))
setattr(self, "_{}".format('document_start_page'), kwargs.get('document_start_page', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('template_id'), kwargs.get('template_id', None))
@property
def document_end_page(self):
return self._document_end_page
@document_end_page.setter
|
MIT License
|
centerforopenscience/osf.io
|
tests/base.py
|
CaptureSignals.signals_sent
|
python
|
def signals_sent(self):
return {signal for signal, records in self._records.items() if records}
|
Return a set of the signals sent.
:rtype: set of blinker `NamedSignals`.
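A hedged usage sketch of the surrounding CaptureSignals helper (defined in the test base module below), assuming blinker is installed; the signal itself is made up for illustration:
import blinker
demo_signal = blinker.signal('demo-signal')       # hypothetical signal, for illustration only
with CaptureSignals([demo_signal]) as captured:
    demo_signal.send(None, value=42)
assert demo_signal in captured.signals_sent()     # the signal fired, so it is reported as sent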
|
https://github.com/centerforopenscience/osf.io/blob/6552a01fe250997cd3eb67cf72fc7157d9bc5af6/tests/base.py#L355-L360
|
import abc
import datetime as dt
import functools
import logging
import re
import unittest
import uuid
import blinker
import responses
import mock
import pytest
from django.test import TestCase as DjangoTestCase
from django.test import override_settings
from faker import Factory
from framework.auth.core import Auth
from framework.celery_tasks.handlers import celery_before_request
from framework.celery_tasks.handlers import handlers as celery_handlers
from framework.django.handlers import handlers as django_handlers
from framework.flask import rm_handlers
from osf.models import RegistrationSchema
from website import settings
from website.app import init_app
from website.notifications.listeners import (subscribe_contributor,
subscribe_creator)
from website.project.signals import contributor_added, project_created
from website.project.views.contributor import notify_added_contributor
from website.signals import ALL_SIGNALS
from webtest_plus import TestApp
from .json_api_test_app import JSONAPITestApp
from nose.tools import *
logger = logging.getLogger(__name__)
def get_default_metaschema():
return RegistrationSchema.objects.first()
try:
test_app = init_app(routes=True, set_backends=False)
except AssertionError:
test_app = init_app(routes=False, set_backends=False)
rm_handlers(test_app, django_handlers)
rm_handlers(test_app, celery_handlers)
test_app.testing = True
SILENT_LOGGERS = [
'api.caching.tasks',
'factory.generate',
'factory.containers',
'framework.analytics',
'framework.auth.core',
'website.app',
'website.archiver.tasks',
'website.mails',
'website.notifications.listeners',
'website.search.elastic_search',
'website.search_migration.migrate',
'website.util.paths',
'requests_oauthlib.oauth2_session',
'raven.base.Client',
'raven.contrib.django.client.DjangoClient',
'transitions.core',
'MARKDOWN',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
fake = Factory.create()
@pytest.mark.django_db
class DbTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(DbTestCase, cls).setUpClass()
cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
cls._original_bcrypt_log_rounds = settings.BCRYPT_LOG_ROUNDS
settings.BCRYPT_LOG_ROUNDS = 4
@classmethod
def tearDownClass(cls):
super(DbTestCase, cls).tearDownClass()
settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
settings.BCRYPT_LOG_ROUNDS = cls._original_bcrypt_log_rounds
class AppTestCase(unittest.TestCase):
PUSH_CONTEXT = True
DISCONNECTED_SIGNALS = {
contributor_added: [notify_added_contributor]
}
def setUp(self):
super(AppTestCase, self).setUp()
self.app = TestApp(test_app)
self.app.lint = False
if not self.PUSH_CONTEXT:
return
self.context = test_app.test_request_context(headers={
'Remote-Addr': '146.9.219.56',
'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3'
})
self.context.push()
with self.context:
celery_before_request()
for signal in self.DISCONNECTED_SIGNALS:
for receiver in self.DISCONNECTED_SIGNALS[signal]:
signal.disconnect(receiver)
def tearDown(self):
super(AppTestCase, self).tearDown()
if not self.PUSH_CONTEXT:
return
with mock.patch('website.mailchimp_utils.get_mailchimp_api'):
self.context.pop()
for signal in self.DISCONNECTED_SIGNALS:
for receiver in self.DISCONNECTED_SIGNALS[signal]:
signal.connect(receiver)
class ApiAppTestCase(unittest.TestCase):
allow_database_queries = True
def setUp(self):
super(ApiAppTestCase, self).setUp()
self.app = JSONAPITestApp()
class SearchTestCase(unittest.TestCase):
def setUp(self):
settings.ELASTIC_INDEX = uuid.uuid1().hex
settings.ELASTIC_TIMEOUT = 60
from website.search import elastic_search
elastic_search.INDEX = settings.ELASTIC_INDEX
elastic_search.create_index(settings.ELASTIC_INDEX)
super(SearchTestCase, self).setUp()
def tearDown(self):
super(SearchTestCase, self).tearDown()
from website.search import elastic_search
elastic_search.delete_index(settings.ELASTIC_INDEX)
class OsfTestCase(DbTestCase, AppTestCase, SearchTestCase):
pass
class ApiTestCase(DbTestCase, ApiAppTestCase, SearchTestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
settings.USE_EMAIL = False
class ApiAddonTestCase(ApiTestCase):
DISABLE_OUTGOING_CONNECTIONS = True
@abc.abstractproperty
def short_name(self):
pass
@abc.abstractproperty
def addon_type(self):
pass
@abc.abstractmethod
def _apply_auth_configuration(self):
pass
@abc.abstractmethod
def _set_urls(self):
pass
def _settings_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'folder_id': '1234567890',
'owner': self.node
}
def setUp(self):
super(ApiAddonTestCase, self).setUp()
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
from addons.base.models import (
BaseOAuthNodeSettings,
BaseOAuthUserSettings
)
assert self.addon_type in ('CONFIGURABLE', 'OAUTH', 'UNMANAGEABLE', 'INVALID')
self.account = None
self.node_settings = None
self.user_settings = None
self.user = AuthUserFactory()
self.auth = Auth(self.user)
self.node = ProjectFactory(creator=self.user)
if self.addon_type not in ('UNMANAGEABLE', 'INVALID'):
if self.addon_type in ('OAUTH', 'CONFIGURABLE'):
self.account = self.AccountFactory()
self.user.external_accounts.add(self.account)
self.user.save()
self.user_settings = self.user.get_or_add_addon(self.short_name)
self.node_settings = self.node.get_or_add_addon(self.short_name, auth=self.auth)
if self.addon_type in ('OAUTH', 'CONFIGURABLE'):
self.node_settings.set_auth(self.account, self.user)
self._apply_auth_configuration()
if self.addon_type in ('OAUTH', 'CONFIGURABLE'):
assert isinstance(self.node_settings, BaseOAuthNodeSettings)
assert isinstance(self.user_settings, BaseOAuthUserSettings)
self.node_settings.reload()
self.user_settings.reload()
self.account_id = self.account._id if self.account else None
self.set_urls()
def tearDown(self):
super(ApiAddonTestCase, self).tearDown()
self.user.delete()
self.node.delete()
if self.account:
self.account.delete()
@override_settings(ROOT_URLCONF='admin.base.urls')
class AdminTestCase(DbTestCase, DjangoTestCase, SearchTestCase):
pass
class NotificationTestCase(OsfTestCase):
DISCONNECTED_SIGNALS = {
contributor_added: [notify_added_contributor, subscribe_contributor],
project_created: [subscribe_creator]
}
def setUp(self):
super(NotificationTestCase, self).setUp()
def tearDown(self):
super(NotificationTestCase, self).tearDown()
class ApiWikiTestCase(ApiTestCase):
def setUp(self):
from osf_tests.factories import AuthUserFactory
super(ApiWikiTestCase, self).setUp()
self.user = AuthUserFactory()
self.non_contributor = AuthUserFactory()
def _add_project_wiki_page(self, node, user):
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
with mock.patch('osf.models.AbstractNode.update_search'):
wiki_page = WikiFactory(node=node, user=user)
wiki_version = WikiVersionFactory(wiki_page=wiki_page)
return wiki_page
def _add_project_wiki_version(self, node, user):
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
with mock.patch('osf.models.AbstractNode.update_search'):
wiki_page = WikiFactory(node=node, user=user)
wiki_version = WikiVersionFactory(wiki_page=wiki_page, user=user)
return wiki_version
class CaptureSignals(object):
def __init__(self, signals):
self._records = {}
self._receivers = {}
for signal in signals:
self._records[signal] = []
self._receivers[signal] = functools.partial(self._record, signal)
def __getitem__(self, signal):
if isinstance(signal, blinker.base.NamedSignal):
return self._records[signal]
else:
super(CaptureSignals, self).__setitem__(signal)
def _record(self, signal, *args, **kwargs):
self._records[signal].append((args, kwargs))
def __enter__(self):
for signal, receiver in self._receivers.items():
signal.connect(receiver)
return self
def __exit__(self, type, value, traceback):
for signal, receiver in self._receivers.items():
signal.disconnect(receiver)
|
Apache License 2.0
|
fredreichbier/genie
|
browse_slp.py
|
SLPLoader.get_files
|
python
|
def get_files(self):
for table in self.drs_file.tables:
for embedded in table.embedded_files.itervalues():
fname = '%d.%s' % (embedded.resource_id, table.file_extension)
yield (embedded.resource_id, fname)
|
Yield all SLP files as (resource id, pseudo-filename) tuples.
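A consumption sketch, assuming the Python 2 environment used by the module below and an Age of Empires data directory; both names are illustrative:
loader = SLPLoader('/path/to/aoe/data', audio_player=None)   # hypothetical data directory
loader.drs_filename = 'graphics.drs'                         # hypothetical DRS archive name
for resource_id, fname in loader.get_files():
    print('%d %s' % (resource_id, fname))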
|
https://github.com/fredreichbier/genie/blob/befe10c56207fb471d7f8a9fb9f8ee2e9bee6617/browse_slp.py#L45-L52
|
import sys
import argparse
import shlex
import traceback
import cmd
import os
import fnmatch
import subprocess
from StringIO import StringIO
import pyglet
from genie import slp, drs
from genie.environment import Environment, INTERFAC_DRS
from genie.slp.pyglet_adapter import PygletAdapter, load_aoe_animations
BAR_HEIGHT = 12
def _get_resource_id(filename):
if not filename:
return -1
return int(filename.split('.')[0])
class SLPLoader(object):
def __init__(self, path, audio_player, interfac_drs=INTERFAC_DRS):
self.env = Environment(path, interfac_drs=interfac_drs)
self._drs_filename = None
self.audio_player = audio_player
self.palette = 0
self.player = 0
def _set_drs_filename(self, drs_filename):
self._drs_filename = drs_filename
def _get_drs_filename(self):
return self._drs_filename
drs_filename = property(_get_drs_filename, _set_drs_filename)
@property
def drs_file(self):
return self.env.get_drs(self.drs_filename)
|
BSD 2-Clause Simplified License
|
mcieslik-mctp/papy
|
src/papy/util/runtime.py
|
get_runtime
|
python
|
def get_runtime():
PAPY_RUNTIME = {}
return PAPY_RUNTIME
|
Returns a PAPY_RUNTIME dictionary.
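A minimal usage sketch; note that, as written above, every call builds and returns a fresh dictionary rather than a shared singleton:
runtime = get_runtime()
runtime['workers'] = 4      # hypothetical key, for illustration only
print(get_runtime())        # {} -- a new empty dict on each call, so the key above is not retained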
|
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/util/runtime.py#L9-L15
|
MIT License
|
|
volkamerlab/opencadd
|
opencadd/io/dataframe.py
|
DataFrame.from_text
|
python
|
def from_text(cls, text, ext, verbose=False, **kwargs):
if ext == "mol2":
return cls._from_mol2_text(text, verbose)
elif ext == "pdb":
return cls._from_pdb_text(text, verbose)
else:
raise ValueError(f"The {ext} format is not supported or invalid.")
|
Load structures as DataFrame from text (file content as string).
Parameters
----------
text : str
Structure file content as string.
ext : str
Structure format: "pdb" or "mol2".
verbose : bool
Show only default columns (False) or additionally input-format specific columns (True).
Returns
-------
pandas.DataFrame
Structure as DataFrame.
|
https://github.com/volkamerlab/opencadd/blob/ce20b50b7e65b6a11e5f1fed050d2d4b7f3b65b9/opencadd/io/dataframe.py#L55-L79
|
import logging
from biopandas.mol2 import PandasMol2
from biopandas.pdb import PandasPdb
import numpy as np
import pandas as pd
from .core import _Base
from .schema import DATAFRAME_COLUMNS, PDB_COLUMNS, MOL2_COLUMNS
logger = logging.getLogger(__name__)
class DataFrame(_Base):
@classmethod
def from_file(cls, filepath, verbose=False, **kwargs):
filepath = cls._convert_filepath(filepath)
if filepath.suffix == ".mol2":
return cls._from_mol2_file(filepath, verbose)
elif filepath.suffix == ".pdb":
return cls._from_pdb_file(filepath, verbose)
else:
raise ValueError(f"The {filepath.suffix} format is not supported or invalid.")
@classmethod
|
MIT License
|
tensorflow/privacy
|
tensorflow_privacy/privacy/analysis/rdp_privacy_accountant.py
|
_compute_rdp_sample_wor_gaussian_int
|
python
|
def _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha):
max_alpha = 256
assert isinstance(alpha, six.integer_types)
if np.isinf(alpha):
return np.inf
elif alpha == 1:
return 0
def cgf(x):
return x * 1.0 * (x + 1) / (2.0 * sigma**2)
def func(x):
return 1.0 * x / (2.0 * sigma**2)
log_a = 0
log_f2m1 = func(2.0) + np.log(1 - np.exp(-func(2.0)))
if alpha <= max_alpha:
deltas, _ = _get_forward_diffs(cgf, alpha)
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
elif i > 2:
delta_lo = deltas[int(2 * np.floor(i / 2.0)) - 1]
delta_hi = deltas[int(2 * np.ceil(i / 2.0)) - 1]
s = np.log(4) + 0.5 * (delta_lo + delta_hi)
s = np.minimum(s, np.log(2) + cgf(i - 1))
s += i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return float(log_a)
else:
for i in range(2, alpha + 1):
if i == 2:
s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
np.log(4) + log_f2m1,
func(2.0) + np.log(2))
else:
s = np.log(2) + cgf(i - 1) + i * np.log(q) + _log_comb(alpha, i)
log_a = _log_add(log_a, s)
return log_a
|
Compute log(A_alpha) for integer alpha, subsampling without replacement.
When alpha is at most max_alpha, the bound from Theorem 27 is computed exactly;
otherwise the bound is computed with the Stirling approximation.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
|
https://github.com/tensorflow/privacy/blob/977647a3bf3ff11643804169b52be5bdddb8f666/tensorflow_privacy/privacy/analysis/rdp_privacy_accountant.py#L417-L483
|
import math
from typing import Collection, Optional
import numpy as np
from scipy import special
import six
from tensorflow_privacy.privacy.analysis import dp_event
from tensorflow_privacy.privacy.analysis import privacy_accountant
NeighborRel = privacy_accountant.NeighboringRelation
def _log_add(logx, logy):
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf:
return b
return math.log1p(math.exp(a - b)) + b
def _log_sub(logx, logy):
if logx < logy:
raise ValueError('The result of subtraction must be non-negative.')
if logy == -np.inf:
return logx
if logx == logy:
return -np.inf
try:
return math.log(math.expm1(logx - logy)) + logy
except OverflowError:
return logx
def _log_sub_sign(logx, logy):
if logx > logy:
s = True
mag = logx + np.log(1 - np.exp(logy - logx))
elif logx < logy:
s = False
mag = logy + np.log(1 - np.exp(logx - logy))
else:
s = True
mag = -np.inf
return s, mag
def _log_comb(n, k):
return (special.gammaln(n + 1) - special.gammaln(k + 1) -
special.gammaln(n - k + 1))
def _compute_log_a_int(q, sigma, alpha):
assert isinstance(alpha, six.integer_types)
log_a = -np.inf
for i in range(alpha + 1):
log_coef_i = (
_log_comb(alpha, i) + i * math.log(q) + (alpha - i) * math.log(1 - q))
s = log_coef_i + (i * i - i) / (2 * (sigma**2))
log_a = _log_add(log_a, s)
return float(log_a)
def _compute_log_a_frac(q, sigma, alpha):
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma**2 * math.log(1 / q - 1) + .5
while True:
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _log_erfc(x):
try:
return math.log(2) + special.log_ndtr(-x * 2**.5)
except NameError:
r = special.erfc(x)
if r == 0.0:
return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
.625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
else:
return math.log(r)
def _compute_delta(orders, rdp, epsilon):
if epsilon < 0:
raise ValueError(f'Epsilon cannot be negative. Found {epsilon}.')
if len(orders) != len(rdp):
raise ValueError('Input lists must have the same length.')
logdeltas = []
for (a, r) in zip(orders, rdp):
if a < 1:
raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
if r < 0:
raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')
if r == 0:
logdelta = -np.inf
else:
logdelta = 0.5 * math.log1p(-math.exp(-r))
if a > 1.01:
rdp_bound = (a - 1) * (r - epsilon + math.log1p(-1 / a)) - math.log(a)
logdelta = min(logdelta, rdp_bound)
logdeltas.append(logdelta)
return min(math.exp(np.min(logdeltas)), 1.)
def _compute_epsilon(orders, rdp, delta):
if delta < 0:
raise ValueError(f'Delta cannot be negative. Found {delta}.')
if delta == 0:
if all(r == 0 for r in rdp):
return 0
else:
return np.inf
if len(orders) != len(rdp):
raise ValueError('Input lists must have the same length.')
eps = []
for (a, r) in zip(orders, rdp):
if a < 1:
raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
if r < 0:
raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')
if delta**2 + math.expm1(-r) > 0:
epsilon = 0
elif a > 1.01:
epsilon = r + math.log1p(-1 / a) - math.log(delta * a) / (a - 1)
else:
epsilon = np.inf
eps.append(epsilon)
return max(0, np.min(eps))
def _stable_inplace_diff_in_log(vec, signs, n=-1):
assert vec.shape == signs.shape
if n < 0:
n = np.max(vec.shape) - 1
else:
assert np.max(vec.shape) >= n + 1
for j in range(0, n, 1):
if signs[j] == signs[j + 1]:
signs[j], vec[j] = _log_sub_sign(vec[j + 1], vec[j])
if not signs[j + 1]:
signs[j] = ~signs[j]
else:
vec[j] = _log_add(vec[j], vec[j + 1])
signs[j] = signs[j + 1]
def _get_forward_diffs(fun, n):
func_vec = np.zeros(n + 3)
signs_func_vec = np.ones(n + 3, dtype=bool)
deltas = np.zeros(n + 2)
signs_deltas = np.zeros(n + 2, dtype=bool)
for i in range(1, n + 3, 1):
func_vec[i] = fun(1.0 * (i - 1))
for i in range(0, n + 2, 1):
_stable_inplace_diff_in_log(func_vec, signs_func_vec, n=n + 2 - i)
deltas[i] = func_vec[0]
signs_deltas[i] = signs_func_vec[0]
return deltas, signs_deltas
def _compute_log_a(q, noise_multiplier, alpha):
if float(alpha).is_integer():
return _compute_log_a_int(q, noise_multiplier, int(alpha))
else:
return _compute_log_a_frac(q, noise_multiplier, alpha)
def _compute_rdp_poisson_subsampled_gaussian(q, noise_multiplier, orders):
def compute_one_order(q, alpha):
if np.isinf(alpha) or noise_multiplier == 0:
return np.inf
if q == 0:
return 0
if q == 1.:
return alpha / (2 * noise_multiplier**2)
return _compute_log_a(q, noise_multiplier, alpha) / (alpha - 1)
return np.array([compute_one_order(q, order) for order in orders])
def _compute_rdp_sample_wor_gaussian(q, noise_multiplier, orders):
return np.array([
_compute_rdp_sample_wor_gaussian_scalar(q, noise_multiplier, order)
for order in orders
])
def _compute_rdp_sample_wor_gaussian_scalar(q, sigma, alpha):
assert (q <= 1) and (q >= 0) and (alpha >= 1)
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
if float(alpha).is_integer():
return _compute_rdp_sample_wor_gaussian_int(q, sigma, int(alpha)) / (
alpha - 1)
else:
alpha_f = math.floor(alpha)
alpha_c = math.ceil(alpha)
x = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_f)
y = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_c)
t = alpha - alpha_f
return ((1 - t) * x + t * y) / (alpha - 1)
|
Apache License 2.0
|
larsks/kiwi
|
kiwi/addresswatcher.py
|
iter_events
|
python
|
def iter_events(url, interval=1, recursive=True):
waitindex = None
while True:
try:
params = {'recursive': recursive,
'wait': True,
'waitIndex': waitindex}
r = requests.get(url, params=params)
r.raise_for_status()
event = r.json()
waitindex = event['node']['modifiedIndex'] + 1
yield event
except Exception as exc:
LOG.error('connection failed: %s' % exc)
time.sleep(interval)
|
Produces an infinite stream of events from etcd for the given URL.
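A consumption sketch, assuming a reachable etcd v2 endpoint; the key URL is illustrative:
# Watch a hypothetical etcd v2 key and print each event's modifiedIndex as it arrives.
for event in iter_events('http://127.0.0.1:4001/v2/keys/services', interval=2):
    print(event['node']['modifiedIndex'])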
|
https://github.com/larsks/kiwi/blob/8f591dd8bc0f8c8fbb2bc587eb7eb6e7092b4c8d/kiwi/addresswatcher.py#L12-L32
|
import logging
import requests
import time
import re
import defaults
LOG = logging.getLogger(__name__)
re_address = re.compile(r'\d+\.\d+\.\d+\.\d+')
|
Apache License 2.0
|
uber-archive/plato-research-dialogue-system
|
plato/agent/component/dialogue_policy/deep_learning/reinforce_policy.py
|
ReinforcePolicy.softmax
|
python
|
def softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
|
Calculates the softmax of x
:param x: a numpy array of scores
:return: the softmax of the array, a probability distribution over its entries
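A quick standalone numeric check of the same numerically stable formulation, assuming only numpy:
import numpy as np
def stable_softmax(x):
    # Subtract the max before exponentiating, exactly as in the method above.
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()
scores = np.array([2.0, 1.0, 0.1])
print(stable_softmax(scores))        # approximately [0.659, 0.242, 0.099]
print(stable_softmax(scores).sum())  # 1.0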
|
https://github.com/uber-archive/plato-research-dialogue-system/blob/1db30be390df6903be89fdf5a515debc7d7defb4/plato/agent/component/dialogue_policy/deep_learning/reinforce_policy.py#L345-L354
|
__author__ = "Alexandros Papangelis"
from .. import dialogue_policy
from plato.agent.component.dialogue_policy.slot_filling_policy import HandcraftedPolicy
from plato.domain.ontology import Ontology
from plato.domain.database import DataBase
from plato.dialogue.action import DialogueAct, DialogueActItem, Operator
from plato.dialogue.state import SlotFillingDialogueState
from plato.agent.component.user_simulator. agenda_based_user_simulator.agenda_based_us import AgendaBasedUS
from copy import deepcopy
import numpy as np
import random
import os
import pickle
class ReinforcePolicy(dialogue_policy.DialoguePolicy):
def __init__(self, args):
super(ReinforcePolicy, self).__init__()
self.ontology = None
if 'ontology' in args:
ontology = args['ontology']
if isinstance(ontology, Ontology):
self.ontology = ontology
else:
raise ValueError('ReinforcePolicy Unacceptable '
'ontology type %s ' % ontology)
else:
raise ValueError('ReinforcePolicy: No ontology provided')
self.database = None
if 'database' in args:
database = args['database']
if isinstance(database, DataBase):
self.database = database
else:
raise ValueError('ReinforcePolicy: Unacceptable '
'database type %s ' % database)
else:
raise ValueError('ReinforcePolicy: No database provided')
self.agent_id = args['agent_id'] if 'agent_id' in args else 0
self.agent_role = args['agent_role'] if 'agent_role' in args else 'system'
domain = args['domain'] if 'domain' in args else None
self.alpha = args['alpha'] if 'alpha' in args else 0.2
self.gamma = args['gamma'] if 'gamma' in args else 0.95
self.epsilon = args['epsilon'] if 'epsilon' in args else 0.95
self.alpha_decay_rate = args['alpha_decay'] if 'alpha_decay' in args else 0.995
self.exploration_decay_rate = args['epsilon_decay'] if 'epsilon_decay' in args else 0.9995
self.IS_GREEDY = False
self.policy_path = None
self.weights = None
self.sess = None
self.warmup_policy = None
self.warmup_simulator = None
if self.agent_role == 'system':
self.warmup_policy = HandcraftedPolicy({
'ontology': self.ontology})
elif self.agent_role == 'user':
usim_args = dict(
zip(['ontology', 'database'],
[self.ontology, self.database]))
self.warmup_simulator = AgendaBasedUS(usim_args)
self.tf_scope = "policy_" + self.agent_role + '_' + str(self.agent_id)
self.is_training = True
self.informable_slots = deepcopy(list(self.ontology.ontology['informable'].keys()))
self.requestable_slots = deepcopy(self.ontology.ontology['requestable'])
self.system_requestable_slots = deepcopy(self.ontology.ontology['system_requestable'])
if not domain:
self.NStateFeatures = 56
self.dstc2_acts = ['inform', 'offer', 'request', 'canthelp',
'affirm', 'negate', 'deny', 'ack', 'thankyou',
'bye', 'reqmore', 'hello', 'welcomemsg',
'expl-conf', 'select', 'repeat', 'reqalts',
'confirm-domain', 'confirm']
else:
if domain in ['CamRest', 'SFH', 'SlotFilling']:
d_state = SlotFillingDialogueState(
{'slots': self.system_requestable_slots})
if domain == 'CamRest':
self.dstc2_acts_sys = ['offer', 'canthelp', 'affirm',
'deny', 'ack', 'bye',
'reqmore', 'welcomemsg',
'expl-conf', 'select', 'repeat',
'confirm-domain', 'confirm']
self.dstc2_acts_usr = ['affirm', 'negate', 'deny', 'ack',
'thankyou', 'bye',
'reqmore', 'hello', 'expl-conf',
'repeat', 'reqalts', 'restart',
'confirm']
else:
print('Warning! domain has not been defined. Using '
'Slot-Filling dialogue State')
d_state = SlotFillingDialogueState({'slots': self.informable_slots})
d_state.initialize()
self.NStateFeatures = len(self.encode_state(d_state))
print('Reinforce policy {0} automatically determined '
'number of state features: {1}'
.format(self.agent_role, self.NStateFeatures))
if domain == 'CamRest' and self.dstc2_acts_sys:
if self.agent_role == 'system':
self.NActions = len(self.dstc2_acts_sys) + len(self.requestable_slots) + len(self.system_requestable_slots)
self.NOtherActions = len(self.dstc2_acts_usr) + 2 * len(self.requestable_slots)
elif self.agent_role == 'user':
self.NActions = len(self.dstc2_acts_usr) + 2 * len(self.requestable_slots)
self.NOtherActions = len(self.dstc2_acts_sys) + len(self.requestable_slots) + len(self.system_requestable_slots)
else:
if self.agent_role == 'system':
self.NActions = 3 + len(self.system_requestable_slots) + len(self.requestable_slots)
self.NOtherActions = 2 + len(self.requestable_slots) + len(self.requestable_slots)
elif self.agent_role == 'user':
self.NActions = 2 + len(self.requestable_slots) + len(self.requestable_slots)
self.NOtherActions = 3 + len(self.system_requestable_slots) + len(self.requestable_slots)
print('Reinforce {0} policy Number of Actions: {1}'
.format(self.agent_role, self.NActions))
def initialize(self, args):
if 'is_training' in args:
self.is_training = bool(args['is_training'])
if self.agent_role == 'user' and self.warmup_simulator:
if 'goal' in args:
self.warmup_simulator.initialize({args['goal']})
else:
print('WARNING ! No goal provided for Reinforce policy '
'user simulator @ initialize')
self.warmup_simulator.initialize({})
if 'policy_path' in args:
self.policy_path = args['policy_path']
if 'learning_rate' in args:
self.alpha = args['learning_rate']
if 'learning_decay_rate' in args:
self.alpha_decay_rate = args['learning_decay_rate']
if 'discount_factor' in args:
self.gamma = args['discount_factor']
if 'exploration_rate' in args:
self.alpha = args['exploration_rate']
if 'exploration_decay_rate' in args:
self.exploration_decay_rate = args['exploration_decay_rate']
if self.weights is None:
self.weights = np.random.rand(self.NStateFeatures, self.NActions)
def restart(self, args):
if self.agent_role == 'user' and self.warmup_simulator:
if 'goal' in args:
self.warmup_simulator.initialize(args)
else:
print('WARNING! No goal provided for Reinforce '
'policy user simulator @ restart')
self.warmup_simulator.initialize({})
def next_action(self, state):
if self.is_training and random.random() < self.epsilon:
if random.random() < 0.5:
print('--- {0}: Selecting warmup action.'
.format(self.agent_role))
if self.agent_role == 'system':
return self.warmup_policy.next_action(state)
else:
self.warmup_simulator.receive_input(
state.user_acts, state.user_goal)
return self.warmup_simulator.respond()
else:
print('--- {0}: Selecting random action.'
.format(self.agent_role))
return self.decode_action(
random.choice(
range(0, self.NActions)),
self.agent_role == "system")
probs = self.calculate_policy(self.encode_state(state))
if any(np.isnan(probs)):
print('WARNING! NAN detected in action probabilities! Selecting '
'random action.')
return self.decode_action(
random.choice(range(0, self.NActions)),
self.agent_role == "system")
if self.IS_GREEDY:
max_pi = max(probs)
maxima = [i for i, j in enumerate(probs) if j == max_pi]
if maxima:
sys_acts = self.decode_action(
random.choice(maxima), self.agent_role == 'system')
else:
print(
f'--- {self.agent_role}: Warning! No maximum value '
f'identified for policy. Selecting random action.')
return self.decode_action(
random.choice(
range(0, self.NActions)),
self.agent_role == 'system')
else:
top_3 = np.argsort(-probs)[0:3]
sys_acts = self.decode_action(
random.choices(
top_3, probs[top_3])[0], self.agent_role == 'system')
return sys_acts
@staticmethod
|
Apache License 2.0
|
pykeen/pykeen
|
src/pykeen/triples/triples_factory.py
|
TriplesFactory.entity_word_cloud
|
python
|
def entity_word_cloud(self, top: Optional[int] = None):
return self._word_cloud(
ids=self.mapped_triples[:, [0, 2]],
id_to_label=self.entity_labeling.id_to_label,
top=top or 100,
)
|
Make a word cloud based on the frequency of occurrence of each entity in a Jupyter notebook.
:param top: The number of top entities to show. Defaults to 100.
.. warning::
This function requires the ``word_cloud`` package. Use ``pip install pykeen[plotting]`` to
install it automatically, or install it yourself with
``pip install git+https://github.com/kavgan/word_cloud.git``.
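A usage sketch, assuming pykeen with the plotting extra is installed and the code runs in a Jupyter notebook; the dataset choice is illustrative:
from pykeen.datasets import Nations   # small built-in benchmark dataset
factory = Nations().training          # a TriplesFactory with labeled entities
factory.entity_word_cloud(top=25)     # renders a word cloud of the 25 most frequent entities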
|
https://github.com/pykeen/pykeen/blob/7ee2a6f9bc43200d44c874bfc3de0a747bd3632d/src/pykeen/triples/triples_factory.py#L869-L884
|
import dataclasses
import itertools
import logging
import pathlib
import re
from typing import Any, Callable, Collection, Dict, List, Mapping, Optional, Sequence, Set, TextIO, Type, Union, cast
import numpy as np
import pandas as pd
import torch
from .instances import Instances, LCWAInstances, SLCWAInstances
from .splitting import split
from .utils import TRIPLES_DF_COLUMNS, get_entities, get_relations, load_triples, tensor_to_df
from ..typing import EntityMapping, LabeledTriples, MappedTriples, RelationMapping, TorchRandomHint
from ..utils import compact_mapping, format_relative_comparison, invert_mapping, torch_is_in_1d
__all__ = [
"CoreTriplesFactory",
"TriplesFactory",
"create_entity_mapping",
"create_relation_mapping",
"INVERSE_SUFFIX",
"cat_triples",
"splits_steps",
"splits_similarity",
]
logger = logging.getLogger(__name__)
INVERSE_SUFFIX = "_inverse"
def create_entity_mapping(triples: LabeledTriples) -> EntityMapping:
heads, tails = triples[:, 0], triples[:, 2]
entity_labels = sorted(set(heads).union(tails))
return {str(label): i for (i, label) in enumerate(entity_labels)}
def create_relation_mapping(relations: set) -> RelationMapping:
relation_labels = sorted(
set(relations),
key=lambda x: (re.sub(f"{INVERSE_SUFFIX}$", "", x), x.endswith(f"{INVERSE_SUFFIX}")),
)
return {str(label): i for (i, label) in enumerate(relation_labels)}
def _map_triples_elements_to_ids(
triples: LabeledTriples,
entity_to_id: EntityMapping,
relation_to_id: RelationMapping,
) -> MappedTriples:
if triples.size == 0:
logger.warning("Provided empty triples to map.")
return torch.empty(0, 3, dtype=torch.long)
entity_getter = np.vectorize(entity_to_id.get)
head_column = entity_getter(triples[:, 0:1], [-1])
tail_column = entity_getter(triples[:, 2:3], [-1])
relation_getter = np.vectorize(relation_to_id.get)
relation_column = relation_getter(triples[:, 1:2], [-1])
head_filter = head_column < 0
relation_filter = relation_column < 0
tail_filter = tail_column < 0
num_no_head = head_filter.sum()
num_no_relation = relation_filter.sum()
num_no_tail = tail_filter.sum()
if (num_no_head > 0) or (num_no_relation > 0) or (num_no_tail > 0):
logger.warning(
f"You're trying to map triples with {num_no_head + num_no_tail} entities and {num_no_relation} relations"
f" that are not in the training set. These triples will be excluded from the mapping.",
)
non_mappable_triples = head_filter | relation_filter | tail_filter
head_column = head_column[~non_mappable_triples, None]
relation_column = relation_column[~non_mappable_triples, None]
tail_column = tail_column[~non_mappable_triples, None]
logger.warning(
f"In total {non_mappable_triples.sum():.0f} from {triples.shape[0]:.0f} triples were filtered out",
)
triples_of_ids = np.concatenate([head_column, relation_column, tail_column], axis=1)
triples_of_ids = np.array(triples_of_ids, dtype=np.int64)
unique_mapped_triples = np.unique(ar=triples_of_ids, axis=0)
return torch.tensor(unique_mapped_triples, dtype=torch.long)
def _get_triple_mask(
ids: Collection[int],
triples: MappedTriples,
columns: Union[int, Collection[int]],
invert: bool = False,
max_id: Optional[int] = None,
) -> torch.BoolTensor:
triples = triples[:, columns]
if isinstance(columns, int):
columns = [columns]
mask = torch_is_in_1d(
query_tensor=triples,
test_tensor=ids,
max_id=max_id,
invert=invert,
)
if len(columns) > 1:
mask = mask.all(dim=-1)
return mask
def _ensure_ids(
labels_or_ids: Union[Collection[int], Collection[str]],
label_to_id: Mapping[str, int],
) -> Collection[int]:
return [label_to_id[l_or_i] if isinstance(l_or_i, str) else l_or_i for l_or_i in labels_or_ids]
@dataclasses.dataclass
class Labeling:
label_to_id: Mapping[str, int]
id_to_label: Mapping[int, str] = dataclasses.field(init=False)
_vectorized_mapper: Callable[..., np.ndarray] = dataclasses.field(init=False)
_vectorized_labeler: Callable[..., np.ndarray] = dataclasses.field(init=False)
def __post_init__(self):
self.id_to_label = invert_mapping(mapping=self.label_to_id)
self._vectorized_mapper = np.vectorize(self.label_to_id.get, otypes=[int])
self._vectorized_labeler = np.vectorize(self.id_to_label.get, otypes=[str])
def label(
self,
ids: Union[int, Sequence[int], np.ndarray, torch.LongTensor],
unknown_label: str = "unknown",
) -> np.ndarray:
if isinstance(ids, torch.Tensor):
ids = ids.cpu().numpy()
if isinstance(ids, int):
ids = [ids]
ids = np.asanyarray(ids)
return self._vectorized_labeler(ids, (unknown_label,))
@dataclasses.dataclass
class CoreTriplesFactory:
def __init__(
self,
mapped_triples: MappedTriples,
num_entities: int,
num_relations: int,
entity_ids: Collection[int],
relation_ids: Collection[int],
create_inverse_triples: bool = False,
metadata: Optional[Mapping[str, Any]] = None,
):
super().__init__()
self.mapped_triples = mapped_triples
self._num_entities = num_entities
self._num_relations = num_relations
self.entity_ids = entity_ids
self.relation_ids = relation_ids
self.create_inverse_triples = create_inverse_triples
if metadata is None:
metadata = dict()
self.metadata = metadata
@classmethod
def create(
cls,
mapped_triples: MappedTriples,
num_entities: Optional[int] = None,
num_relations: Optional[int] = None,
entity_ids: Collection[int] = None,
relation_ids: Collection[int] = None,
create_inverse_triples: bool = False,
metadata: Optional[Mapping[str, Any]] = None,
) -> "CoreTriplesFactory":
if num_entities is None:
num_entities = mapped_triples[:, [0, 2]].max().item() + 1
if num_relations is None:
num_relations = mapped_triples[:, 1].max().item() + 1
if entity_ids is None:
entity_ids = get_entities(mapped_triples)
if relation_ids is None:
relation_ids = get_relations(mapped_triples)
return CoreTriplesFactory(
mapped_triples=mapped_triples,
num_entities=num_entities,
num_relations=num_relations,
entity_ids=entity_ids,
relation_ids=relation_ids,
create_inverse_triples=create_inverse_triples,
metadata=metadata,
)
@property
def num_entities(self) -> int:
return self._num_entities
@property
def num_relations(self) -> int:
if self.create_inverse_triples:
return 2 * self.real_num_relations
return self.real_num_relations
@property
def real_num_relations(self) -> int:
return self._num_relations
@property
def num_triples(self) -> int:
return self.mapped_triples.shape[0]
def extra_repr(self) -> str:
d = [
("num_entities", self.num_entities),
("num_relations", self.num_relations),
("num_triples", self.num_triples),
("inverse_triples", self.create_inverse_triples),
]
d.extend(sorted(self.metadata.items()))
return ", ".join(f'{k}="{v}"' if isinstance(v, (str, pathlib.Path)) else f"{k}={v}" for k, v in d)
def __repr__(self):
return f"{self.__class__.__name__}({self.extra_repr()})"
def with_labels(
self,
entity_to_id: Mapping[str, int],
relation_to_id: Mapping[str, int],
) -> "TriplesFactory":
for name, columns, new_labeling in (
("entity", [0, 2], entity_to_id),
("relation", 1, relation_to_id),
):
existing_ids = set(self.mapped_triples[:, columns].unique().tolist())
if not existing_ids.issubset(new_labeling.values()):
diff = existing_ids.difference(new_labeling.values())
raise ValueError(f"Some existing IDs do not occur in the new {name} labeling: {diff}")
return TriplesFactory(
mapped_triples=self.mapped_triples,
entity_to_id=entity_to_id,
relation_to_id=relation_to_id,
create_inverse_triples=self.create_inverse_triples,
metadata=self.metadata,
)
def get_inverse_relation_id(self, relation: int) -> int:
if not self.create_inverse_triples:
raise ValueError("Can not get inverse triple, they have not been created.")
return self._get_inverse_relation_id(relation)
@staticmethod
def _get_inverse_relation_id(relation_id: Union[int, torch.LongTensor]) -> Union[int, torch.LongTensor]:
return relation_id + 1
def _add_inverse_triples_if_necessary(self, mapped_triples: MappedTriples) -> MappedTriples:
if not self.create_inverse_triples:
return mapped_triples
logger.info("Creating inverse triples.")
h, r, t = mapped_triples.t()
r = 2 * r
return torch.cat(
[
torch.stack([h, r, t], dim=-1),
torch.stack([t, self._get_inverse_relation_id(r), h], dim=-1),
]
)
def create_slcwa_instances(self) -> Instances:
return self._create_instances(SLCWAInstances)
def create_lcwa_instances(self, use_tqdm: Optional[bool] = None, target: Optional[int] = None) -> Instances:
return self._create_instances(LCWAInstances, target=target)
def _create_instances(self, instances_cls: Type[Instances], **kwargs) -> Instances:
return instances_cls.from_triples(
mapped_triples=self._add_inverse_triples_if_necessary(mapped_triples=self.mapped_triples),
num_entities=self.num_entities,
num_relations=self.num_relations,
**kwargs,
)
def get_most_frequent_relations(self, n: Union[int, float]) -> Set[int]:
logger.info(f"applying cutoff of {n} to {self}")
if isinstance(n, float):
assert 0 < n < 1
n = int(self.num_relations * n)
elif not isinstance(n, int):
raise TypeError("n must be either an integer or a float")
uniq, counts = self.mapped_triples[:, 1].unique(return_counts=True)
top_counts, top_ids = counts.topk(k=n, largest=True)
return set(uniq[top_ids].tolist())
def clone_and_exchange_triples(
self,
mapped_triples: MappedTriples,
extra_metadata: Optional[Dict[str, Any]] = None,
keep_metadata: bool = True,
create_inverse_triples: Optional[bool] = None,
) -> "CoreTriplesFactory":
if create_inverse_triples is None:
create_inverse_triples = self.create_inverse_triples
return CoreTriplesFactory(
mapped_triples=mapped_triples,
num_entities=self.num_entities,
num_relations=self.real_num_relations,
entity_ids=self.entity_ids,
relation_ids=self.relation_ids,
create_inverse_triples=create_inverse_triples,
metadata={
**(extra_metadata or {}),
**(self.metadata if keep_metadata else {}),
},
)
def split(
self,
ratios: Union[float, Sequence[float]] = 0.8,
*,
random_state: TorchRandomHint = None,
randomize_cleanup: bool = False,
method: Optional[str] = None,
) -> List["CoreTriplesFactory"]:
return [
self.clone_and_exchange_triples(
mapped_triples=triples,
create_inverse_triples=None if i == 0 else False,
)
for i, triples in enumerate(
split(
mapped_triples=self.mapped_triples,
ratios=ratios,
random_state=random_state,
randomize_cleanup=randomize_cleanup,
method=method,
)
)
]
def get_mask_for_entities(
self,
entities: Collection[int],
invert: bool = False,
) -> torch.BoolTensor:
return _get_triple_mask(
ids=entities,
triples=self.mapped_triples,
columns=(0, 2),
invert=invert,
max_id=self.num_entities,
)
def get_mask_for_relations(
self,
relations: Collection[int],
invert: bool = False,
) -> torch.BoolTensor:
return _get_triple_mask(
ids=relations,
triples=self.mapped_triples,
columns=1,
invert=invert,
max_id=self.num_relations,
)
def tensor_to_df(
self,
tensor: torch.LongTensor,
**kwargs: Union[torch.Tensor, np.ndarray, Sequence],
) -> pd.DataFrame:
return tensor_to_df(tensor=tensor, **kwargs)
def new_with_restriction(
self,
entities: Union[None, Collection[int], Collection[str]] = None,
relations: Union[None, Collection[int], Collection[str]] = None,
invert_entity_selection: bool = False,
invert_relation_selection: bool = False,
) -> "CoreTriplesFactory":
keep_mask: Optional[torch.BoolTensor] = None
extra_metadata = {}
if entities is not None:
if any(isinstance(e, str) for e in entities):
raise ValueError(f"{self.__class__} does not support label-based restriction.")
entities = cast(Collection[int], entities)
extra_metadata["entity_restriction"] = entities
keep_mask = self.get_mask_for_entities(entities=entities, invert=invert_entity_selection)
remaining_entities = self.num_entities - len(entities) if invert_entity_selection else len(entities)
logger.info(f"keeping {format_relative_comparison(remaining_entities, self.num_entities)} entities.")
if relations is not None:
if any(isinstance(r, str) for r in relations):
raise ValueError(f"{self.__class__} does not support label-based restriction.")
relations = cast(Collection[int], relations)
extra_metadata["relation_restriction"] = relations
relation_mask = self.get_mask_for_relations(relations=relations, invert=invert_relation_selection)
remaining_relations = self.num_relations - len(relations) if invert_relation_selection else len(relations)
logger.info(f"keeping {format_relative_comparison(remaining_relations, self.num_relations)} relations.")
keep_mask = relation_mask if keep_mask is None else keep_mask & relation_mask
if keep_mask is None:
return self
num_triples = keep_mask.sum().item()
logger.info(f"keeping {format_relative_comparison(num_triples, self.num_triples)} triples.")
return self.clone_and_exchange_triples(
mapped_triples=self.mapped_triples[keep_mask],
extra_metadata=extra_metadata,
)
class TriplesFactory(CoreTriplesFactory):
def __init__(
self,
mapped_triples: MappedTriples,
entity_to_id: EntityMapping,
relation_to_id: RelationMapping,
create_inverse_triples: bool = False,
metadata: Optional[Mapping[str, Any]] = None,
):
super().__init__(
mapped_triples=mapped_triples,
num_entities=len(entity_to_id),
num_relations=len(relation_to_id),
entity_ids=sorted(entity_to_id.values()),
relation_ids=sorted(relation_to_id.values()),
create_inverse_triples=create_inverse_triples,
metadata=metadata,
)
self.entity_labeling = Labeling(label_to_id=entity_to_id)
self.relation_labeling = Labeling(label_to_id=relation_to_id)
@classmethod
def from_labeled_triples(
cls,
triples: LabeledTriples,
create_inverse_triples: bool = False,
entity_to_id: Optional[EntityMapping] = None,
relation_to_id: Optional[RelationMapping] = None,
compact_id: bool = True,
filter_out_candidate_inverse_relations: bool = True,
metadata: Optional[Dict[str, Any]] = None,
) -> "TriplesFactory":
if filter_out_candidate_inverse_relations:
unique_relations, inverse = np.unique(triples[:, 1], return_inverse=True)
suspected_to_be_inverse_relations = {r for r in unique_relations if r.endswith(INVERSE_SUFFIX)}
if len(suspected_to_be_inverse_relations) > 0:
logger.warning(
f"Some triples already have the inverse relation suffix {INVERSE_SUFFIX}. "
f"Re-creating inverse triples to ensure consistency. You may disable this behaviour by passing "
f"filter_out_candidate_inverse_relations=False",
)
relation_ids_to_remove = [
i for i, r in enumerate(unique_relations.tolist()) if r in suspected_to_be_inverse_relations
]
mask = np.isin(element=inverse, test_elements=relation_ids_to_remove, invert=True)
logger.info(f"keeping {mask.sum() / mask.shape[0]} triples.")
triples = triples[mask]
if entity_to_id is None:
entity_to_id = create_entity_mapping(triples=triples)
if compact_id:
entity_to_id = compact_mapping(mapping=entity_to_id)[0]
if relation_to_id is None:
relation_to_id = create_relation_mapping(triples[:, 1])
if compact_id:
relation_to_id = compact_mapping(mapping=relation_to_id)[0]
mapped_triples = _map_triples_elements_to_ids(
triples=triples,
entity_to_id=entity_to_id,
relation_to_id=relation_to_id,
)
return cls(
entity_to_id=entity_to_id,
relation_to_id=relation_to_id,
mapped_triples=mapped_triples,
create_inverse_triples=create_inverse_triples,
metadata=metadata,
)
@classmethod
def from_path(
cls,
path: Union[str, pathlib.Path, TextIO],
create_inverse_triples: bool = False,
entity_to_id: Optional[EntityMapping] = None,
relation_to_id: Optional[RelationMapping] = None,
compact_id: bool = True,
metadata: Optional[Dict[str, Any]] = None,
load_triples_kwargs: Optional[Mapping[str, Any]] = None,
) -> "TriplesFactory":
path = normalize_path(path)
triples = load_triples(path, **(load_triples_kwargs or {}))
return cls.from_labeled_triples(
triples=triples,
create_inverse_triples=create_inverse_triples,
entity_to_id=entity_to_id,
relation_to_id=relation_to_id,
compact_id=compact_id,
metadata={
"path": path,
**(metadata or {}),
},
)
def clone_and_exchange_triples(
self,
mapped_triples: MappedTriples,
extra_metadata: Optional[Dict[str, Any]] = None,
keep_metadata: bool = True,
create_inverse_triples: Optional[bool] = None,
) -> "TriplesFactory":
if create_inverse_triples is None:
create_inverse_triples = self.create_inverse_triples
return TriplesFactory(
entity_to_id=self.entity_to_id,
relation_to_id=self.relation_to_id,
mapped_triples=mapped_triples,
create_inverse_triples=create_inverse_triples,
metadata={
**(extra_metadata or {}),
**(self.metadata if keep_metadata else {}),
},
)
@property
def entity_to_id(self) -> Mapping[str, int]:
return self.entity_labeling.label_to_id
@property
def entity_id_to_label(self) -> Mapping[int, str]:
return self.entity_labeling.id_to_label
@property
def relation_to_id(self) -> Mapping[str, int]:
return self.relation_labeling.label_to_id
@property
def relation_id_to_label(self) -> Mapping[int, str]:
return self.relation_labeling.id_to_label
@property
def triples(self) -> np.ndarray:
logger.warning("Reconstructing all label-based triples. This is expensive and rarely needed.")
return self.label_triples(self.mapped_triples)
def get_inverse_relation_id(self, relation: Union[str, int]) -> int:
relation = next(iter(self.relations_to_ids(relations=[relation])))
return super().get_inverse_relation_id(relation=relation)
def label_triples(
self,
triples: MappedTriples,
unknown_entity_label: str = "[UNKNOWN]",
unknown_relation_label: Optional[str] = None,
) -> LabeledTriples:
if len(triples) == 0:
return np.empty(shape=(0, 3), dtype=str)
if unknown_relation_label is None:
unknown_relation_label = unknown_entity_label
return np.stack(
[
labeling.label(ids=column, unknown_label=unknown_label)
for (labeling, unknown_label), column in zip(
[
(self.entity_labeling, unknown_entity_label),
(self.relation_labeling, unknown_relation_label),
(self.entity_labeling, unknown_entity_label),
],
triples.t().numpy(),
)
],
axis=1,
)
def entities_to_ids(self, entities: Union[Collection[int], Collection[str]]) -> Collection[int]:
return _ensure_ids(labels_or_ids=entities, label_to_id=self.entity_labeling.label_to_id)
def get_mask_for_entities(
self,
entities: Union[Collection[int], Collection[str]],
invert: bool = False,
) -> torch.BoolTensor:
return super().get_mask_for_entities(entities=self.entities_to_ids(entities=entities), invert=invert)
def relations_to_ids(
self,
relations: Union[Collection[int], Collection[str]],
) -> Collection[int]:
return _ensure_ids(labels_or_ids=relations, label_to_id=self.relation_labeling.label_to_id)
def get_mask_for_relations(
self,
relations: Union[Collection[int], Collection[str]],
invert: bool = False,
) -> torch.BoolTensor:
return super().get_mask_for_relations(relations=self.relations_to_ids(relations=relations), invert=invert)
|
MIT License
|
mgear-dev/mgear_core
|
scripts/mgear/core/attribute.py
|
setInvertMirror
|
python
|
def setInvertMirror(node, invList=None):
aDic = {"tx": "invTx",
"ty": "invTy",
"tz": "invTz",
"rx": "invRx",
"ry": "invRy",
"rz": "invRz",
"sx": "invSx",
"sy": "invSy",
"sz": "invSz"}
for axis in invList:
if axis not in aDic:
mgear.log("Invalid Invert Axis : " + axis, mgear.sev_error)
return False
node.setAttr(aDic[axis], True)
|
Set invert mirror pose values
Arguments:
node (dagNode): The object to set invert mirror values
invList (list of str): Channel names to invert on mirror, e.g. ["tx", "ry", "rz"]
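A minimal usage sketch, assuming a running Maya session with pymel and mgear importable; the controller name "arm_ctl" is hypothetical and must already carry the inv* attributes that this function sets:

import pymel.core as pm
from mgear.core import attribute

ctl = pm.PyNode("arm_ctl")  # hypothetical control that already has invTx/invRy/... attributes
# Flag translateX and the Y/Z rotations so mirrored poses invert these channels.
attribute.setInvertMirror(ctl, invList=["tx", "ry", "rz"])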
|
https://github.com/mgear-dev/mgear_core/blob/bb450fda44ff79c57f5f73d5a58c97a6b5c5d848/scripts/mgear/core/attribute.py#L521-L544
|
import collections
import mgear
import pymel.core as pm
import maya.cmds as cmds
import pymel.core.datatypes as datatypes
def addAttribute(node,
longName,
attributeType,
value=None,
niceName=None,
shortName=None,
minValue=None,
maxValue=None,
keyable=True,
readable=True,
storable=True,
writable=True,
channelBox=False):
if node.hasAttr(longName):
mgear.log("Attribute already exists", mgear.sev_error)
return
data = {}
if shortName is not None:
data["shortName"] = shortName
if niceName is not None:
data["niceName"] = niceName
if attributeType == "string":
data["dataType"] = attributeType
else:
data["attributeType"] = attributeType
if minValue is not None and minValue is not False:
data["minValue"] = minValue
if maxValue is not None and maxValue is not False:
data["maxValue"] = maxValue
data["keyable"] = keyable
data["readable"] = readable
data["storable"] = storable
data["writable"] = writable
if value is not None and attributeType not in ["string"]:
data["defaultValue"] = value
node.addAttr(longName, **data)
if value is not None:
node.setAttr(longName, value)
if channelBox:
node.attr(longName).set(channelBox=True)
return node.attr(longName)
def addColorAttribute(node,
longName,
value=False,
keyable=True,
readable=True,
storable=True,
writable=True,
niceName=None,
shortName=None):
if node.hasAttr(longName):
mgear.log("Attribute already exists", mgear.sev_error)
return
data = {}
data["attributeType"] = "float3"
if shortName is not None:
data["shortName"] = shortName
if niceName is not None:
data["niceName"] = niceName
data["usedAsColor"] = True
data["keyable"] = keyable
data["readable"] = readable
data["storable"] = storable
data["writable"] = writable
dataChild = {}
dataChild["attributeType"] = 'float'
dataChild["parent"] = longName
node.addAttr(longName, **data)
node.addAttr(longName + "_r", **dataChild)
node.addAttr(longName + "_g", **dataChild)
node.addAttr(longName + "_b", **dataChild)
if value:
node.setAttr(longName + "_r", value[0])
node.setAttr(longName + "_g", value[1])
node.setAttr(longName + "_b", value[2])
return node.attr(longName)
def addEnumAttribute(node,
longName,
value,
enum,
niceName=None,
shortName=None,
keyable=True,
readable=True,
storable=True,
writable=True):
if node.hasAttr(longName):
mgear.log("Attribute '" + longName + "' already exists",
mgear.sev_warning)
return
data = {}
if shortName is not None:
data["shortName"] = shortName
if niceName is not None:
data["niceName"] = niceName
data["attributeType"] = "enum"
data["en"] = ":".join(enum)
data["keyable"] = keyable
data["readable"] = readable
data["storable"] = storable
data["writable"] = writable
node.addAttr(longName, **data)
node.setAttr(longName, value)
return node.attr(longName)
def addProxyAttribute(sourceAttrs, targets, duplicatedPolicy=None):
if not isinstance(targets, list):
targets = [targets]
if not isinstance(sourceAttrs, list):
sourceAttrs = [sourceAttrs]
for sourceAttr in sourceAttrs:
for target in targets:
attrName = sourceAttr.longName()
if target.hasAttr(sourceAttr.longName()):
if duplicatedPolicy == "index":
i = 0
while target.hasAttr(sourceAttr.longName() + str(i)):
i += 1
attrName = sourceAttr.longName() + str(i)
elif duplicatedPolicy == "fullName":
attrName = "{}_{}".format(sourceAttr.nodeName(),
sourceAttr.longName())
if not target.hasAttr(attrName):
target.addAttr(attrName, pxy=sourceAttr)
else:
pm.displayWarning(
"The proxy channel %s already exist on: %s."
% (sourceAttr.longName(), target.name()))
def moveChannel(attr, sourceNode, targetNode, duplicatedPolicy=None):
if isinstance(sourceNode, str) or isinstance(sourceNode, unicode):
sourceNode = pm.PyNode(sourceNode)
if isinstance(targetNode, str) or isinstance(targetNode, unicode):
targetNode = pm.PyNode(targetNode)
try:
at = sourceNode.attr(attr)
if pm.addAttr(at, q=True, usedAsProxy=True):
pm.displayWarning("{} is a proxy channel and move operation is "
"not yet supported.".format(attr))
return
except Exception:
pm.displayWarning("Looks like the {} is not in the"
" source: {}".format(attr, sourceNode.name()))
return
atType = at.type()
if atType in ["double", "float", "enum"]:
newAtt = None
attrName = attr
nName = pm.attributeQuery(
at.shortName(), node=at.node(), niceName=True)
if sourceNode.name() != targetNode.name():
if pm.attributeQuery(attr, node=targetNode, exists=True):
if duplicatedPolicy == "index":
i = 0
while targetNode.hasAttr(attr + str(i)):
i += 1
attrName = attr + str(i)
elif duplicatedPolicy == "fullName":
attrName = "{}_{}".format(sourceNode.name(), attr)
elif duplicatedPolicy == "merge":
newAtt = pm.PyNode(".".join([targetNode.name(), attr]))
else:
pm.displayWarning("Duplicated channel policy, is not "
"defined. Move channel operation will "
"fail if the channel already exist on "
"the target.")
return False
outcnx = at.listConnections(p=True)
if not newAtt:
value = at.get()
if atType in ["double", "float"]:
kwargs = {}
min = at.getMin()
if min:
kwargs["min"] = min
max = at.getMax()
if max:
kwargs["max"] = max
elif atType == "enum":
en = at.getEnums()
oEn = collections.OrderedDict(sorted(en.items(),
key=lambda t: t[1]))
enStr = ":".join([n for n in oEn])
pm.deleteAttr(at)
if atType in ["double", "float"]:
pm.addAttr(targetNode,
ln=attrName,
niceName=nName,
at=atType,
dv=value,
k=True,
**kwargs)
elif atType == "enum":
pm.addAttr(targetNode,
ln=attrName,
niceName=nName,
at="enum",
en=enStr,
dv=value,
k=True)
newAtt = pm.PyNode(".".join([targetNode.name(), attrName]))
else:
pm.deleteAttr(at)
for cnx in outcnx:
try:
pm.connectAttr(newAtt, cnx, f=True)
except RuntimeError:
pm.displayError("There is a problem connecting the "
"channel %s maybe is already move? Please "
"check your configuration" % newAtt.name())
else:
pm.displayWarning("MoveChannel function can't handle an attribute "
"of type: %s. Only supported 'double' adn 'enum' "
"types." % atType)
def lockAttribute(node,
attributes=["tx", "ty", "tz",
"rx", "ry", "rz",
"sx", "sy", "sz",
"v"]):
_lockUnlockAttribute(node, attributes, lock=True, keyable=False)
def unlockAttribute(node,
attributes=["tx", "ty", "tz",
"rx", "ry", "rz",
"sx", "sy", "sz",
"v"]):
_lockUnlockAttribute(node, attributes, lock=False, keyable=True)
def _lockUnlockAttribute(node, attributes, lock, keyable):
if not isinstance(attributes, list):
attributes = [attributes]
for attr_name in attributes:
node.setAttr(attr_name, lock=lock, keyable=keyable)
def setKeyableAttributes(nodes,
params=["tx", "ty", "tz",
"ro", "rx", "ry", "rz",
"sx", "sy", "sz"]):
localParams = ["tx", "ty", "tz",
"ro", "rx", "ry", "rz",
"sx", "sy", "sz",
"v"]
if not isinstance(nodes, list):
nodes = [nodes]
for attr_name in params:
for node in nodes:
node.setAttr(attr_name, lock=False, keyable=True)
for attr_name in localParams:
if attr_name not in params:
for node in nodes:
node.setAttr(attr_name, lock=True, keyable=False)
def setNotKeyableAttributes(nodes,
attributes=["tx", "ty", "tz",
"ro", "rx", "ry", "rz",
"sx", "sy", "sz",
"v"]):
if not isinstance(nodes, list):
nodes = [nodes]
for attr_name in attributes:
for node in nodes:
node.setAttr(attr_name, lock=False, keyable=False, cb=True)
def setRotOrder(node, s="XYZ"):
a = ["XYZ", "YZX", "ZXY", "XZY", "YXZ", "ZYX"]
if s not in a:
mgear.log("Invalid Rotorder : " + s, mgear.sev_error)
return False
er = datatypes.EulerRotation([pm.getAttr(node + ".rx"),
pm.getAttr(node + ".ry"),
pm.getAttr(node + ".rz")],
unit="degrees")
er.reorderIt(s)
node.setAttr("ro", a.index(s))
node.setAttr("rotate", er.x, er.y, er.z)
|
MIT License
|
brainiak/brainiak
|
brainiak/utils/fmrisim_real_time_generator.py
|
generate_data
|
python
|
def generate_data(outputDir,
user_settings):
data_dict = default_settings.copy()
data_dict.update(user_settings)
os.system('mkdir -p %s' % outputDir)
logger.info('Load template of average voxel value')
ROI_A_file, ROI_B_file, template_path, noise_dict_file = _get_input_names(data_dict)
if isinstance(template_path, str):
template_nii = nibabel.load(template_path)
template = template_nii.get_data()
else:
template = template_path
dimensions = np.array(template.shape[0:3])
logger.info('Create binary mask and normalize the template range')
mask, template = sim.mask_brain(volume=template,
mask_self=True,
)
outFile = os.path.join(outputDir, 'mask.npy')
np.save(outFile, mask.astype(np.uint8))
logger.info('Loading noise parameters')
if type(noise_dict_file) is str:
with open(noise_dict_file, 'r') as f:
noise_dict = f.read()
else:
noise_dict = noise_dict_file.decode()
noise_dict = eval(noise_dict)
noise_dict['matched'] = 0
data_dict['noise_dict'] = noise_dict
logger.info('Generating noise')
temp_stimfunction = np.zeros((data_dict['numTRs'], 1))
noise = sim.generate_noise(dimensions=dimensions,
stimfunction_tr=temp_stimfunction,
tr_duration=int(data_dict['trDuration']),
template=template,
mask=mask,
noise_dict=noise_dict,
)
total_time = int(data_dict['numTRs'] * data_dict['trDuration'])
onsets_A = []
onsets_B = []
curr_time = data_dict['burn_in']
while curr_time < (total_time - data_dict['event_duration']):
if np.random.randint(0, 2) == 1:
onsets_A.append(curr_time)
else:
onsets_B.append(curr_time)
curr_time += data_dict['event_duration'] + data_dict['isi']
temporal_res = 1 / data_dict['trDuration']
event_durations = [data_dict['event_duration']]
stimfunc_A = sim.generate_stimfunction(onsets=onsets_A,
event_durations=event_durations,
total_time=total_time,
temporal_resolution=temporal_res,
)
stimfunc_B = sim.generate_stimfunction(onsets=onsets_B,
event_durations=event_durations,
total_time=total_time,
temporal_resolution=temporal_res,
)
outFile = os.path.join(outputDir, 'labels.npy')
np.save(outFile, (stimfunc_A + (stimfunc_B * 2)))
signal_A = _generate_ROIs(ROI_A_file,
stimfunc_A,
noise,
data_dict['scale_percentage'],
data_dict)
if data_dict['different_ROIs'] is True:
signal_B = _generate_ROIs(ROI_B_file,
stimfunc_B,
noise,
data_dict['scale_percentage'],
data_dict)
else:
if data_dict['multivariate_pattern'] is False:
signal_B = _generate_ROIs(ROI_A_file,
stimfunc_B,
noise,
data_dict['scale_percentage'] * 0.5,
data_dict)
else:
signal_B = _generate_ROIs(ROI_A_file,
stimfunc_B,
noise,
data_dict['scale_percentage'],
data_dict)
signal = signal_A + signal_B
logger.info('Generating TRs in real time')
for idx in range(data_dict['numTRs']):
brain = noise[:, :, :, idx] + signal[:, :, :, idx]
brain_int32 = brain.astype(np.int32)
if data_dict['save_dicom'] is True:
output_file = os.path.join(outputDir, 'rt_' + format(idx, '03d')
+ '.dcm')
_write_dicom(output_file, brain_int32, idx+1)
else:
output_file = os.path.join(outputDir, 'rt_' + format(idx, '03d')
+ '.npy')
np.save(output_file, brain_int32)
logger.info("Generate {}".format(output_file))
if data_dict['save_realtime'] == 1:
time.sleep(data_dict['trDuration'])
|
Generate simulated fMRI data
Use a few parameters that might be relevant for real time analysis
Parameters
----------
outputDir : str
Specify output data dir where the data should be saved
user_settings : dict
A dictionary to specify the parameters used for making data,
specifying the following keys
numTRs - int - Specify the number of time points
multivariate_pattern - bool - Is the difference between conditions
univariate (0) or multivariate (1)
different_ROIs - bool - Are there different ROIs for each condition (
1) or is it in the same ROI (0). If it is the same ROI and you are
using univariate differences, the second condition will have a
smaller evoked response than the other.
event_duration - int - How long, in seconds, is each event
scale_percentage - float - What is the percent signal change
trDuration - float - How many seconds per volume
save_dicom - bool - Save the data as a DICOM (1) or numpy (0)
save_realtime - bool - Do you want to save the data in real time (1)
or as fast as possible (0)?
isi - float - What is the time between each event (in seconds)
burn_in - int - How long before the first event (in seconds)
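A minimal calling sketch; the output directory and the overridden settings below are assumptions, and any key left out of user_settings falls back to the default_settings shown further down:

from brainiak.utils.fmrisim_real_time_generator import generate_data

user_settings = {
    'numTRs': 20,            # short run for a quick smoke test
    'save_dicom': False,     # write rt_###.npy volumes instead of DICOM
    'save_realtime': False,  # emit volumes as fast as possible
}
generate_data('sim_out', user_settings)  # writes mask.npy, labels.npy and one volume per TR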
|
https://github.com/brainiak/brainiak/blob/ee093597c6c11597b0a59e95b48d2118e40394a5/brainiak/utils/fmrisim_real_time_generator.py#L338-L522
|
import os
import time
import argparse
import datetime
import nibabel
import numpy as np
import pydicom as dicom
from brainiak.utils import fmrisim as sim
import logging
from pkg_resources import resource_stream
from nibabel.nifti1 import Nifti1Image
import gzip
__all__ = ["generate_data"]
logger = logging.getLogger(__name__)
script_datetime = datetime.datetime.now()
default_settings = {
'ROI_A_file': None,
'ROI_B_file': None,
'template_path': None,
'noise_dict_file': None,
'numTRs': 200,
'trDuration': 2,
'isi': 6,
'burn_in': 6,
'event_duration': 10,
'scale_percentage': 0.5,
'multivariate_pattern': False,
'different_ROIs': False,
'save_dicom': False,
'save_realtime': False,
}
def _generate_ROIs(ROI_file,
stimfunc,
noise,
scale_percentage,
data_dict):
if isinstance(ROI_file, str):
logger.info('Loading %s', ROI_file)
nii = nibabel.load(ROI_file)
ROI = nii.get_data()
else:
ROI = ROI_file
idx_list = np.where(ROI == 1)
idxs = np.zeros([len(idx_list[0]), 3])
for idx_counter in list(range(0, len(idx_list[0]))):
idxs[idx_counter, 0] = int(idx_list[0][idx_counter])
idxs[idx_counter, 1] = int(idx_list[1][idx_counter])
idxs[idx_counter, 2] = int(idx_list[2][idx_counter])
idxs = idxs.astype('int8')
voxels = int(ROI.sum())
if data_dict['multivariate_pattern'] is True:
pattern = np.random.rand(voxels).reshape((voxels, 1))
else:
pattern = np.tile(1, voxels).reshape((voxels, 1))
weights = np.tile(stimfunc, voxels) * pattern.T
temporal_res = 1 / data_dict['trDuration']
signal_func = sim.convolve_hrf(stimfunction=weights,
tr_duration=data_dict['trDuration'],
temporal_resolution=temporal_res,
scale_function=1,
)
noise = noise.astype('double')
noise_function = noise[idxs[:, 0], idxs[:, 1], idxs[:, 2], :].T
sf_scaled = sim.compute_signal_change(signal_function=signal_func,
noise_function=noise_function,
noise_dict=data_dict['noise_dict'],
magnitude=[scale_percentage],
method='PSC',
)
signal = sim.apply_signal(sf_scaled,
ROI,
)
return signal
def _write_dicom(output_name,
data,
image_number=0):
dataInts = data.astype(np.int16)
file_meta = dicom.Dataset()
file_meta.MediaStorageSOPClassUID = '1.2'
file_meta.MediaStorageSOPInstanceUID = "1.2.3"
file_meta.ImplementationClassUID = "1.2.3.4"
file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'
ds = dicom.FileDataset(output_name,
{},
file_meta=file_meta,
preamble=b"\0" * 128)
frames, rows, cols = dataInts.shape
ds.Rows = rows
ds.Columns = cols
ds.NumberOfFrames = frames
ds.SamplesPerPixel = 1
ds.BitsAllocated = 16
ds.BitsStored = 16
ds.PixelRepresentation = 0
ds.InstanceNumber = image_number
ds.ImagePositionPatient = [0, 0, 0]
ds.ImageOrientationPatient = [.01, 0, 0, 0, 0, 0]
ds.PhotometricInterpretation = 'MONOCHROME1'
ds.PatientName = "sim"
ds.PatientID = "sim"
ds.is_little_endian = True
ds.is_implicit_VR = True
image_datetime = script_datetime + datetime.timedelta(seconds=image_number)
timeStr = image_datetime.strftime('%H%M%S')
ds.ContentDate = image_datetime.strftime('%Y%m%d')
ds.ContentTime = timeStr
ds.PixelData = dataInts.tobytes()
ds.save_as(output_name)
def _get_input_names(data_dict):
if data_dict.get('ROI_A_file') is None:
vol = resource_stream(__name__, "sim_parameters/ROI_A.nii.gz").read()
ROI_A_file = Nifti1Image.from_bytes(gzip.decompress(vol)).get_data()
else:
ROI_A_file = data_dict['ROI_A_file']
if data_dict.get('ROI_B_file') is None:
vol = resource_stream(__name__, "sim_parameters/ROI_B.nii.gz").read()
ROI_B_file = Nifti1Image.from_bytes(gzip.decompress(vol)).get_data()
else:
ROI_B_file = data_dict['ROI_B_file']
if data_dict.get('template_path') is None:
vol = resource_stream(__name__,
"sim_parameters/sub_template.nii.gz").read()
template_path = Nifti1Image.from_bytes(gzip.decompress(vol)).get_data()
else:
template_path = data_dict['template_path']
if data_dict.get('noise_dict_file') is None:
file = resource_stream(__name__,
'sim_parameters/sub_noise_dict.txt').read()
noise_dict_file = file
else:
noise_dict_file = data_dict['noise_dict_file']
return ROI_A_file, ROI_B_file, template_path, noise_dict_file
|
Apache License 2.0
|
harrystech/arthur-redshift-etl
|
python/etl/extract/extractor.py
|
Extractor.source_info
|
python
|
def source_info(source: DataWarehouseSchema, relation: RelationDescription) -> Dict:
return {
"name": relation.source_name,
"schema": relation.source_table_name.schema,
"table": relation.source_table_name.table,
}
|
Return info for the job monitor that says from where the data is extracted.
Defaults to the relation's idea of the source but may be overridden by child classes.
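A hedged sketch of what such an override could look like in a child class; the import paths follow the module path shown above, and the extra fields on the right-hand side (a bucket name and manifest key) are purely illustrative assumptions, not attributes guaranteed by this codebase:

from typing import Dict

from etl.config.dw import DataWarehouseSchema
from etl.extract.extractor import Extractor
from etl.relation import RelationDescription


class ManifestExtractor(Extractor):
    @staticmethod
    def source_info(source: DataWarehouseSchema, relation: RelationDescription) -> Dict:
        # Report an S3 manifest as the source instead of the upstream table.
        return {
            "name": relation.source_name,
            "bucket": "example-data-lake",                    # hypothetical bucket name
            "manifest": f"{relation.source_name}.manifest",   # hypothetical manifest key
        }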
|
https://github.com/harrystech/arthur-redshift-etl/blob/126b93e998b5b393597e333e4f6eb11f531809df/python/etl/extract/extractor.py#L68-L78
|
import concurrent.futures
import logging
import random
import time
from itertools import groupby
from operator import attrgetter
from typing import Dict, List, Set
import etl.config
import etl.db
import etl.file_sets
import etl.monitor
import etl.s3
from etl.config.dw import DataWarehouseSchema
from etl.errors import DataExtractError, ETLRuntimeError, MissingCsvFilesError
from etl.relation import RelationDescription
from etl.text import join_with_single_quotes
from etl.timer import Timer
from etl.util.retry import call_with_retry
class Extractor:
def __init__(
self,
name: str,
schemas: Dict[str, DataWarehouseSchema],
relations: List[RelationDescription],
keep_going: bool,
needs_to_wait: bool,
dry_run: bool,
) -> None:
self.name = name
self.schemas = schemas
self.relations = relations
self.keep_going = keep_going
self.needs_to_wait = needs_to_wait
self.dry_run = dry_run
self.logger = logging.getLogger(__name__)
self.failed_sources: Set[str] = set()
def options_info(self) -> List[str]:
return ["with-{0.name}-extractor".format(self)]
@staticmethod
|
MIT License
|
jeetshetty/greenpithumb_frontend
|
server/images.py
|
Indexer.index
|
python
|
def index(self):
file_index = []
for filename in _files_in_directory(self._images_path):
try:
file_index.append(_filename_to_index_entry(filename))
except ValueError:
pass
return file_index
|
Generates an index of the GreenPiThumb image files.
Creates an index of all the GreenPiThumb image files in the specified
images path. If there are non-image files in the directory, these are
ignored. Any GreenPiThumb image files in subdirectories are also
ignored.
Returns:
A list of dictionaries, one for each GreenPiThumb image file, where
each dictionary has a 'timestamp' key holding the datetime when the
file was created and a 'filename' key holding the image's filename
(without path prefix).
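For illustration, a short sketch of exercising the indexer; the import path mirrors the repo layout and the images directory is hypothetical. Only filenames matching the '%Y-%m-%dT%H%MZ.jpg' pattern end up in the index:

from server.images import Indexer

indexer = Indexer('/var/greenpithumb/images')  # hypothetical images directory
for entry in indexer.index():
    # Each entry pairs a UTC timestamp parsed from the filename with the bare filename.
    print(entry['timestamp'], entry['filename'])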
|
https://github.com/jeetshetty/greenpithumb_frontend/blob/4d2700ff7fbb22b26f697500badfbda1c0a13646/server/images.py#L38-L59
|
import datetime
import os
import pytz
_FILENAME_FORMAT = '%Y-%m-%dT%H%MZ.jpg'
def _files_in_directory(directory):
paths = os.listdir(directory)
return [f for f in paths if os.path.isfile(os.path.join(directory, f))]
def _timestamp_from_filename(filename):
return datetime.datetime.strptime(filename, _FILENAME_FORMAT).replace(
tzinfo=pytz.utc)
def _filename_to_index_entry(filename):
return {
'timestamp': _timestamp_from_filename(filename),
'filename': filename,
}
class Indexer(object):
def __init__(self, images_path):
self._images_path = images_path
|
Apache License 2.0
|
netket/netket
|
netket/graph/lattice.py
|
Lattice.vector_to_site
|
python
|
def vector_to_site(self, vector: CoordT) -> int:
return self.id_from_basis_coords([*vector, 0])
|
Deprecated. Please use :code:`id_from_basis_coords([*vector, 0])` instead.
|
https://github.com/netket/netket/blob/74248a39e86bb501eaf6822e76107c4926321f80/netket/graph/lattice.py#L743-L748
|
from dataclasses import dataclass
from math import pi
from netket.utils.types import Array
from typing import Callable, Dict, Sequence, Tuple, Union, Optional, TYPE_CHECKING
import warnings
import igraph
import numpy as _np
from scipy.spatial import cKDTree
from scipy.sparse import find, triu
from netket.utils.deprecation import deprecated as _deprecated
from netket.utils import HashableArray
from netket.utils.float import comparable, comparable_periodic, is_approx_int
from netket.utils.group import PointGroup, PermutationGroup, trivial_point_group
from .graph import Graph
if TYPE_CHECKING:
from .space_group import SpaceGroupBuilder
PositionT = _np.ndarray
CoordT = _np.ndarray
class InvalidSiteError(Exception):
pass
class InvalidWaveVectorError(Exception):
pass
@dataclass
class LatticeSite:
id: int
position: PositionT
basis_coord: CoordT
def __repr__(self):
s = ", ".join(map(str, (self.id, self.basis_coord)))
return f"LatticeSite({s})"
def create_sites(
basis_vectors, extent, apositions, pbc
) -> Tuple[Sequence[LatticeSite], Sequence[bool], Dict[HashableArray, int]]:
shell_vec = _np.where(pbc, 2, 0)
shift_vec = _np.where(pbc, 1, 0)
shell_min = 0 - shift_vec
shell_max = _np.asarray(extent) + shell_vec - shift_vec
ranges = [slice(lo, hi) for lo, hi in zip(shell_min, shell_max)]
ranges += [slice(0, len(apositions))]
basis_coords = _np.vstack([_np.ravel(x) for x in _np.mgrid[ranges]]).T
site_coords = (
basis_coords[:, :-1]
+ _np.tile(apositions.T, reps=len(basis_coords) // len(apositions)).T
)
positions = site_coords @ basis_vectors
sites = []
coord_to_site = {}
for idx, (coord, pos) in enumerate(zip(basis_coords, positions)):
sites.append(
LatticeSite(
id=None,
basis_coord=coord,
position=pos,
),
)
coord_to_site[HashableArray(coord)] = idx
is_inside = ~(
_np.any(basis_coords[:, :-1] < 0, axis=1)
| _np.any(basis_coords[:, :-1] > (extent - 1), axis=1)
)
return sites, is_inside, coord_to_site
def get_edges(positions, cutoff):
kdtree = cKDTree(positions)
dist_matrix = kdtree.sparse_distance_matrix(kdtree, cutoff)
row, col, dst = find(triu(dist_matrix))
dst = comparable(dst)
_, ii = _np.unique(dst, return_inverse=True)
return sorted(list(zip(row[ii == 0], col[ii == 0])))
def get_true_edges(
basis_vectors: PositionT,
sites: Sequence[LatticeSite],
inside: Sequence[bool],
basis_coord_to_site,
extent,
distance_atol,
):
positions = _np.array([p.position for p in sites])
naive_edges = get_edges(
positions, _np.linalg.norm(basis_vectors, axis=1).max() + distance_atol
)
true_edges = set()
for node1, node2 in naive_edges:
site1, inside1 = sites[node1], inside[node1]
site2, inside2 = sites[node2], inside[node2]
if inside1 and inside2:
true_edges.add((node1, node2))
elif inside1 or inside2:
cell1 = site1.basis_coord
cell2 = site2.basis_coord
cell1[:-1] = cell1[:-1] % extent
cell2[:-1] = cell2[:-1] % extent
node1 = basis_coord_to_site[HashableArray(cell1)]
node2 = basis_coord_to_site[HashableArray(cell2)]
edge = (node1, node2)
if edge not in true_edges and (node2, node1) not in true_edges:
true_edges.add(edge)
return list(true_edges)
def deprecated(alternative):
def wrapper(fn):
msg = (
f"{fn.__name__} is deprecated and may be removed in the future. "
f"You can use `{alternative}`` instead."
)
f = _deprecated(msg)(fn)
return f
return wrapper
REPR_TEMPLATE = """Lattice(
n_nodes={},
extent={},
basis_vectors=
{},
site_offsets=
{},
)
"""
class Lattice(Graph):
def __init__(
self,
basis_vectors: _np.ndarray,
extent: _np.ndarray,
*,
pbc: Union[bool, Sequence[bool]] = True,
site_offsets: Optional[_np.ndarray] = None,
atoms_coord: Optional[_np.ndarray] = None,
distance_atol: float = 1e-5,
point_group: Optional[PointGroup] = None,
):
self._basis_vectors = self._clean_basis(basis_vectors)
self._ndim = self._basis_vectors.shape[1]
self._site_offsets, site_pos_fractional = self._clean_site_offsets(
site_offsets,
atoms_coord,
self._basis_vectors,
)
self._pbc = self._clean_pbc(pbc, self._ndim)
self._extent = _np.asarray(extent, dtype=int)
self._point_group = point_group
sites, inside, self._basis_coord_to_site = create_sites(
self._basis_vectors, self._extent, site_pos_fractional, self._pbc
)
edges = get_true_edges(
self._basis_vectors,
sites,
inside,
self._basis_coord_to_site,
self._extent,
distance_atol,
)
old_nodes = sorted(set(node for edge in edges for node in edge))
new_nodes = {old_node: new_node for new_node, old_node in enumerate(old_nodes)}
graph = igraph.Graph()
graph.add_vertices(len(old_nodes))
graph.add_edges([(new_nodes[edge[0]], new_nodes[edge[1]]) for edge in edges])
graph.simplify()
self._sites = []
for i, site in enumerate(sites[old_node] for old_node in old_nodes):
site.id = i
self._sites.append(site)
self._basis_coord_to_site = {
HashableArray(p.basis_coord): p.id for p in self._sites
}
self._positions = _np.array([p.position for p in self._sites])
self._basis_coords = _np.array([p.basis_coord for p in self._sites])
self._lattice_dims = _np.expand_dims(self._extent, 1) * self.basis_vectors
self._inv_dims = _np.linalg.inv(self._lattice_dims)
int_positions = self._to_integer_position(self._positions)
self._int_position_to_site = {
HashableArray(pos): index for index, pos in enumerate(int_positions)
}
super().__init__(list(graph.get_edgelist()), graph.vcount())
@staticmethod
def _clean_basis(basis_vectors):
basis_vectors = _np.asarray(basis_vectors)
if basis_vectors.ndim != 2:
raise ValueError(
"'basis_vectors' must have ndim==2 (as array of primtive vectors)"
)
if basis_vectors.shape[0] != basis_vectors.shape[1]:
raise ValueError("The number of primitive vectors must match their length")
return basis_vectors
@staticmethod
def _clean_site_offsets(site_offsets, atoms_coord, basis_vectors):
if atoms_coord is not None and site_offsets is not None:
raise ValueError(
"atoms_coord is deprecated and replaced by site_offsets, "
"so both cannot be specified at the same time."
)
if atoms_coord is not None:
warnings.warn(
"atoms_coord is deprecated and may be removed in future versions, "
"please use site_offsets instead",
FutureWarning,
)
site_offsets = atoms_coord
if site_offsets is None:
site_offsets = _np.zeros(basis_vectors.shape[0])[None, :]
site_offsets = _np.asarray(site_offsets)
fractional_coords = site_offsets @ _np.linalg.inv(basis_vectors)
fractional_coords_int = comparable_periodic(fractional_coords)
uniques, idx = _np.unique(fractional_coords_int, axis=0, return_index=True)
if len(site_offsets) != len(uniques):
site_offsets = site_offsets[idx]
fractional_coords = fractional_coords[idx]
fractional_coords_int = fractional_coords_int[idx]
warnings.warn(
"Some atom positions are not unique. Duplicates were dropped, and "
f"now atom positions are {site_offsets}",
UserWarning,
)
if _np.any(fractional_coords_int < comparable(0.0)) or _np.any(
fractional_coords_int > comparable(1.0)
):
warnings.warn(
"Some sites were specified outside the primitive unit cell. This may"
"cause errors in automatic edge finding.",
UserWarning,
)
return site_offsets, fractional_coords
@staticmethod
def _clean_pbc(pbc, ndim):
if isinstance(pbc, bool):
return _np.array([pbc] * ndim, dtype=bool)
elif (
not isinstance(pbc, Sequence)
or len(pbc) != ndim
or not all(isinstance(b, bool) for b in pbc)
):
raise ValueError(
"pbc must be either a boolean or a sequence of booleans with length"
"equal to the lattice dimenion"
)
else:
return _np.asarray(pbc, dtype=bool)
@property
def basis_vectors(self):
return self._basis_vectors
@property
def site_offsets(self):
return self._site_offsets
@property
def ndim(self):
return self._ndim
@property
def pbc(self):
return self._pbc
@property
def extent(self):
return self._extent
@property
def sites(self) -> Sequence[LatticeSite]:
return self._sites
@property
def positions(self) -> PositionT:
return self._positions
@property
def basis_coords(self) -> CoordT:
return self._basis_coords
def _to_integer_position(self, positions: PositionT) -> Array:
frac_positions = _np.matmul(positions, self._inv_dims)
return comparable_periodic(frac_positions, self.pbc)
@staticmethod
def _get_id_from_dict(
dict: Dict[HashableArray, int], key: Array
) -> Union[int, Array]:
try:
if key.ndim == 1:
return dict[HashableArray(key)]
elif key.ndim == 2:
return _np.array([dict[HashableArray(k)] for k in key])
else:
raise ValueError("Input needs to be rank 1 or rank 2 array")
except KeyError as e:
raise InvalidSiteError(
"Some coordinates do not correspond to a valid lattice site"
) from e
def id_from_position(self, position: PositionT) -> Union[int, Array]:
int_pos = self._to_integer_position(position)
ids = self._get_id_from_dict(self._int_position_to_site, int_pos)
return ids
def id_from_basis_coords(self, basis_coords: CoordT) -> Union[int, Array]:
key = _np.asarray(basis_coords)
return self._get_id_from_dict(self._basis_coord_to_site, key)
def position_from_basis_coords(self, basis_coords: CoordT) -> PositionT:
ids = self.id_from_basis_coords(basis_coords)
return self.positions[ids]
def to_reciprocal_lattice(self, ks: Array) -> Array:
ks = _np.asarray(ks)
if ks.ndim == 1:
ks = ks[_np.newaxis, :]
result = ks @ self._lattice_dims.T / (2 * pi)
is_valid = is_approx_int(result)
if not _np.all(is_valid):
raise InvalidWaveVectorError(
"Some wave vectors are not reciprocal lattice vectors of the simulation"
"box spanned by\n"
+ "\n".join(
[
str(self._lattice_dims[i])
+ (" (PBC)" if self.pbc[i] else " (OBC)")
for i in range(self.ndim)
]
)
)
result = _np.asarray(_np.rint(result), dtype=int)
is_valid = _np.logical_or(self.pbc, result == 0)
if not _np.all(is_valid):
raise InvalidWaveVectorError(
"Some wave vectors are inconisistent with open boundary conditions"
)
return result
def space_group_builder(
self, point_group: Optional[PointGroup] = None
) -> "SpaceGroupBuilder":
from .space_group import SpaceGroupBuilder
if point_group is None:
if isinstance(self._point_group, PointGroup):
point_group = self._point_group
elif isinstance(self._point_group, Callable):
self._point_group = self._point_group()
point_group = self._point_group
else:
raise RuntimeError(
"space_group_builder() missing required argument 'point_group'\n"
"(lattice has no default point group)"
)
return SpaceGroupBuilder(self, point_group)
def space_group(self, point_group: Optional[PointGroup] = None) -> PermutationGroup:
return self.space_group_builder(point_group).space_group
def point_group(self, point_group: Optional[PointGroup] = None) -> PermutationGroup:
return self.space_group_builder(point_group).point_group
def rotation_group(
self, point_group: Optional[PointGroup] = None
) -> PermutationGroup:
return self.space_group_builder(point_group).rotation_group
def translation_group(
self, dim: Optional[Union[int, Sequence[int]]] = None
) -> PermutationGroup:
return self.space_group_builder(
trivial_point_group(self.ndim)
).translation_group(dim)
def __repr__(self) -> str:
return REPR_TEMPLATE.format(
self.n_nodes,
self._extent,
str(self.basis_vectors).replace("\n", "\n" + " " * 8),
str(self.site_offsets).replace("\n", "\n" + " " * 8),
)
def draw(
self,
ax=None,
figsize: Optional[Tuple[Union[int, float]]] = None,
node_color: str = "#1f78b4",
node_size: int = 300,
edge_color: str = "k",
curvature: float = 0.2,
font_size: int = 12,
font_color: str = "k",
):
import matplotlib.pyplot as plt
if self._ndim == 1:
positions = _np.pad(self.positions, (0, 1), "constant")
elif self._ndim == 2:
positions = self.positions
else:
raise ValueError(
"Make sure that the graph is 1D or 2D in order to be drawn. "
f" Now it is {self._ndim}D"
)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
for edge in self.edges():
x1, y1 = positions[edge[0]]
x2, y2 = positions[edge[1]]
annotation = ax.annotate(
"",
xy=(x1, y1),
xycoords="data",
xytext=(x2, y2),
textcoords="data",
arrowprops=dict(
arrowstyle="-",
color=edge_color,
shrinkA=0,
shrinkB=0,
patchA=None,
patchB=None,
connectionstyle=f"arc3,rad={curvature}",
),
)
ax.scatter(
*positions.T,
s=node_size,
c=node_color,
marker="o",
zorder=annotation.get_zorder() + 1,
)
for node in self.nodes():
x1, y1 = positions[node]
ax.text(
x1,
y1,
str(node),
horizontalalignment="center",
verticalalignment="center",
fontsize=font_size,
color=font_color,
zorder=annotation.get_zorder() + 1,
)
ax.axis("equal")
return ax
@deprecated("basis_coords[site_id, -1]")
def atom_label(self, site_id: int) -> int:
return self.basis_coords[site_id, -1]
@deprecated("basis_coords[site_id, :-1]")
def site_to_vector(self, site_id: int) -> CoordT:
return self.basis_coords[site_id, :-1]
@deprecated("positions[site_id]")
def site_to_coord(self, site_id: int) -> PositionT:
return self.positions[site_id]
@deprecated("id_from_basis_coords([*vector, 0])")
|
Apache License 2.0
|
tristandeleu/pytorch-meta
|
torchmeta/datasets/helpers_tabular.py
|
helper_with_default_tabular
|
python
|
def helper_with_default_tabular(klass, folder, shots, ways, shuffle=True,
test_shots=None, seed=None, defaults=None, **kwargs):
if defaults is None:
defaults = {}
if 'num_classes_per_task' in kwargs:
warnings.warn('Both arguments `ways` and `num_classes_per_task` were '
'set in the helper function for the number of classes per task. '
'Ignoring the argument `ways`.', stacklevel=2)
ways = kwargs['num_classes_per_task']
if 'transform' not in kwargs:
kwargs['transform'] = defaults.get('transform', NumpyToTorch())
if 'target_transform' not in kwargs:
kwargs['target_transform'] = defaults.get('target_transform',
Categorical(ways))
if 'class_augmentations' not in kwargs:
kwargs['class_augmentations'] = defaults.get('class_augmentations', None)
if test_shots is None:
test_shots = shots
dataset = klass(folder,
num_classes_per_task=ways,
**kwargs)
dataset = ClassSplitter(dataset,
shuffle=shuffle,
num_train_per_class=shots,
num_test_per_class=test_shots)
dataset.seed(seed)
return dataset
|
Parameters
----------
klass : CombinationMetaDataset
the class corresponding to the meta-dataset, e.g., Covertype
folder : string
Root directory where the dataset folder exists, e.g., `covertype_task_id_2118`.
shots : int
Number of (training) examples per class in each task. This corresponds
to `k` in `k-shot` classification.
ways : int
Number of classes per task. This corresponds to `N` in `N-way`
classification.
shuffle : bool (default: `True`)
Shuffle the examples when creating the tasks.
test_shots : int, optional
Number of test examples per class in each task. If `None`, then the
number of test examples is equal to the number of training examples per
class.
seed : int, optional
Random seed to be used in the meta-dataset.
kwargs
Additional arguments passed to the dataset class (`klass`).
Returns
-------
klass
The meta-dataset with ClassSplitter applied, e.g., Covertype.
|
https://github.com/tristandeleu/pytorch-meta/blob/d55d89ebd47f340180267106bde3e4b723f23762/torchmeta/datasets/helpers_tabular.py#L16-L84
|
import warnings
from torchmeta.datasets import Letter, PlantsTexture, PlantsShape, PlantsMargin, Bach
from torchmeta.transforms import Categorical, ClassSplitter
from torchmeta.transforms.tabular_transforms import NumpyToTorch
__all__ = [
'letter',
'plants_texture',
'plants_shape',
'plants_margin',
'bach'
]
|
MIT License
|
googledatalab/pydatalab
|
datalab/utils/_utils.py
|
pick_unused_port
|
python
|
def pick_unused_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port
|
Get an unused port on the VM.
Returns:
An unused port.
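A small sketch of the intended use, with a caveat worth keeping in mind: the port is only known to be free at the moment of the call, so another process could claim it before you bind to it.

port = pick_unused_port()
print('starting a local helper server on port %d' % port)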
|
https://github.com/googledatalab/pydatalab/blob/1d6865237fdc8d184123d1e89193578da56d73b3/datalab/utils/_utils.py#L86-L96
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
try:
import http.client as httplib
except ImportError:
import httplib
import pytz
import subprocess
import socket
import traceback
import types
def print_exception_with_last_stack(e):
traceback.print_exc()
print(str(e))
def get_item(env, name, default=None):
for key in name.split('.'):
if isinstance(env, dict) and key in env:
env = env[key]
elif isinstance(env, types.ModuleType) and key in env.__dict__:
env = env.__dict__[key]
else:
return default
return env
def compare_datetimes(d1, d2):
if d1.tzinfo is None or d1.tzinfo.utcoffset(d1) is None:
d1 = d1.replace(tzinfo=pytz.UTC)
if d2.tzinfo is None or d2.tzinfo.utcoffset(d2) is None:
d2 = d2.replace(tzinfo=pytz.UTC)
if d1 < d2:
return -1
elif d1 > d2:
return 1
return 0
|
Apache License 2.0
|
zhenlohuang/pyaria2
|
pyaria2.py
|
PyAria2.changeGlobalOption
|
python
|
def changeGlobalOption(self, options):
return self.server.aria2.changeGlobalOption(options)
|
This method changes global options dynamically.
options: dict, the options.
return: This method returns OK on success.
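As a sketch, assuming an aria2c RPC server reachable at the default host/port used above, throttle the overall download rate and read the option back:

aria2 = PyAria2()
aria2.changeGlobalOption({'max-overall-download-limit': '1M'})  # cap downloads at 1 MiB/s
print(aria2.getGlobalOption()['max-overall-download-limit'])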
|
https://github.com/zhenlohuang/pyaria2/blob/034965c0243db8157e8dc99767d076d43f9fc7c5/pyaria2.py#L338-L346
|
import subprocess
import xmlrpc.client
import os
import time
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 6800
SERVER_URI_FORMAT = 'http://{}:{:d}/rpc'
class PyAria2(object):
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, session=None):
if not isAria2Installed():
raise Exception('aria2 is not installed, please install it first.')
if not isAria2rpcRunning():
cmd = 'aria2c' ' --enable-rpc' ' --rpc-listen-port %d' ' --continue' ' --max-concurrent-downloads=20' ' --max-connection-per-server=10' ' --rpc-max-request-size=1024M' % port
if session is not None:
cmd += ' --input-file=%s' ' --save-session-interval=60' ' --save-session=%s' % (session, session)
subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
count = 0
while True:
if isAria2rpcRunning():
break
else:
count += 1
time.sleep(3)
if count == 5:
raise Exception('aria2 RPC server failed to start.')
print('aria2 RPC server is started.')
else:
print('aria2 RPC server is already running.')
server_uri = SERVER_URI_FORMAT.format(host, port)
self.server = xmlrpc.client.ServerProxy(server_uri, allow_none=True)
def addUri(self, uris, options=None, position=None):
return self.server.aria2.addUri(uris, options, position)
def addTorrent(self, torrent, uris=None, options=None, position=None):
return self.server.aria2.addTorrent(xmlrpc.client.Binary(open(torrent, 'rb').read()), uris, options, position)
def addMetalink(self, metalink, options=None, position=None):
return self.server.aria2.addMetalink(xmlrpc.client.Binary(open(metalink, 'rb').read()), options, position)
def remove(self, gid):
return self.server.aria2.remove(gid)
def forceRemove(self, gid):
return self.server.aria2.forceRemove(gid)
def pause(self, gid):
return self.server.aria2.pause(gid)
def pauseAll(self):
return self.server.aria2.pauseAll()
def forcePause(self, gid):
return self.server.aria2.forcePause(gid)
def forcePauseAll(self):
return self.server.aria2.forcePauseAll()
def unpause(self, gid):
return self.server.aria2.unpause(gid)
def unpauseAll(self):
return self.server.aria2.unpauseAll()
def tellStatus(self, gid, keys=None):
return self.server.aria2.tellStatus(gid, keys)
def getUris(self, gid):
return self.server.aria2.getUris(gid)
def getFiles(self, gid):
return self.server.aria2.getFiles(gid)
def getPeers(self, gid):
return self.server.aria2.getPeers(gid)
def getServers(self, gid):
return self.server.aria2.getServers(gid)
def tellActive(self, keys=None):
return self.server.aria2.tellActive(keys)
def tellWaiting(self, offset, num, keys=None):
return self.server.aria2.tellWaiting(offset, num, keys)
def tellStopped(self, offset, num, keys=None):
return self.server.aria2.tellStopped(offset, num, keys)
def changePosition(self, gid, pos, how):
return self.server.aria2.changePosition(gid, pos, how)
def changeUri(self, gid, fileIndex, delUris, addUris, position=None):
return self.server.aria2.changeUri(gid, fileIndex, delUris, addUris, position)
def getOption(self, gid):
return self.server.aria2.getOption(gid)
def changeOption(self, gid, options):
return self.server.aria2.changeOption(gid, options)
def getGlobalOption(self):
return self.server.aria2.getGlobalOption()
|
MIT License
|
beancount/fava
|
src/fava/application.py
|
download_journal
|
python
|
def download_journal() -> Any:
now = datetime.datetime.now().replace(microsecond=0)
filename = f"journal_{now.isoformat()}.beancount"
data = BytesIO(bytes(render_template("beancount_file"), "utf8"))
return send_file(data, as_attachment=True, download_name=filename)
|
Download a Journal file.
|
https://github.com/beancount/fava/blob/703497d31c467702c59004dc2143b04ba178dce8/src/fava/application.py#L369-L374
|
import datetime
import functools
import threading
from io import BytesIO
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import flask
import markdown2
import werkzeug.urls
from beancount import __version__ as beancount_version
from beancount.core.account import ACCOUNT_RE
from beancount.utils.text_utils import replace_numbers
from flask import abort
from flask import Flask
from flask import redirect
from flask import render_template
from flask import render_template_string
from flask import request
from flask import send_file
from flask_babel import Babel
from flask_babel import get_translations
from werkzeug.utils import secure_filename
from fava import __version__ as fava_version
from fava import LANGUAGES
from fava import template_filters
from fava.context import g
from fava.core import FavaLedger
from fava.core.charts import FavaJSONEncoder
from fava.core.documents import is_document_or_import_file
from fava.help import HELP_PAGES
from fava.helpers import FavaAPIException
from fava.json_api import json_api
from fava.serialisation import serialise
from fava.util import next_key
from fava.util import resource_path
from fava.util import send_file_inline
from fava.util import setup_logging
from fava.util import slugify
from fava.util.date import Interval
from fava.util.excel import HAVE_EXCEL
STATIC_FOLDER = resource_path("static")
setup_logging()
app = Flask(
__name__,
template_folder=str(resource_path("templates")),
static_folder=str(STATIC_FOLDER),
)
app.register_blueprint(json_api, url_prefix="/<bfile>/api")
app.json_encoder = FavaJSONEncoder
jinja_extensions = app.jinja_options.setdefault("extensions", [])
jinja_extensions.append("jinja2.ext.do")
jinja_extensions.append("jinja2.ext.loopcontrols")
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config["HAVE_EXCEL"] = HAVE_EXCEL
app.config["ACCOUNT_RE"] = ACCOUNT_RE
REPORTS = [
"balance_sheet",
"commodities",
"documents",
"events",
"editor",
"errors",
"holdings",
"import",
"income_statement",
"journal",
"options",
"query",
"statistics",
"trial_balance",
]
LOAD_FILE_LOCK = threading.Lock()
def ledger_slug(ledger: FavaLedger) -> str:
title_slug = slugify(ledger.options["title"])
return title_slug or slugify(ledger.beancount_file_path)
def update_ledger_slugs(ledgers: List[FavaLedger]) -> None:
ledgers_by_slug: Dict[str, FavaLedger] = {}
for ledger in ledgers:
slug = ledger_slug(ledger)
unique_key = next_key(slug, ledgers_by_slug)
ledgers_by_slug[unique_key] = ledger
app.config["LEDGERS"] = ledgers_by_slug
def _load_file() -> None:
ledgers = [
FavaLedger(filepath) for filepath in app.config["BEANCOUNT_FILES"]
]
update_ledger_slugs(ledgers)
def get_locale() -> Optional[str]:
lang = g.ledger.fava_options["language"]
if lang is not None:
return lang
return request.accept_languages.best_match(["en"] + LANGUAGES)
BABEL = Babel(app)
BABEL.localeselector(get_locale)
for function in template_filters.FILTERS:
app.add_template_filter(function)
app.add_template_filter(serialise)
@app.url_defaults
def _inject_filters(endpoint: str, values: Dict[str, Any]) -> None:
if "bfile" not in values and app.url_map.is_endpoint_expecting(
endpoint, "bfile"
):
values["bfile"] = g.beancount_file_slug
if endpoint in ["static", "index"]:
return
for name in ["conversion", "interval", "account", "filter", "time"]:
if name not in values:
values[name] = request.args.get(name)
def static_url(filename: str) -> str:
file_path = STATIC_FOLDER / filename
try:
mtime = int(file_path.stat().st_mtime)
except FileNotFoundError:
mtime = 0
return url_for("static", filename=filename, mtime=mtime)
CACHED_URL_FOR = functools.lru_cache(2048)(flask.url_for)
def url_for(endpoint: str, **values: Any) -> str:
_inject_filters(endpoint, values)
return CACHED_URL_FOR(endpoint, **values)
def url_for_source(**kwargs: Any) -> str:
if g.ledger.fava_options["use-external-editor"]:
return (
f"beancount://{kwargs.get('file_path')}"
+ f"?lineno={kwargs.get('line', 1)}"
)
return url_for("report", report_name="editor", **kwargs)
def translations() -> Any:
return get_translations()._catalog
app.add_template_global(static_url, "static_url")
app.add_template_global(datetime.date.today, "today")
app.add_template_global(url_for, "url_for")
app.add_template_global(url_for_source, "url_for_source")
app.add_template_global(translations, "translations")
@app.context_processor
def template_context() -> Dict[str, Any]:
return dict(ledger=g.ledger)
@app.before_request
def _perform_global_filters() -> None:
ledger = getattr(g, "ledger", None)
if ledger:
if request.blueprint != "json_api":
ledger.changed()
ledger.filter(
account=request.args.get("account"),
filter=request.args.get("filter"),
time=request.args.get("time"),
)
@app.after_request
def _incognito(response: flask.wrappers.Response) -> flask.wrappers.Response:
if app.config.get("INCOGNITO") and response.content_type.startswith(
"text/html"
):
is_editor = (
request.endpoint == "report"
and request.view_args is not None
and request.view_args["report_name"] == "editor"
)
if not is_editor:
original_text = response.get_data(as_text=True)
response.set_data(replace_numbers(original_text))
return response
@app.url_value_preprocessor
def _pull_beancount_file(
_: Optional[str], values: Optional[Dict[str, str]]
) -> None:
g.beancount_file_slug = values.pop("bfile", None) if values else None
with LOAD_FILE_LOCK:
if not app.config.get("LEDGERS"):
_load_file()
if g.beancount_file_slug:
if g.beancount_file_slug not in app.config["LEDGERS"]:
if not any(
g.beancount_file_slug == ledger_slug(ledger)
for ledger in app.config["LEDGERS"].values()
):
abort(404)
update_ledger_slugs(app.config["LEDGERS"].values())
g.ledger = app.config["LEDGERS"][g.beancount_file_slug]
g.conversion = request.args.get("conversion", "at_cost")
g.interval = Interval.get(request.args.get("interval", "month"))
@app.errorhandler(FavaAPIException)
def fava_api_exception(error: FavaAPIException) -> str:
return render_template(
"_layout.html", page_title="Error", content=error.message
)
@app.route("/")
@app.route("/<bfile>/")
def index() -> werkzeug.wrappers.response.Response:
if not g.beancount_file_slug:
g.beancount_file_slug = next(iter(app.config["LEDGERS"]))
index_url = url_for("index")
default_path = app.config["LEDGERS"][g.beancount_file_slug].fava_options[
"default-page"
]
return redirect(f"{index_url}{default_path}")
@app.route("/<bfile>/account/<name>/")
@app.route("/<bfile>/account/<name>/<subreport>/")
def account(name: str, subreport: str = "journal") -> str:
if subreport in ["journal", "balances", "changes"]:
return render_template(
"account.html", account_name=name, subreport=subreport
)
return abort(404)
@app.route("/<bfile>/document/", methods=["GET"])
def document() -> Any:
filename = request.args.get("filename")
if filename is None:
return abort(404)
if is_document_or_import_file(filename, g.ledger):
return send_file_inline(filename)
return abort(404)
@app.route("/<bfile>/statement/", methods=["GET"])
def statement() -> Any:
entry_hash = request.args.get("entry_hash", "")
key = request.args.get("key", "")
document_path = g.ledger.statement_path(entry_hash, key)
return send_file_inline(document_path)
@app.route("/<bfile>/holdings/by_<aggregation_key>/")
def holdings_by(aggregation_key: str) -> str:
if aggregation_key in ["account", "currency", "cost_currency"]:
return render_template(
"_layout.html",
active_page="holdings",
aggregation_key=aggregation_key,
)
return abort(404)
@app.route("/<bfile>/_query_result/")
def query_result() -> str:
return render_template("_query_result.html")
@app.route("/<bfile>/<report_name>/")
def report(report_name: str) -> str:
if report_name in REPORTS:
return render_template("_layout.html", active_page=report_name)
return abort(404)
@app.route("/<bfile>/extension/<report_name>/")
def extension_report(report_name: str) -> str:
try:
template, extension = g.ledger.extensions.template_and_extension(
report_name
)
content = render_template_string(template, extension=extension)
return render_template(
"_layout.html", content=content, page_title=extension.report_title
)
except LookupError:
return abort(404)
@app.route("/<bfile>/download-query/query_result.<result_format>")
def download_query(result_format: str) -> Any:
name, data = g.ledger.query_shell.query_to_file(
request.args.get("query_string", ""), result_format
)
filename = f"{secure_filename(name.strip())}.{result_format}"
return send_file(data, as_attachment=True, download_name=filename)
@app.route("/<bfile>/download-journal/")
|
MIT License
|
docusign/docusign-python-client
|
docusign_esign/models/envelope_documents_result.py
|
EnvelopeDocumentsResult.__eq__
|
python
|
def __eq__(self, other):
if not isinstance(other, EnvelopeDocumentsResult):
return False
return self.to_dict() == other.to_dict()
|
Returns true if both objects are equal
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/envelope_documents_result.py#L139-L144
|
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class EnvelopeDocumentsResult(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'envelope_documents': 'list[EnvelopeDocument]',
'envelope_id': 'str'
}
attribute_map = {
'envelope_documents': 'envelopeDocuments',
'envelope_id': 'envelopeId'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._envelope_documents = None
self._envelope_id = None
self.discriminator = None
setattr(self, "_{}".format('envelope_documents'), kwargs.get('envelope_documents', None))
setattr(self, "_{}".format('envelope_id'), kwargs.get('envelope_id', None))
@property
def envelope_documents(self):
return self._envelope_documents
@envelope_documents.setter
def envelope_documents(self, envelope_documents):
self._envelope_documents = envelope_documents
@property
def envelope_id(self):
return self._envelope_id
@envelope_id.setter
def envelope_id(self, envelope_id):
self._envelope_id = envelope_id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EnvelopeDocumentsResult, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
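# Hypothetical usage sketch (not part of the source above; the envelope ids are made
# up and docusign_esign must be installed): since __eq__ compares to_dict() output,
# two results with identical field values are equal.
a = EnvelopeDocumentsResult(envelope_id="abc-123")
b = EnvelopeDocumentsResult(envelope_id="abc-123")
c = EnvelopeDocumentsResult(envelope_id="def-456")
assert a == b   # identical to_dict() output, so equal
assert a != c   # envelope_id differs, so not equal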
|
MIT License
|
mrod5/pyturb
|
src/pyturb/gas_models/gas.py
|
Gas.cp_molar
|
python
|
def cp_molar(self, temperature):
raise NotImplementedError
|
Molar heat capacity at constant pressure [J/mol/K]
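For orientation only (this is not pyturb code, and the numeric values are approximate): a molar heat capacity in J/mol/K is the specific heat capacity in J/kg/K multiplied by the molar mass in kg/mol.
cp_mass = 1004.7        # specific heat capacity of dry air near 300 K [J/kg/K], approximate
molar_mass = 28.96e-3   # molar mass of dry air [kg/mol], approximate
cp_molar = cp_mass * molar_mass
print(round(cp_molar, 1))  # about 29.1 J/mol/K, close to 7/2 * R for a diatomic ideal gas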
|
https://github.com/mrod5/pyturb/blob/08b4016528fc50733fff58d967d1000bf1e634c9/src/pyturb/gas_models/gas.py#L121-L125
|
from abc import abstractmethod
class Gas(object):
@property
@abstractmethod
def gas_species(self):
        raise NotImplementedError
@property
@abstractmethod
def Ng(self):
        raise NotImplementedError
@property
@abstractmethod
def mg(self):
        raise NotImplementedError
@property
@abstractmethod
def Ru(self):
        raise NotImplementedError
@property
@abstractmethod
def Rg(self):
raise NotImplementedError
@property
@abstractmethod
def Mg(self):
raise NotImplementedError
@abstractmethod
def cp(self, temperature):
raise NotImplementedError
@abstractmethod
def cv(self, temperature):
raise NotImplementedError
@abstractmethod
def gamma(self, temperature):
raise NotImplementedError
@abstractmethod
|
MIT License
|
orange-opensource/rtpy
|
rtpy/repositories.py
|
RtpyRepositories.repository_configuration
|
python
|
def repository_configuration(self, repo_key, **kwargs):
api_method = self._category + "Repository Configuration"
target = self._prefix + repo_key
return self._request("GET", target, api_method, kwargs)
|
Retrieve the current configuration of a repository.
Supported by local, remote and virtual repositories.
Parameters
----------
repo_key: str
    Key of the repository
**kwargs
    Keyword arguments
|
https://github.com/orange-opensource/rtpy/blob/389d2fd0186176862b1dbf2cf1df77d9e5b5e623/rtpy/repositories.py#L35-L51
|
from .tools import RtpyBase
class RtpyRepositories(RtpyBase):
def get_repositories(self, options=None, **kwargs):
api_method = self._category + "Get Repositories"
target = self._append_to_string("repositories", options)
return self._request("GET", target, api_method, kwargs)
|
Apache License 2.0
|