repository_name (stringlengths 7-107) | function_path (stringlengths 4-190) | function_identifier (stringlengths 1-236) | language (stringclasses 1 value) | function (stringlengths 9-647k) | docstring (stringlengths 5-488k) | function_url (stringlengths 71-285) | context (stringlengths 0-2.51M) | license (stringclasses 5 values) |
---|---|---|---|---|---|---|---|---|
tuturto/pyherc
|
src/pyherc/test/builders/action.py
|
TrappingFactoryBuilder.build
|
python
|
def build(self):
return TrappingFactory(self.trap_creator)
|
Builds a trapping factory.
|
https://github.com/tuturto/pyherc/blob/4e7c72a4d80d335f7d3c48cecac96cd7105acac4/src/pyherc/test/builders/action.py#L427-L431
|
from pyherc.rules.consume.factories import DrinkFactory
from pyherc.rules.digging.factories import DigFactory
from pyherc.rules.inventory.equip import EquipFactory
from pyherc.rules.inventory.factories import (DropFactory, InventoryFactory,
PickUpFactory)
from pyherc.rules.inventory.unequip import UnEquipFactory
from pyherc.rules.magic import GainDomainFactory, SpellCastingFactory
from pyherc.rules.mitosis.factory import MitosisFactory
from pyherc.rules.metamorphosis.factory import MetamorphosisFactory
from pyherc.rules.trapping.factory import TrappingFactory
from pyherc.rules.public import ActionFactory
from random import Random
class ActionFactoryBuilder():
def __init__(self):
super().__init__()
self.model = None
self.factories = []
self.effect_factory = None
self.use_real_drink_factory = False
self.use_real_inventory_factory = False
self.use_real_spellcasting_factory = False
self.use_real_gain_domain_factory = False
self.use_real_mitosis_factory = False
self.use_real_metamorphosis_factory = False
self.use_real_dig_factory = False
self.use_real_trapping_factory = False
def with_drink_factory(self, drink_factory=None):
if drink_factory is None:
self.use_real_drink_factory = True
else:
if hasattr(drink_factory, 'build'):
self.factories.append(drink_factory.build())
else:
self.factories.append(drink_factory)
return self
def with_spellcasting_factory(self, spellcasting_factory=None):
if not spellcasting_factory:
self.use_real_spellcasting_factory = True
else:
if hasattr(spellcasting_factory, 'build'):
self.factories.append(spellcasting_factory.build())
else:
self.factories.append(spellcasting_factory)
return self
def with_inventory_factory(self):
self.use_real_inventory_factory = True
return self
def with_effect_factory(self, effect_factory):
self.effect_factory = effect_factory
return self
def with_gain_domain_factory(self, gain_domain_factory=None):
if gain_domain_factory:
self.factories.append(gain_domain_factory)
else:
self.use_real_gain_domain_factory = True
return self
def with_mitosis_factory(self, mitosis_factory=None):
if mitosis_factory:
self.factories.append(mitosis_factory)
else:
self.use_real_mitosis_factory = True
return self
def with_metamorphosis_factory(self, metamorphosis_factory=None):
if metamorphosis_factory:
self.factories.append(metamorphosis_factory)
else:
self.use_real_metamorphosis_factory = True
return self
def with_dig_factory(self, dig_factory=None):
if dig_factory:
self.factories.append(dig_factory)
else:
self.use_real_dig_factory = True
return self
def with_trapping_factory(self, trapping_factory=None):
if trapping_factory:
self.factories.append(trapping_factory)
else:
self.use_real_trapping_factory = True
return self
def build(self):
if self.use_real_drink_factory:
self.factories.append((DrinkFactoryBuilder()
.with_effect_factory(self.effect_factory)
.build()))
if self.use_real_inventory_factory:
self.factories.append(InventoryFactory([PickUpFactory(),
DropFactory(),
EquipFactory(),
UnEquipFactory()]))
if self.use_real_spellcasting_factory:
self.factories.append(SpellCastingFactoryBuilder().build())
if self.use_real_gain_domain_factory:
self.factories.append(GainDomainFactoryBuilder().build())
if self.use_real_mitosis_factory:
self.factories.append(MitosisFactoryBuilder()
.build())
if self.use_real_metamorphosis_factory:
self.factories.append(MetamorphosisFactoryBuilder().build())
if self.use_real_dig_factory:
self.factories.append(DigFactoryBuilder().build())
if self.use_real_trapping_factory:
self.factories.append(TrappingFactoryBuilder().build())
action_factory = ActionFactory(self.model,
self.factories)
return action_factory
class DrinkFactoryBuilder():
def __init__(self):
super().__init__()
self.effect_factory = None
def with_effect_factory(self, effect_factory):
self.effect_factory = effect_factory
return self
def build(self):
return DrinkFactory(self.effect_factory)
class GainDomainFactoryBuilder():
def __init__(self):
super().__init__()
def build(self):
return GainDomainFactory()
class SpellCastingFactoryBuilder():
def __init__(self):
super().__init__()
self.spell_factory = None
self.use_real_spell_factory = False
self.effects_factory = None
self.use_real_effects_factory = False
def with_spell_factory(self, spell_factory=None):
if not spell_factory:
self.use_real_spell_factory = True
else:
if hasattr(spell_factory, 'build'):
self.spell_factory = spell_factory.build()
else:
self.spell_factory = spell_factory
return self
def with_effects_factory(self, effects_factory=None):
if effects_factory:
if hasattr(effects_factory, 'build'):
self.effects_factory = effects_factory.build()
else:
self.effects_factory = effects_factory
else:
self.use_real_effects_factory = True
return self
def build(self):
if self.use_real_spell_factory:
pass
if self.use_real_effects_factory:
pass
return SpellCastingFactory(spell_factory=self.spell_factory,
effects_factory=self.effects_factory)
class MitosisFactoryBuilder():
def __init__(self):
super().__init__()
self.character_generator = None
self.character_limit = 30
self.rng = Random()
def with_character_limit(self, character_limit):
self.character_limit = character_limit
return self
def with_character_generator(self, generator):
self.character_generator = generator
return self
def with_random_number_generator(self, rng):
self.rng = rng
return self
def build(self):
return MitosisFactory(character_generator=self.character_generator,
character_limit=self.character_limit,
rng=self.rng)
class MetamorphosisFactoryBuilder():
def __init__(self):
super().__init__()
self.character_generator = None
self.rng = Random()
def with_character_generator(self, generator):
self.character_generator = generator
return self
def with_random_number_generator(self, rng):
self.rng = rng
return self
def build(self):
return MetamorphosisFactory(character_generator=self.character_generator,
rng=self.rng)
class DigFactoryBuilder():
def __init__(self):
super().__init__()
self.rng = Random()
def with_random_number_generator(self, rng):
self.rng = rng
return self
def build(self):
return DigFactory(self.rng)
class TrappingFactoryBuilder():
def __init__(self):
super().__init__()
self.trap_creator = None
def with_trap_creator(self, trap_creator):
self.trap_creator = trap_creator
return self
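A minimal usage sketch of the builders above, assuming a stub trap creator (StubTrapCreator is a made-up placeholder; TrappingFactoryBuilder only stores it and hands it to TrappingFactory):

class StubTrapCreator:
    pass

# Build the trapping factory on its own...
trapping_factory = (TrappingFactoryBuilder()
                    .with_trap_creator(StubTrapCreator())
                    .build())

# ...or feed it into the composite ActionFactoryBuilder via the same fluent style.
action_factory = (ActionFactoryBuilder()
                  .with_trapping_factory(trapping_factory)
                  .build())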
|
MIT License
|
georgepar/slp
|
slp/util/system.py
|
safe_mkdirs
|
python
|
def safe_mkdirs(path: str) -> None:
if not os.path.exists(path):
try:
os.makedirs(path)
except Exception as e:
logger.warning(e)
raise IOError((f"Failed to create recursive directories: {path}"))
|
Recursively create all directories in the input path.
Utility function similar to mkdir -p: makes the directories recursively if the given path does not exist.
Args:
path (str): Path to mkdir -p
Examples:
>>> safe_mkdirs("super/cali/fragi/listic/expi/ali/docious")
|
https://github.com/georgepar/slp/blob/ac55154f063245e0e4ed584c59f16370d228d8a7/slp/util/system.py#L145-L161
|
import functools
import os
import pickle
import shutil
import socket
import subprocess
import sys
import time
import urllib
import urllib.request
from datetime import datetime
from typing import Any, Callable, Optional, Tuple, cast
import validators
import yaml
from loguru import logger
from slp.util import types
try:
import ujson as json
except ImportError:
import json
def has_internet_connection(timeout: int = 3) -> bool:
host, port = "8.8.8.8", 53
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except socket.error as ex:
print(ex)
return False
def date_fname() -> str:
return datetime.now().strftime("%Y%m%d-%H%M%S")
def print_separator(
symbol: str = "*", n: int = 10, print_fn: Callable[[str], None] = print
):
print_fn(symbol * n)
def is_url(inp: Optional[str]) -> types.ValidationResult:
if not inp:
return False
return validators.url(inp)
def is_file(inp: Optional[str]) -> types.ValidationResult:
if not inp:
return False
return os.path.isfile(inp)
def is_subpath(child: str, parent: str) -> bool:
parent = os.path.abspath(parent)
child = os.path.abspath(child)
return cast(
bool, os.path.commonpath([parent]) == os.path.commonpath([parent, child])
)
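A short sketch of how these helpers might be combined; the paths are purely illustrative:

# Build a timestamped output directory and create it only if it stays
# inside the intended base directory.
target = "output/run-" + date_fname()   # e.g. "output/run-20240101-120000"
if is_subpath(target, "output"):        # guard against escaping the base dir
    safe_mkdirs(target)                 # mkdir -p semantics; raises IOError on failure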
|
MIT License
|
snowplow/snowplow-python-tracker
|
snowplow_tracker/payload.py
|
Payload.add_json
|
python
|
def add_json(self, dict_, encode_base64, type_when_encoded, type_when_not_encoded, json_encoder=None):
if dict_ is not None and dict_ != {}:
json_dict = json.dumps(dict_, ensure_ascii=False, default=json_encoder)
if encode_base64:
encoded_dict = base64.urlsafe_b64encode(json_dict.encode("utf-8"))
if not isinstance(encoded_dict, str):
encoded_dict = encoded_dict.decode("utf-8")
self.add(type_when_encoded, encoded_dict)
else:
self.add(type_when_not_encoded, json_dict)
|
Add an encoded or unencoded JSON to the payload
:param dict_: Custom context for the event
:type dict_: dict(string:*) | None
:param encode_base64: Whether the JSON should be base64 encoded
:type encode_base64: bool
:param type_when_encoded: Name of the field when encode_base64 is set
:type type_when_encoded: string
:param type_when_not_encoded: Name of the field when encode_base64 is not set
:type type_when_not_encoded: string
:param json_encoder: Custom JSON serializer that gets called on non-serializable object
:type json_encoder: function | None
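A brief usage sketch; the field names "cx" and "co" and the context dict are illustrative values, not something mandated by this method:

p = Payload()
context = {"user_type": "tester"}

# Base64-encoded: the serialized JSON is stored under the encoded field name.
p.add_json(context, encode_base64=True,
           type_when_encoded="cx", type_when_not_encoded="co")

# Unencoded: the same JSON would instead be stored as plain text under "co".
p.add_json(context, encode_base64=False,
           type_when_encoded="cx", type_when_not_encoded="co")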
|
https://github.com/snowplow/snowplow-python-tracker/blob/23ff19811a372a6d68aceb9b866681e5948a53a0/snowplow_tracker/payload.py#L66-L93
|
import random
import time
import json
import base64
from contracts import contract
class Payload:
def __init__(self, dict_=None):
self.nv_pairs = {}
if dict_ is not None:
for f in dict_:
self.nv_pairs[f] = dict_[f]
"""
Methods to add to the payload
"""
def add(self, name, value):
if not (value == "" or value is None):
self.nv_pairs[name] = value
@contract
def add_dict(self, dict_, base64=False):
for f in dict_:
self.add(f, dict_[f])
@contract
|
Apache License 2.0
|
genialis/resolwe
|
resolwe/flow/executors/startup_communication_container.py
|
modify_connector_settings
|
python
|
def modify_connector_settings():
connector_settings = global_settings.SETTINGS["STORAGE_CONNECTORS"]
storages = global_settings.SETTINGS["FLOW_STORAGE"]
connector_storage = {
connector_name: storage_name
for storage_name, storage_settings in storages.items()
for connector_name in storage_settings["connectors"]
}
for connector_name in MOUNTED_CONNECTORS:
storage_name = connector_storage[connector_name]
connector_settings[connector_name]["config"]["path"] = Path(
f"/{storage_name}_{connector_name}"
)
|
Modify mountpoints and add processing and input connectors.
The path settings on filesystem connectors point to paths on the worker
node. They have to be remapped to paths inside the container, and the
processing and input connector settings must be added.
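The remapping itself is a small dictionary inversion followed by a path rewrite. A standalone sketch with made-up settings (the names and shapes below are illustrative, not the real Resolwe configuration):

from pathlib import Path

storages = {"data": {"connectors": ["local"]},
            "upload": {"connectors": ["upload_local"]}}
connector_settings = {"local": {"config": {"path": "/srv/resolwe/data"}},
                      "upload_local": {"config": {"path": "/srv/resolwe/upload"}}}
mounted = ["local", "upload_local"]

# Invert storage -> connectors into connector -> storage.
connector_storage = {name: storage
                     for storage, cfg in storages.items()
                     for name in cfg["connectors"]}

# Rewrite each mounted connector's path to its in-container mount point.
for name in mounted:
    connector_settings[name]["config"]["path"] = Path(f"/{connector_storage[name]}_{name}")
# -> "local" now points to /data_local, "upload_local" to /upload_upload_local.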
|
https://github.com/genialis/resolwe/blob/dc8a70979ae9722e6c60ae0e3935c6542c637f48/resolwe/flow/executors/startup_communication_container.py#L693-L713
|
import array
import asyncio
import functools
import json
import logging
import os
import shutil
import signal
import socket
import sys
import threading
from contextlib import suppress
from distutils.util import strtobool
from pathlib import Path
from typing import Any, Optional, Tuple
import zmq
import zmq.asyncio
from executors import constants, global_settings
from executors.connectors import connectors
from executors.connectors.baseconnector import BaseStorageConnector
from executors.connectors.hasher import StreamHasher
from executors.connectors.transfer import Transfer
from executors.socket_utils import (
BaseCommunicator,
BaseProtocol,
Message,
PeerIdentity,
Response,
ResponseStatus,
SocketCommunicator,
)
from executors.transfer import transfer_data
from executors.zeromq_utils import ZMQCommunicator
PROCESSING_SOCKET = constants.SOCKETS_VOLUME / constants.COMMUNICATION_PROCESSING_SOCKET
UPLOAD_FILE_SOCKET = constants.SOCKETS_VOLUME / constants.UPLOAD_FILE_SOCKET
LISTENER_IP = os.getenv("LISTENER_SERVICE_HOST", "127.0.0.1")
LISTENER_PORT = os.getenv("LISTENER_SERVICE_PORT", "53893")
LISTENER_PROTOCOL = os.getenv("LISTENER_PROTOCOL", "tcp")
DATA_ID = int(os.getenv("DATA_ID", "-1"))
KEEP_DATA = bool(strtobool(os.environ.get("FLOW_MANAGER_KEEP_DATA", "False")))
RUNNING_IN_KUBERNETES = bool(
strtobool(os.environ.get("RUNNING_IN_KUBERNETES", "False"))
)
DESCRIPTOR_CHUNK_SIZE = int(os.environ.get("DESCRIPTOR_CHUNK_SIZE", 100))
MOUNTED_CONNECTORS = [
name for name in os.environ["MOUNTED_CONNECTORS"].split(",") if name
]
STORAGE_CONNECTOR: dict[
str, Tuple[BaseStorageConnector, Optional[BaseStorageConnector]]
] = {}
LOG_LEVEL = int(os.getenv("LOG_LEVEL", logging.DEBUG))
BOTO_LOG_LEVEL = int(os.getenv("BOTO_LOG_LEVEL", logging.WARNING))
GOOGLE_LOG_LEVEL = int(os.getenv("GOOGLE_LOG_LEVEL", logging.WARNING))
logging.basicConfig(
stream=sys.stdout,
level=LOG_LEVEL,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
for boto_logger_name in ["botocore", "boto3", "s3transfer", "urllib3"]:
logging.getLogger(boto_logger_name).setLevel(BOTO_LOG_LEVEL)
for google_logger in ["google"]:
logging.getLogger(google_logger).setLevel(GOOGLE_LOG_LEVEL)
logger = logging.getLogger(__name__)
logger.info("Starting communication container for data with id %d.", DATA_ID)
def purge_secrets():
def handle_error(func, path, exc_info):
if isinstance(exc_info[1], PermissionError):
os.chmod(path, 0o700)
shutil.rmtree(path)
try:
for root, dirs, files in os.walk(constants.SECRETS_VOLUME):
for f in files:
os.chmod(os.path.join(root, f), 0o700)
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d), onerror=handle_error)
except OSError:
logger.exception("Manager exception while removing data runtime directory.")
class FakeConnector(BaseStorageConnector):
def __init__(
self,
config: dict,
name: str,
file_streams: dict[str, Any],
hashes: dict[str, str],
):
super().__init__(config, name)
self.path = Path()
self.supported_hash = ["crc32c", "md5", "awss3etag"]
self.multipart_chunksize = self.CHUNK_SIZE
self.file_streams = file_streams
self.hashes = hashes
self.get_ensures_data_integrity = True
def duplicate(self):
return self
def base_path(self):
return self.path
def get_object_list(self, url):
raise NotImplementedError
def push(self, stream, url):
raise NotImplementedError
def exists(self, url):
raise NotImplementedError
def get_hash(self, url, hash_type):
return self.hashes[os.fspath(url)][hash_type]
def get_hashes(self, url, hash_types):
raise NotImplementedError
def set_hashes(self, url, hashes):
raise NotImplementedError
def get(self, url, stream):
raise NotImplementedError
@property
def can_open_stream(self):
return True
def presigned_url(
self,
url,
expiration=10,
force_download=False,
):
raise NotImplementedError
def open_stream(self, url, mode):
path = Path(url)
if global_settings.LOCATION_SUBPATH < path:
url = path.relative_to(global_settings.LOCATION_SUBPATH)
file_stream = self.file_streams[os.fspath(url)]
return file_stream
class Uploader(threading.Thread):
def __init__(self, manager: "Manager", loop: asyncio.AbstractEventLoop):
super().__init__()
self._terminating = False
self.manager = manager
self.loop = loop
self.ready = threading.Event()
def receive_file_descriptors(
self, sock: socket.SocketType
) -> Tuple[str, dict[str, Any], bool]:
filenames_length = int.from_bytes(sock.recv(8), byteorder="big")
logger.debug(
"Received file descriptors message of length: %d.", filenames_length
)
if filenames_length == 0:
return ("", dict(), False)
fds = array.array("i")
msg, ancdata, flags, addr = sock.recvmsg(
filenames_length, socket.CMSG_LEN(DESCRIPTOR_CHUNK_SIZE * fds.itemsize)
)
logger.debug("Received file descriptors: %s, %s.", msg, ancdata)
storage_name, filenames, need_presigned_urls = json.loads(msg.decode())
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS:
fds.frombytes(
cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]
)
return (storage_name, dict(zip(filenames, fds)), need_presigned_urls)
def send_message(self, sock, response):
payload = json.dumps(response).encode()
sock.sendall(len(payload).to_bytes(8, byteorder="big"))
sock.sendall(payload)
def run(self):
server_socket = socket.socket(family=socket.AF_UNIX)
server_socket.settimeout(constants.CONTAINER_TIMEOUT)
server_socket.bind(os.fspath(UPLOAD_FILE_SOCKET))
server_socket.listen()
self.ready.set()
try:
client, info = server_socket.accept()
except socket.timeout:
logger.error("Processing container is not connected to the upload socket")
server_socket.close()
return
client.settimeout(1)
logger.info("Uploader client connected (%s)." % client)
with client:
while not self._terminating:
try:
(
storage_name,
file_descriptors,
need_presigned_urls,
) = self.receive_file_descriptors(client)
if not file_descriptors:
break
except socket.timeout:
logger.info("Uploader timeout waiting for data.")
continue
except:
logger.exception(
"Exception while receiving file descriptors, exiting."
)
break
try:
presigned_urls = []
to_transfer = []
logger.debug("Got %s", file_descriptors)
hashes: dict[str, str] = dict()
referenced_files: dict[str, dict[str, str]] = dict()
file_streams = {
file_name: os.fdopen(file_descriptor, "rb")
for file_name, file_descriptor in file_descriptors.items()
}
to_connector = STORAGE_CONNECTOR[storage_name][0]
for file_name in file_descriptors:
file_descriptor = file_descriptors[file_name]
stream = file_streams[file_name]
file_size = os.stat(file_descriptor).st_size
min_chunk_size = 8 * 1024 * 1024
needed_chunk_size = int(file_size / 10000) + 1
chunk_size = max(min_chunk_size, needed_chunk_size)
hasher = StreamHasher(chunk_size=chunk_size)
hasher.compute(stream)
referenced_files[file_name] = {
hash_type: hasher.hexdigest(hash_type)
for hash_type in StreamHasher.KNOWN_HASH_TYPES
}
hashes[
os.fspath(global_settings.LOCATION_SUBPATH / file_name)
] = referenced_files[file_name].copy()
referenced_files[file_name]["chunk_size"] = chunk_size
referenced_files[file_name]["path"] = file_name
referenced_files[file_name]["size"] = file_size
if need_presigned_urls:
presigned_urls.append(
to_connector.presigned_url(
global_settings.LOCATION_SUBPATH / file_name,
expiration=7 * 24 * 60 * 60,
)
)
stream.seek(0)
to_transfer = list(referenced_files.values())
from_connector = FakeConnector(
{"path": ""},
"File descriptors connector",
file_streams,
hashes,
)
transfer = Transfer(from_connector, to_connector)
transfer.transfer_objects(
global_settings.LOCATION_SUBPATH, to_transfer
)
except:
logger.exception("Exception uploading data.")
self.send_message(client, {"success": False})
break
else:
if to_transfer:
future = asyncio.run_coroutine_threadsafe(
self.manager.send_referenced_files(to_transfer), self.loop
)
future_response = future.result()
if (
future_response.response_status == ResponseStatus.ERROR
and future_response.message_data != "OK"
):
response = {"success": False}
else:
response = {
"success": True,
"presigned_urls": presigned_urls,
}
self.send_message(client, response)
finally:
for stream in file_streams.values():
stream.close()
def terminate(self):
self._terminating = True
class ListenerProtocol(BaseProtocol):
def __init__(
self, communicator: BaseCommunicator, processing_communicator: BaseCommunicator
):
super().__init__(communicator, logger)
self.processing_communicator = processing_communicator
async def get_script(self) -> str:
response = await self.communicator.send_command(
Message.command("get_script", "")
)
if response.response_status == ResponseStatus.ERROR:
raise RuntimeError("Response status error while fetching script.")
return response.message_data
async def finish(self, return_code: int):
await self.communicator.send_command(
Message.command("finish", {"rc": return_code})
)
async def handle_terminate(
self, message: Message, identity: PeerIdentity
) -> Response[str]:
response = await self.processing_communicator.send_command(
Message.command("terminate", "")
)
response.uuid = message.uuid
return response
class ProcessingProtocol(BaseProtocol):
def __init__(
self, communicator: BaseCommunicator, listener_communicator: BaseCommunicator
):
super().__init__(communicator, logger)
self.listener_communicator = listener_communicator
async def default_command_handler(
self, message: Message, identity: PeerIdentity
) -> Response:
return await self.listener_communicator.send_command(message, identity)
async def handle_upload_dirs(
self, message: Message[list[str]], identity: PeerIdentity
) -> Response[str]:
subpath = global_settings.LOCATION_SUBPATH
directories = message.message_data
referenced_dirs = []
for directory in directories:
if storage_connectors := STORAGE_CONNECTOR.get("data"):
if mounted_connector := storage_connectors[1]:
destination_dir = mounted_connector.path / subpath / directory
destination_dir.mkdir(parents=True, exist_ok=True)
referenced_dirs.append({"path": os.path.join(directory, ""), "size": 0})
return await self.listener_communicator.send_command(
Message.command("referenced_files", referenced_dirs)
)
async def process_script(self, script: str) -> int:
try:
response = await self.communicator.send_command(
Message.command("process_script", script), response_timeout=None
)
return response.message_data
except asyncio.CancelledError:
return 1
async def terminate(self):
await self.communicator.send_command(Message.command("terminate", ""))
class Manager:
def __init__(self):
self.processing_communicator: Optional[BaseCommunicator] = None
self.listener_communicator: Optional[BaseCommunicator] = None
self.processing_container_connected = asyncio.Event()
self._process_script_task: Optional[asyncio.Task] = None
async def send_referenced_files(self, referenced_files):
return await self.listener_communicator.send_command(
Message.command("referenced_files", referenced_files)
)
async def _handle_processing_container_connection(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
):
logger.debug("Processing container connected")
writer.write(b"PING\n")
await writer.drain()
self.processing_container_connected.set()
self.processing_communicator = SocketCommunicator(
reader, writer, "(self <-> processing)", logger
)
async def start_processing_socket(self):
await asyncio.start_unix_server(
self._handle_processing_container_connection, os.fspath(PROCESSING_SOCKET)
)
logger.debug("Started listening on %s.", PROCESSING_SOCKET)
async def open_listener_connection(self) -> ZMQCommunicator:
zmq_context = zmq.asyncio.Context.instance()
zmq_socket = zmq_context.socket(zmq.DEALER)
zmq_socket.setsockopt(zmq.IDENTITY, str(DATA_ID).encode())
connect_string = f"{LISTENER_PROTOCOL}://{LISTENER_IP}:{LISTENER_PORT}"
logger.debug("Opening listener connection to %s", connect_string)
zmq_socket.connect(connect_string)
return ZMQCommunicator(zmq_socket, "worker <-> listener", logger)
async def transfer_missing_data(self):
try:
await transfer_data(self.listener_communicator)
except RuntimeError:
with suppress(Exception):
await self.listener_communicator.send_command(
Message.command(
"process_log", {"error": ["Error transfering missing data."]}
)
)
raise
def _communicator_stopped(self, future: asyncio.Future):
if self._process_script_task:
logger.debug("Communicator closed, cancelling script processing.")
self._process_script_task.cancel()
async def start(self) -> int:
try:
return_code = 1
logger.debug("Starting upload thread")
upload_thread = Uploader(self, asyncio.get_running_loop())
upload_thread.start()
if not upload_thread.ready.wait(60):
logger.error("Upload thread failed to start, terminating.")
raise RuntimeError("Upload thread failed to start.")
await self.start_processing_socket()
self.listener_communicator = await self.open_listener_connection()
try:
logger.debug("Waiting for the processing container to connect")
await asyncio.wait_for(
self.processing_container_connected.wait(),
constants.CONTAINER_TIMEOUT,
)
except asyncio.TimeoutError:
message = "Unable to connect to the processing container."
logger.critical(message)
with suppress(Exception):
await self.listener_communicator.send_command(
Message.command("process_log", {"error": [message]})
)
sys.exit(1)
logger.debug("Connected to the processing container.")
listener = ListenerProtocol(
self.listener_communicator, self.processing_communicator
)
processing = ProcessingProtocol(
self.processing_communicator, self.listener_communicator
)
try:
listener_task = asyncio.ensure_future(listener.communicate())
response = await self.listener_communicator.send_command(
Message.command("bootstrap", (DATA_ID, "communication"))
)
global_settings.initialize_constants(DATA_ID, response.message_data)
modify_connector_settings()
connectors.recreate_connectors()
set_default_storage_connectors()
processing_task = asyncio.ensure_future(processing.communicate())
listener_task.add_done_callback(self._communicator_stopped)
processing_task.add_done_callback(self._communicator_stopped)
await self.listener_communicator.send_command(
Message.command("update_status", "PR")
)
script = await listener.get_script()
self._process_script_task = asyncio.create_task(
processing.process_script(script)
)
return_code = await self._process_script_task
self._process_script_task = None
except RuntimeError as runtime_exception:
logger.exception("Error processing script.")
with suppress(Exception):
await self.listener_communicator.send_command(
Message.command(
"process_log",
{
"error": [
"Runtime error in communication container: "
f"{runtime_exception}."
]
},
)
)
except Exception:
logger.exception("While running communication container")
finally:
logger.debug("Terminating upload thread.")
upload_thread.terminate()
upload_thread.join()
if not KEEP_DATA:
purge_secrets()
try:
await listener.finish(return_code)
except RuntimeError:
logger.exception("Error sending finish command.")
except:
logger.exception("Unknown error sending finish command.")
listener.stop_communicate()
processing.stop_communicate()
with suppress(asyncio.TimeoutError):
await asyncio.wait_for(
asyncio.gather(listener_task, processing_task), timeout=10
)
return return_code
def set_default_storage_connectors():
storages = global_settings.SETTINGS["FLOW_STORAGE"]
for storage_name, storage_settings in storages.items():
storage_connectors = connectors.for_storage(storage_name)
default_connector = storage_connectors[0]
default_mounted_connector = None
for connector in storage_connectors:
if connector.name in MOUNTED_CONNECTORS:
default_mounted_connector = connector
break
STORAGE_CONNECTOR[storage_name] = (default_connector, default_mounted_connector)
|
Apache License 2.0
|
genialis/resolwe
|
resolwe/flow/expression_engines/jinja/filters.py
|
_get_data_attr
|
python
|
def _get_data_attr(data, attr):
if isinstance(data, dict):
data = data["__id"]
data_obj = Data.objects.get(id=data)
return getattr(data_obj, attr)
|
Get data object field.
|
https://github.com/genialis/resolwe/blob/dc8a70979ae9722e6c60ae0e3935c6542c637f48/resolwe/flow/expression_engines/jinja/filters.py#L20-L28
|
import copy
import json
import os
from django.conf import settings
from resolwe.flow.models import Data
from resolwe.flow.models.utils import hydrate_input_references, hydrate_input_uploads
from resolwe.flow.utils import dict_dot
def apply_filter_list(func, obj):
if isinstance(obj, (list, tuple)):
return [func(item) for item in obj]
return func(obj)
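apply_filter_list maps a filter over lists while leaving scalars untouched. A tiny sketch with a stand-in filter (no database access, unlike the real _get_data_attr, which resolves ids through Data.objects):

def double(value):
    return value * 2

apply_filter_list(double, 3)          # -> 6
apply_filter_list(double, [1, 2, 3])  # -> [2, 4, 6]

# The real filters wrap _get_data_attr the same way, e.g. via a small lambda
# or functools.partial, so a list of data references is resolved element-wise.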
|
Apache License 2.0
|
reswitched/robocop-ng
|
robocop_ng/cogs/mod.py
|
Mod.say
|
python
|
async def say(self, ctx, *, the_text: str):
await ctx.send(the_text)
|
Repeats a given text, staff only.
|
https://github.com/reswitched/robocop-ng/blob/50d2c4f99ba04e2237f6d56b82cc0a7c647b84cc/robocop_ng/cogs/mod.py#L659-L661
|
import discord
from discord.ext import commands
from discord.ext.commands import Cog
import config
from helpers.checks import check_if_staff, check_if_bot_manager
from helpers.userlogs import userlog
from helpers.restrictions import add_restriction, remove_restriction
import io
class Mod(Cog):
def __init__(self, bot):
self.bot = bot
def check_if_target_is_staff(self, target):
return any(r.id in config.staff_role_ids for r in target.roles)
@commands.guild_only()
@commands.check(check_if_bot_manager)
@commands.command()
async def setguildicon(self, ctx, url):
img_bytes = await self.bot.aiogetbytes(url)
await ctx.guild.edit(icon=img_bytes, reason=str(ctx.author))
await ctx.send(f"Done!")
log_channel = self.bot.get_channel(config.modlog_channel)
log_msg = (
f"βοΈ **Guild Icon Update**: {ctx.author} changed the guild icon."
f"\nπ __Jump__: <{ctx.message.jump_url}>"
)
img_filename = url.split("/")[-1].split("#")[0]
img_file = discord.File(io.BytesIO(img_bytes), filename=img_filename)
await log_channel.send(log_msg, file=img_file)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def mute(self, ctx, target: discord.Member, *, reason: str = ""):
if target == ctx.author:
return await ctx.send("You can't do mod actions on yourself.")
elif target == self.bot.user:
return await ctx.send(
f"I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
elif self.check_if_target_is_staff(target):
return await ctx.send(
"I can't mute this user as they're a member of staff."
)
userlog(target.id, ctx.author, reason, "mutes", target.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
dm_message = f"You were muted!"
if reason:
dm_message += f' The given reason is: "{reason}".'
try:
await target.send(dm_message)
except discord.errors.Forbidden:
pass
mute_role = ctx.guild.get_role(config.mute_role)
await target.add_roles(mute_role, reason=str(ctx.author))
chan_message = (
f"π **Muted**: {str(ctx.author)} muted "
f"{target.mention} | {safe_name}\n"
f"π· __User ID__: {target.id}\n"
)
if reason:
chan_message += f'βοΈ __Reason__: "{reason}"'
else:
chan_message += (
"Please add an explanation below. In the future, "
"it is recommended to use `.mute <user> [reason]`"
" as the reason is automatically sent to the user."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(f"{target.mention} can no longer speak.")
add_restriction(target.id, config.mute_role)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def unmute(self, ctx, target: discord.Member):
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
mute_role = ctx.guild.get_role(config.mute_role)
await target.remove_roles(mute_role, reason=str(ctx.author))
chan_message = (
f"π **Unmuted**: {str(ctx.author)} unmuted "
f"{target.mention} | {safe_name}\n"
f"π· __User ID__: {target.id}\n"
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(f"{target.mention} can now speak again.")
remove_restriction(target.id, config.mute_role)
@commands.guild_only()
@commands.bot_has_permissions(kick_members=True)
@commands.check(check_if_staff)
@commands.command()
async def kick(self, ctx, target: discord.Member, *, reason: str = ""):
if target == ctx.author:
return await ctx.send("You can't do mod actions on yourself.")
elif target == self.bot.user:
return await ctx.send(
f"I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
elif self.check_if_target_is_staff(target):
return await ctx.send(
"I can't kick this user as they're a member of staff."
)
userlog(target.id, ctx.author, reason, "kicks", target.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
dm_message = f"You were kicked from {ctx.guild.name}."
if reason:
dm_message += f' The given reason is: "{reason}".'
dm_message += (
"\n\nYou are able to rejoin the server,"
" but please be sure to behave when participating again."
)
try:
await target.send(dm_message)
except discord.errors.Forbidden:
pass
await target.kick(reason=f"{ctx.author}, reason: {reason}")
chan_message = (
f"π’ **Kick**: {str(ctx.author)} kicked "
f"{target.mention} | {safe_name}\n"
f"π· __User ID__: {target.id}\n"
)
if reason:
chan_message += f'βοΈ __Reason__: "{reason}"'
else:
chan_message += (
"Please add an explanation below. In the future"
", it is recommended to use "
"`.kick <user> [reason]`"
" as the reason is automatically sent to the user."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(f"π’ {safe_name}, π.")
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.check(check_if_staff)
@commands.command(aliases=["yeet"])
async def ban(self, ctx, target: discord.Member, *, reason: str = ""):
if target == ctx.author:
if target.id == 181627658520625152:
return await ctx.send(
"https://cdn.discordapp.com/attachments/286612533757083648/403080855402315796/rehedge.PNG"
)
return await ctx.send("hedgeberg#7337 is now b&. π")
elif target == self.bot.user:
return await ctx.send(
f"I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
elif self.check_if_target_is_staff(target):
return await ctx.send("I can't ban this user as they're a member of staff.")
userlog(target.id, ctx.author, reason, "bans", target.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
dm_message = f"You were banned from {ctx.guild.name}."
if reason:
dm_message += f' The given reason is: "{reason}".'
dm_message += "\n\nThis ban does not expire."
try:
await target.send(dm_message)
except discord.errors.Forbidden:
pass
await target.ban(
reason=f"{ctx.author}, reason: {reason}", delete_message_days=0
)
chan_message = (
f"β **Ban**: {str(ctx.author)} banned "
f"{target.mention} | {safe_name}\n"
f"π· __User ID__: {target.id}\n"
)
if reason:
chan_message += f'βοΈ __Reason__: "{reason}"'
else:
chan_message += (
"Please add an explanation below. In the future"
", it is recommended to use `.ban <user> [reason]`"
" as the reason is automatically sent to the user."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(f"{safe_name} is now b&. π")
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.check(check_if_staff)
@commands.command()
async def bandel(
self, ctx, day_count: int, target: discord.Member, *, reason: str = ""
):
if target == ctx.author:
if target.id == 181627658520625152:
return await ctx.send(
"https://cdn.discordapp.com/attachments/286612533757083648/403080855402315796/rehedge.PNG"
)
return await ctx.send("hedgeberg#7337 is now b&. π")
elif target == self.bot.user:
return await ctx.send(
f"I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
elif self.check_if_target_is_staff(target):
return await ctx.send("I can't ban this user as they're a member of staff.")
if day_count < 0 or day_count > 7:
return await ctx.send(
"Message delete day count needs to be between 0 and 7 days."
)
userlog(target.id, ctx.author, reason, "bans", target.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
dm_message = f"You were banned from {ctx.guild.name}."
if reason:
dm_message += f' The given reason is: "{reason}".'
dm_message += "\n\nThis ban does not expire."
try:
await target.send(dm_message)
except discord.errors.Forbidden:
pass
await target.ban(
reason=f"{ctx.author}, days of message deletions: {day_count}, reason: {reason}",
delete_message_days=day_count,
)
chan_message = (
f"β **Ban**: {str(ctx.author)} banned with {day_count} of messages deleted "
f"{target.mention} | {safe_name}\n"
f"π· __User ID__: {target.id}\n"
)
if reason:
chan_message += f'βοΈ __Reason__: "{reason}"'
else:
chan_message += (
"Please add an explanation below. In the future"
", it is recommended to use `.bandel <daycount> <user> [reason]`"
" as the reason is automatically sent to the user."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(
f"{safe_name} is now b&, with {day_count} days of messages deleted. π"
)
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.check(check_if_staff)
@commands.command(aliases=["softban"])
async def hackban(self, ctx, target: int, *, reason: str = ""):
target_user = await self.bot.fetch_user(target)
target_member = ctx.guild.get_member(target)
if target == ctx.author.id:
return await ctx.send("You can't do mod actions on yourself.")
elif target == self.bot.user:
return await ctx.send(
f"I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
elif target_member and self.check_if_target_is_staff(target_member):
return await ctx.send("I can't ban this user as they're a member of staff.")
userlog(target, ctx.author, reason, "bans", target_user.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
await ctx.guild.ban(
target_user, reason=f"{ctx.author}, reason: {reason}", delete_message_days=0
)
chan_message = (
f"β **Hackban**: {str(ctx.author)} banned "
f"{target_user.mention} | {safe_name}\n"
f"π· __User ID__: {target}\n"
)
if reason:
chan_message += f'βοΈ __Reason__: "{reason}"'
else:
chan_message += (
"Please add an explanation below. In the future"
", it is recommended to use "
"`.hackban <user> [reason]`."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(f"{safe_name} is now b&. π")
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.check(check_if_staff)
@commands.command()
async def massban(self, ctx, *, targets: str):
targets_int = [int(target) for target in targets.strip().split(" ")]
for target in targets_int:
target_user = await self.bot.fetch_user(target)
target_member = ctx.guild.get_member(target)
if target == ctx.author.id:
await ctx.send(f"(re: {target}) You can't do mod actions on yourself.")
continue
elif target == self.bot.user:
await ctx.send(
f"(re: {target}) I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
continue
elif target_member and self.check_if_target_is_staff(target_member):
await ctx.send(
f"(re: {target}) I can't ban this user as they're a member of staff."
)
continue
userlog(target, ctx.author, f"massban", "bans", target_user.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
await ctx.guild.ban(
target_user,
reason=f"{ctx.author}, reason: massban",
delete_message_days=0,
)
chan_message = (
f"β **Massban**: {str(ctx.author)} banned "
f"{target_user.mention} | {safe_name}\n"
f"π· __User ID__: {target}\n"
"Please add an explanation below."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(f"All {len(targets_int)} users are now b&. π")
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.check(check_if_staff)
@commands.command()
async def unban(self, ctx, target: int, *, reason: str = ""):
target_user = await self.bot.fetch_user(target)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
await ctx.guild.unban(target_user, reason=f"{ctx.author}, reason: {reason}")
chan_message = (
f"β οΈ **Unban**: {str(ctx.author)} unbanned "
f"{target_user.mention} | {safe_name}\n"
f"π· __User ID__: {target}\n"
)
if reason:
chan_message += f'βοΈ __Reason__: "{reason}"'
else:
chan_message += (
"Please add an explanation below. In the future"
", it is recommended to use "
"`.unban <user id> [reason]`."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
await ctx.send(f"{safe_name} is now unb&.")
@commands.guild_only()
@commands.bot_has_permissions(ban_members=True)
@commands.check(check_if_staff)
@commands.command()
async def silentban(self, ctx, target: discord.Member, *, reason: str = ""):
if target == ctx.author:
return await ctx.send("You can't do mod actions on yourself.")
elif target == self.bot.user:
return await ctx.send(
f"I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
elif self.check_if_target_is_staff(target):
return await ctx.send("I can't ban this user as they're a member of staff.")
userlog(target.id, ctx.author, reason, "bans", target.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
await target.ban(
reason=f"{ctx.author}, reason: {reason}", delete_message_days=0
)
chan_message = (
f"β **Silent ban**: {str(ctx.author)} banned "
f"{target.mention} | {safe_name}\n"
f"π· __User ID__: {target.id}\n"
)
if reason:
chan_message += f'βοΈ __Reason__: "{reason}"'
else:
chan_message += (
"Please add an explanation below. In the future"
", it is recommended to use `.ban <user> [reason]`"
" as the reason is automatically sent to the user."
)
chan_message += f"\nπ __Jump__: <{ctx.message.jump_url}>"
log_channel = self.bot.get_channel(config.modlog_channel)
await log_channel.send(chan_message)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def approve(self, ctx, target: discord.Member, role: str = "community"):
if role not in config.named_roles:
return await ctx.send(
"No such role! Available roles: " + ",".join(config.named_roles)
)
log_channel = self.bot.get_channel(config.modlog_channel)
target_role = ctx.guild.get_role(config.named_roles[role])
if target_role in target.roles:
return await ctx.send("Target already has this role.")
await target.add_roles(target_role, reason=str(ctx.author))
await ctx.send(f"Approved {target.mention} to `{role}` role.")
await log_channel.send(
f"β
Approved: {str(ctx.author)} added"
f" {role} to {target.mention}"
f"\nπ __Jump__: <{ctx.message.jump_url}>"
)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command(aliases=["unapprove"])
async def revoke(self, ctx, target: discord.Member, role: str = "community"):
if role not in config.named_roles:
return await ctx.send(
"No such role! Available roles: " + ",".join(config.named_roles)
)
log_channel = self.bot.get_channel(config.modlog_channel)
target_role = ctx.guild.get_role(config.named_roles[role])
if target_role not in target.roles:
return await ctx.send("Target doesn't have this role.")
await target.remove_roles(target_role, reason=str(ctx.author))
await ctx.send(f"Un-approved {target.mention} from `{role}` role.")
await log_channel.send(
f"β Un-approved: {str(ctx.author)} removed"
f" {role} from {target.mention}"
f"\nπ __Jump__: <{ctx.message.jump_url}>"
)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command(aliases=["clear"])
async def purge(self, ctx, limit: int, channel: discord.TextChannel = None):
log_channel = self.bot.get_channel(config.modlog_channel)
if not channel:
channel = ctx.channel
await channel.purge(limit=limit)
msg = (
f"π **Purged**: {str(ctx.author)} purged {limit} "
f"messages in {channel.mention}."
)
await log_channel.send(msg)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command()
async def warn(self, ctx, target: discord.Member, *, reason: str = ""):
if target == ctx.author:
return await ctx.send("You can't do mod actions on yourself.")
elif target == self.bot.user:
return await ctx.send(
f"I'm sorry {ctx.author.mention}, I'm afraid I can't do that."
)
elif self.check_if_target_is_staff(target):
return await ctx.send(
"I can't warn this user as they're a member of staff."
)
log_channel = self.bot.get_channel(config.modlog_channel)
warn_count = userlog(target.id, ctx.author, reason, "warns", target.name)
safe_name = await commands.clean_content(escape_markdown=True).convert(
ctx, str(target)
)
chan_msg = (
f"β οΈ **Warned**: {str(ctx.author)} warned "
f"{target.mention} (warn #{warn_count}) "
f"| {safe_name}\n"
)
msg = f"You were warned on {ctx.guild.name}."
if reason:
msg += " The given reason is: " + reason
msg += (
f"\n\nPlease read the rules in {config.rules_url}. "
f"This is warn #{warn_count}."
)
if warn_count == 2:
msg += " __The next warn will automatically kick.__"
if warn_count == 3:
msg += (
"\n\nYou were kicked because of this warning. "
"You can join again right away. "
"Two more warnings will result in an automatic ban."
)
if warn_count == 4:
msg += (
"\n\nYou were kicked because of this warning. "
"This is your final warning. "
"You can join again, but "
"**one more warn will result in a ban**."
)
chan_msg += "**This resulted in an auto-kick.**\n"
if warn_count == 5:
msg += "\n\nYou were automatically banned due to five warnings."
chan_msg += "**This resulted in an auto-ban.**\n"
try:
await target.send(msg)
except discord.errors.Forbidden:
pass
if warn_count == 3 or warn_count == 4:
await target.kick()
if warn_count >= 5:
await target.ban(reason="exceeded warn limit", delete_message_days=0)
await ctx.send(
f"{target.mention} warned. " f"User has {warn_count} warning(s)."
)
if reason:
chan_msg += f'βοΈ __Reason__: "{reason}"'
else:
chan_msg += (
"Please add an explanation below. In the future"
", it is recommended to use `.warn <user> [reason]`"
" as the reason is automatically sent to the user."
)
chan_msg += f"\nπ __Jump__: <{ctx.message.jump_url}>"
await log_channel.send(chan_msg)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command(aliases=["setnick", "nick"])
async def nickname(self, ctx, target: discord.Member, *, nick: str = ""):
try:
if nick:
await target.edit(nick=nick, reason=str(ctx.author))
else:
await target.edit(nick=None, reason=str(ctx.author))
await ctx.send("Successfully set nickname.")
except discord.errors.Forbidden:
await ctx.send(
"I don't have the permission to set that user's nickname.\n"
"User's top role may be above mine, or I may lack Manage Nicknames permission."
)
@commands.guild_only()
@commands.check(check_if_staff)
@commands.command(aliases=["echo"])
|
MIT License
|
whutch/cwmud
|
cwmud/contrib/weather/patterns.py
|
WeatherPattern.__init__
|
python
|
def __init__(self, time_source=time.time, time_scale=1.0, seed=None,
formation_speed=1.0, storm_scale=60, wind_scale=None):
self.time_source = time_source
self.time_scale = time_scale
self._time_base = time_source()
self._time_offset = self._time_base
self.seed = seed if seed is not None else self._time_base % 100000
self.formation_speed = formation_speed
self.storm_scale = storm_scale
self.wind_scale = wind_scale
self._offset_x = None
self._offset_y = None
|
Create a new weather pattern.
:param callable time_source: A callable source of time
:param time_scale: A multiplier to scale time by
:param seed: A seed to use for the pattern's noise generation
:param formation_speed: The speed at which storm formations change;
higher numbers are faster, and zero means formations never change
:param storm_scale: The scale of the storm noise
:param wind_scale: The scale of the wind noise
:returns None:
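A minimal construction sketch; the argument values are arbitrary and only illustrate the parameters described above:

import random
import time

pattern = WeatherPattern(
    time_source=time.time,          # wall-clock time drives the pattern
    time_scale=2.0,                 # weather evolves twice as fast as real time
    seed=random.randint(0, 99999),  # omit to derive a seed from the start time
    formation_speed=1.5,            # storms form and dissolve slightly faster
    storm_scale=60,
    wind_scale=30,
)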
|
https://github.com/whutch/cwmud/blob/bee8b126a5e70edd0593dae9753a6be8d52357cf/cwmud/contrib/weather/patterns.py#L16-L39
|
import time
from ...core.random import generate_noise
class WeatherPattern:
|
MIT License
|
magenta/magenta
|
magenta/models/score2perf/datagen_beam.py
|
generate_examples
|
python
|
def generate_examples(input_transform, output_dir, problem_name, splits,
min_hop_size_seconds, max_hop_size_seconds,
num_replications, min_pitch, max_pitch,
encode_performance_fn, encode_score_fns=None,
augment_fns=None, absolute_timing=False,
random_crop_length=None):
logging.getLogger().setLevel(logging.INFO)
if isinstance(input_transform, dict):
split_names = input_transform.keys()
else:
if not splits:
raise ValueError(
'Split probabilities must be provided if input is not presplit.')
split_names, split_probabilities = zip(*splits.items())
cumulative_splits = list(zip(split_names, np.cumsum(split_probabilities)))
if cumulative_splits[-1][1] != 1.0:
raise ValueError('Split probabilities must sum to 1; got %f' %
cumulative_splits[-1][1])
output_filenames = [
os.path.join(output_dir, '%s-%s.tfrecord' % (problem_name, split_name))
for split_name in split_names
]
for split_name, output_filename in zip(split_names, output_filenames):
existing_output_filenames = tf.gfile.Glob(output_filename + '*')
if existing_output_filenames:
tf.logging.info(
'Data files already exist for split %s in problem %s, deleting.',
split_name, problem_name)
for filename in existing_output_filenames:
tf.gfile.Remove(filename)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
FLAGS.pipeline_options.split(','))
with beam.Pipeline(options=pipeline_options) as p:
if isinstance(input_transform, dict):
split_partitions = [
p | 'input_transform_%s' % split_name >> input_transform[split_name]
for split_name in split_names
]
else:
p |= 'input_transform' >> input_transform
split_partitions = p | 'partition' >> beam.Partition(
functools.partial(select_split, cumulative_splits),
len(cumulative_splits))
for split_name, output_filename, s in zip(
split_names, output_filenames, split_partitions):
if isinstance(min_hop_size_seconds, dict):
min_hop = min_hop_size_seconds[split_name]
else:
min_hop = min_hop_size_seconds
if isinstance(max_hop_size_seconds, dict):
max_hop = max_hop_size_seconds[split_name]
else:
max_hop = max_hop_size_seconds
s |= 'preshuffle_%s' % split_name >> beam.Reshuffle()
s |= 'filter_invalid_notes_%s' % split_name >> beam.Map(
functools.partial(filter_invalid_notes, min_pitch, max_pitch))
s |= 'extract_examples_%s' % split_name >> beam.ParDo(
ExtractExamplesDoFn(
min_hop, max_hop,
num_replications if split_name == 'train' else 1,
encode_performance_fn, encode_score_fns,
augment_fns if split_name == 'train' else None,
absolute_timing,
random_crop_length))
s |= 'shuffle_%s' % split_name >> beam.Reshuffle()
s |= 'write_%s' % split_name >> beam.io.WriteToTFRecord(
output_filename, coder=beam.coders.ProtoCoder(tf.train.Example))
|
Generate data for a Score2Perf problem.
Args:
input_transform: The input PTransform object that reads input NoteSequence
protos, or dictionary mapping split names to such PTransform objects.
Should produce `(id, NoteSequence)` tuples.
output_dir: The directory to write the resulting TFRecord file containing
examples.
problem_name: Name of the Tensor2Tensor problem, used as a base filename
for generated data.
splits: A dictionary of split names and their probabilities. Probabilities
should add up to 1. If `input_transform` is a dictionary, this argument
will be ignored.
min_hop_size_seconds: Minimum hop size in seconds at which input
NoteSequence protos can be split. Can also be a dictionary mapping split
name to minimum hop size.
max_hop_size_seconds: Maximum hop size in seconds at which input
NoteSequence protos can be split. If zero or None, will not split at
all. Can also be a dictionary mapping split name to maximum hop size.
num_replications: Number of times input NoteSequence protos will be
replicated prior to splitting.
min_pitch: Minimum MIDI pitch value; notes with lower pitch will be dropped.
max_pitch: Maximum MIDI pitch value; notes with greater pitch will be
dropped.
encode_performance_fn: Required performance encoding function.
encode_score_fns: Optional dictionary of named score encoding functions.
augment_fns: Optional list of data augmentation functions. Only applied in
the 'train' split.
absolute_timing: If True, each score will use absolute instead of tempo-
relative timing. Since chord inference depends on having beats, the
score will only contain melody.
random_crop_length: If specified, crop each encoded performance to this
length. Cannot be specified if using scores.
Raises:
ValueError: If split probabilities do not add up to 1, or if splits are not
provided and `input_transform` is not a dictionary.
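A hedged invocation sketch. The encoder below is a stand-in (a real pipeline would use one of the Score2Perf encoders from music_encoders), and the paths are placeholders:

# Stand-in performance encoder: returns a toy list of token ids.
def encode_performance(ns):
    return [note.pitch for note in ns.notes]

generate_examples(
    input_transform=ReadNoteSequencesFromTFRecord('/tmp/notesequences.tfrecord'),
    output_dir='/tmp/score2perf_data',
    problem_name='my_problem',
    splits={'train': 0.75, 'eval': 0.25},  # must sum to 1
    min_hop_size_seconds=30.0,
    max_hop_size_seconds=60.0,
    num_replications=1,
    min_pitch=21,
    max_pitch=108,
    encode_performance_fn=encode_performance,
)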
|
https://github.com/magenta/magenta/blob/be6558f1a06984faff6d6949234f5fe9ad0ffdb5/magenta/models/score2perf/datagen_beam.py#L340-L455
|
import copy
import functools
import hashlib
import logging
import os
import random
import typing
import apache_beam as beam
from apache_beam import typehints
from apache_beam.metrics import Metrics
from magenta.models.score2perf import music_encoders
import note_seq
from note_seq import chord_inference
from note_seq import melody_inference
from note_seq import sequences_lib
import numpy as np
from tensor2tensor.data_generators import generator_utils
import tensorflow.compat.v1 as tf
SCORE_BPM = 120.0
BEAT = note_seq.NoteSequence.TextAnnotation.BEAT
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string(
'pipeline_options', '',
'Command line flags to use in constructing the Beam pipeline options.')
@typehints.with_output_types(typing.Tuple[str, str])
class ReadNoteSequencesFromTFRecord(beam.PTransform):
def __init__(self, tfrecord_path):
super(ReadNoteSequencesFromTFRecord, self).__init__()
self._tfrecord_path = tfrecord_path
def expand(self, pcoll):
pcoll |= beam.Create([self._tfrecord_path])
pcoll |= beam.io.tfrecordio.ReadAllFromTFRecord()
pcoll |= beam.Map(
lambda ns_str: (note_seq.NoteSequence.FromString(ns_str).id, ns_str))
return pcoll
def select_split(cumulative_splits, kv, unused_num_partitions):
key, _ = kv
m = hashlib.md5(key.encode('utf-8'))
r = int(m.hexdigest(), 16) / (2 ** (8 * m.digest_size))
for i, (name, p) in enumerate(cumulative_splits):
if r < p:
Metrics.counter('select_split', name).inc()
return i
assert False
def filter_invalid_notes(min_pitch, max_pitch, kv):
key, ns_str = kv
ns = note_seq.NoteSequence.FromString(ns_str)
valid_notes = [note for note in ns.notes
if min_pitch <= note.pitch <= max_pitch]
if len(valid_notes) < len(ns.notes):
del ns.notes[:]
ns.notes.extend(valid_notes)
Metrics.counter('filter_invalid_notes', 'out_of_range_pitch').inc()
return key, ns.SerializeToString()
class DataAugmentationError(Exception):
pass
class ExtractExamplesDoFn(beam.DoFn):
def __init__(self, min_hop_size_seconds, max_hop_size_seconds,
num_replications, encode_performance_fn, encode_score_fns,
augment_fns, absolute_timing, random_crop_length,
*unused_args, **unused_kwargs):
if (max_hop_size_seconds and
max_hop_size_seconds != min_hop_size_seconds and
max_hop_size_seconds < 2 * min_hop_size_seconds):
raise ValueError(
'Maximum hop size must be at least twice minimum hop size.')
if encode_score_fns and random_crop_length:
raise ValueError('Cannot perform random crop when scores are used.')
super(ExtractExamplesDoFn, self).__init__(*unused_args, **unused_kwargs)
self._min_hop_size_seconds = min_hop_size_seconds
self._max_hop_size_seconds = max_hop_size_seconds
self._num_replications = num_replications
self._encode_performance_fn = encode_performance_fn
self._encode_score_fns = encode_score_fns
self._augment_fns = augment_fns if augment_fns else [lambda ns: ns]
self._absolute_timing = absolute_timing
self._random_crop_length = random_crop_length
def process(self, kv):
key, ns_str = kv
m = hashlib.md5(key.encode('utf-8'))
random.seed(int(m.hexdigest(), 16))
ns = note_seq.NoteSequence.FromString(ns_str)
ns = sequences_lib.apply_sustain_control_changes(ns)
del ns.control_changes[:]
if (self._min_hop_size_seconds and
ns.total_time < self._min_hop_size_seconds):
Metrics.counter('extract_examples', 'sequence_too_short').inc()
return
sequences = []
for _ in range(self._num_replications):
if self._max_hop_size_seconds:
if self._max_hop_size_seconds == self._min_hop_size_seconds:
sequences += sequences_lib.split_note_sequence(
ns, self._max_hop_size_seconds)
else:
hop_times = [0.0]
while hop_times[-1] <= ns.total_time - self._min_hop_size_seconds:
if hop_times[-1] + self._max_hop_size_seconds < ns.total_time:
max_offset = min(
self._max_hop_size_seconds,
ns.total_time - self._min_hop_size_seconds - hop_times[-1])
else:
max_offset = self._max_hop_size_seconds
offset = random.uniform(self._min_hop_size_seconds, max_offset)
hop_times.append(hop_times[-1] + offset)
sequences += sequences_lib.split_note_sequence(ns, hop_times[1:-1])
else:
sequences += [ns]
for performance_sequence in sequences:
if self._encode_score_fns:
if not self._absolute_timing:
beats = [
ta for ta in performance_sequence.text_annotations
if ta.annotation_type == BEAT
and ta.time <= performance_sequence.total_time
]
if len(beats) < 2:
Metrics.counter('extract_examples', 'not_enough_beats').inc()
continue
performance_sequence = sequences_lib.extract_subsequence(
performance_sequence,
start_time=min(beat.time for beat in beats),
end_time=max(beat.time for beat in beats)
)
try:
chord_inference.infer_chords_for_sequence(
performance_sequence,
chord_change_prob=0.25,
chord_note_concentration=50.0,
add_key_signatures=True)
except chord_inference.ChordInferenceError:
Metrics.counter('extract_examples', 'chord_inference_failed').inc()
continue
try:
melody_instrument = melody_inference.infer_melody_for_sequence(
performance_sequence,
melody_interval_scale=2.0,
rest_prob=0.1,
instantaneous_non_max_pitch_prob=1e-15,
instantaneous_non_empty_rest_prob=0.0,
instantaneous_missing_pitch_prob=1e-15)
except melody_inference.MelodyInferenceError:
Metrics.counter('extract_examples', 'melody_inference_failed').inc()
continue
if not self._absolute_timing:
score_sequence, unused_alignment = sequences_lib.rectify_beats(
performance_sequence, beats_per_minute=SCORE_BPM)
else:
score_sequence = copy.deepcopy(performance_sequence)
performance_notes = []
for note in performance_sequence.notes:
if note.instrument != melody_instrument:
performance_notes.append(note)
del performance_sequence.notes[:]
performance_sequence.notes.extend(performance_notes)
score_notes = []
for note in score_sequence.notes:
if note.instrument == melody_instrument:
score_notes.append(note)
del score_sequence.notes[:]
score_sequence.notes.extend(score_notes)
del performance_sequence.key_signatures[:]
del performance_sequence.text_annotations[:]
Metrics.counter('extract_examples', 'extracted_score').inc()
for augment_fn in self._augment_fns:
try:
augmented_performance_sequence = augment_fn(performance_sequence)
except DataAugmentationError:
Metrics.counter(
'extract_examples', 'augment_performance_failed').inc()
continue
example_dict = {
'targets': self._encode_performance_fn(
augmented_performance_sequence)
}
if not example_dict['targets']:
Metrics.counter('extract_examples', 'skipped_empty_targets').inc()
continue
if (self._random_crop_length and
len(example_dict['targets']) > self._random_crop_length):
max_offset = len(example_dict['targets']) - self._random_crop_length
offset = random.randrange(max_offset + 1)
example_dict['targets'] = example_dict['targets'][
offset:offset + self._random_crop_length]
if self._encode_score_fns:
try:
augmented_score_sequence = augment_fn(score_sequence)
except DataAugmentationError:
Metrics.counter('extract_examples', 'augment_score_failed').inc()
continue
skip = False
for name, encode_score_fn in self._encode_score_fns.items():
example_dict[name] = encode_score_fn(augmented_score_sequence)
if not example_dict[name]:
Metrics.counter('extract_examples',
'skipped_empty_%s' % name).inc()
skip = True
break
if skip:
continue
Metrics.counter('extract_examples', 'encoded_example').inc()
Metrics.distribution(
'extract_examples', 'performance_length_in_seconds').update(
int(augmented_performance_sequence.total_time))
yield generator_utils.to_example(example_dict)
|
Apache License 2.0
|
mortcanty/earthengine
|
src/ee/batch.py
|
Export.__new__
|
python
|
def __new__(cls, collection, description='myExportTableTask', config=None):
config = (config or {}).copy()
if 'driveFileNamePrefix' not in config and 'outputBucket' not in config:
config['driveFileNamePrefix'] = description
if 'fileFormat' not in config:
config['fileFormat'] = 'CSV'
return _CreateTask(
Task.Type.EXPORT_TABLE, collection, description, config)
|
Export an EE FeatureCollection as a table.
The exported table will reside in Google Drive or Cloud Storage.
Args:
collection: The feature collection to be exported.
description: Human-readable name of the task.
config: A dictionary that will be copied and used as parameters
for the task:
- fileFormat: The output format: "CSV" (default), "GeoJSON", "KML",
or "KMZ".
If exporting to Google Drive (default):
- driveFolder: The name of a unique folder in your Drive
account to export into. Defaults to the root of the drive.
- driveFileNamePrefix: The Google Drive filename for the export.
Defaults to the name of the task.
If exporting to Google Cloud Storage:
- outputBucket: The name of a Cloud Storage bucket for the export.
- outputPrefix: Cloud Storage object name prefix for the export.
Returns:
An unstarted Task that exports the table.
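Example (a hedged usage sketch, not from the source; the asset id, Drive
folder, and bucket names below are placeholders, and ee is assumed to be
initialized and authenticated):
import ee

ee.Initialize()

# Hypothetical FeatureCollection asset id used only for illustration.
fc = ee.FeatureCollection('users/example/my_features')

# Drive export: driveFileNamePrefix falls back to the description and
# fileFormat falls back to 'CSV' when omitted.
drive_task = ee.batch.Export.table(fc, 'demo_table_export',
                                   {'driveFolder': 'ee_exports'})
drive_task.start()

# Cloud Storage export: supplying outputBucket switches the destination.
gcs_task = ee.batch.Export.table(
    fc, 'demo_table_export_gcs',
    {'outputBucket': 'my-bucket', 'outputPrefix': 'exports/demo',
     'fileFormat': 'GeoJSON'})
gcs_task.start()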
|
https://github.com/mortcanty/earthengine/blob/9786109a98ed3f7fcad630e1dd69f63591183a37/src/ee/batch.py#L463-L493
|
import json
import six
from . import data
from . import ee_exception
from . import geometry
class Task(object):
def __init__(self, taskId, config=None):
self.id = taskId
self.config = config and config.copy()
class Type(object):
EXPORT_IMAGE = 'EXPORT_IMAGE'
EXPORT_MAP = 'EXPORT_TILES'
EXPORT_TABLE = 'EXPORT_FEATURES'
EXPORT_VIDEO = 'EXPORT_VIDEO'
class State(object):
UNSUBMITTED = 'UNSUBMITTED'
READY = 'READY'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
CANCEL_REQUESTED = 'CANCEL_REQUESTED'
CANCELLED = 'CANCELLED'
class ExportDestination(object):
DRIVE = 'DRIVE'
GCS = 'GOOGLE_CLOUD_STORAGE'
ASSET = 'ASSET'
def start(self):
if not self.config:
raise ee_exception.EEException(
'Task config must be specified for tasks to be started.')
data.startProcessing(self.id, self.config)
def status(self):
result = data.getTaskStatus(self.id)[0]
if result['state'] == 'UNKNOWN': result['state'] = Task.State.UNSUBMITTED
return result
def active(self):
return self.status()['state'] in (Task.State.READY,
Task.State.RUNNING,
Task.State.CANCEL_REQUESTED)
def cancel(self):
data.cancelTask(self.id)
@staticmethod
def list():
statuses = data.getTaskList()
tasks = []
for status in statuses:
tasks.append(Task(status['id'], {
'type': status['task_type'],
'description': status['description'],
'state': status['state'],
}))
return tasks
def __repr__(self):
if self.config:
return '<Task %(type)s: %(description)s (%(state)s)>' % self.config
else:
return '<Task "%s">' % self.id
class Export(object):
def __init__(self):
raise AssertionError('This class cannot be instantiated.')
class image(object):
def __init__(self):
raise AssertionError('This class cannot be instantiated.')
def __new__(cls, image, description='myExportImageTask', config=None):
config = (config or {}).copy()
if 'driveFileNamePrefix' not in config and 'outputBucket' not in config:
config['driveFileNamePrefix'] = description
if 'region' in config:
config['region'] = _GetSerializedRegion(config.get('region'))
return _CreateTask(
Task.Type.EXPORT_IMAGE, image, description, config)
@staticmethod
def toAsset(image, description='myExportImageTask', assetId=None,
pyramidingPolicy=None, dimensions=None, region=None,
scale=None, crs=None, crsTransform=None, maxPixels=None,
**kwargs):
config = _CopyDictFilterNone(locals())
_ConvertToServerParams(config, 'image', Task.ExportDestination.ASSET)
if 'region' in config:
config['region'] = _GetSerializedRegion(config.get('region'))
return _CreateTask(
Task.Type.EXPORT_IMAGE, image, description, config)
@staticmethod
def toCloudStorage(image, description='myExportImageTask',
bucket=None, fileNamePrefix=None,
dimensions=None, region=None, scale=None,
crs=None, crsTransform=None, maxPixels=None,
shardSize=None, fileDimensions=None, **kwargs):
config = _CopyDictFilterNone(locals())
_ConvertToServerParams(config, 'image', Task.ExportDestination.GCS)
if 'region' in config:
config['region'] = _GetSerializedRegion(config.get('region'))
return _CreateTask(
Task.Type.EXPORT_IMAGE, image, description, config)
@staticmethod
def toDrive(image, description='myExportImageTask', folder=None,
fileNamePrefix=None, dimensions=None, region=None,
scale=None, crs=None, crsTransform=None, maxPixels=None,
shardSize=None, fileDimensions=None, **kwargs):
config = _CopyDictFilterNone(locals())
if 'fileNamePrefix' not in config:
config['fileNamePrefix'] = description
_ConvertToServerParams(config, 'image', Task.ExportDestination.DRIVE)
if 'region' in config:
config['region'] = _GetSerializedRegion(config.get('region'))
return _CreateTask(
Task.Type.EXPORT_IMAGE, image, description, config)
class map(object):
def __init__(self):
raise AssertionError('This class cannot be instantiated.')
@staticmethod
def toCloudStorage(image, description='myExportMapTask', bucket=None,
fileFormat=None, path=None, writePublicTiles=None,
maxZoom=None, scale=None, minZoom=None,
region=None, skipEmptyTiles=None, **kwargs):
config = _CopyDictFilterNone(locals())
if 'path' not in config:
config['path'] = description
_ConvertToServerParams(config, 'image', Task.ExportDestination.GCS)
if 'fileFormat' not in config:
config['fileFormat'] = 'auto'
if 'writePublicTiles' not in config:
config['writePublicTiles'] = True
if 'region' in config:
config['region'] = _GetSerializedRegion(config.get('region'))
return _CreateTask(
Task.Type.EXPORT_MAP, image, description, config)
class table(object):
def __init__(self):
raise AssertionError('This class cannot be instantiated.')
|
MIT License
|
schedutron/chirps
|
chirps/functions.py
|
fav_tweet
|
python
|
def fav_tweet(account_handler, tweet):
try:
account_handler.favorites.create(_id=tweet['id'])
return 0
except TwitterHTTPError:
return 1
|
Favorites the passed tweet and returns a success status: 0 if the favorite
succeeded, 1 otherwise.
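A hedged usage sketch (the credentials and search query are placeholders,
not from the source): tally how many favorites failed for a batch of results.
from twitter import Twitter, OAuth

account = Twitter(auth=OAuth('token', 'token_secret',
                             'consumer_key', 'consumer_secret'))
tweets = account.search.tweets(q='#python')['statuses']
# Each call returns 0 on success and 1 on failure, so summing counts failures.
failed = sum(fav_tweet(account, tweet) for tweet in tweets)
print('%d of %d favorites failed' % (failed, len(tweets)))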
|
https://github.com/schedutron/chirps/blob/b4676612b129b55167139159ff099ae03533e285/chirps/functions.py#L49-L57
|
import json
import random
import psycopg2
import requests
from lxml.html import fromstring
from twitter import TwitterHTTPError
from chirps.scrapers import *
def reply(account_handler, tweet_id, user_name, msg):
account_handler.statuses.update(
status='@%s %s' % (user_name, msg),
in_reply_to_status_id=tweet_id
)
def print_tweet(tweet):
print(tweet["user"]["name"])
print(tweet["user"]["screen_name"])
print(tweet["created_at"])
print(tweet["text"])
hashtags_list = []
hashtags = tweet["entities"]["hashtags"]
for tag in hashtags:
hashtags_list.append(tag["text"])
print(hashtags_list)
|
MIT License
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/config/__init__.py
|
config._set_advertisement_interval
|
python
|
def _set_advertisement_interval(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..65535"]},
int_size=16,
),
restriction_dict={"range": ["1..4095"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
)(
100
),
is_leaf=True,
yang_name="advertisement-interval",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint16",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """advertisement_interval must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4095']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(100), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=True)""",
}
)
self.__advertisement_interval = t
if hasattr(self, "_set"):
self._set()
|
Setter method for advertisement_interval, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/config/advertisement_interval (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_advertisement_interval is considered a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_advertisement_interval() directly.
YANG Description: Sets the interval between successive VRRP
advertisements -- RFC 5798 defines this as a 12-bit
value expressed in centiseconds, with a default of 100, i.e.,
1 second. Several implementations express this in units of
seconds.
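A small illustrative sketch (assuming the pyangbind-generated config class
above is importable as config; values and output are illustrative only):
cfg = config()
cfg._set_advertisement_interval(200)            # 200 centiseconds = 2 seconds
print(int(cfg._get_advertisement_interval()))   # -> 200

# Values outside the YANG range 1..4095 are rejected with a ValueError.
try:
    cfg._set_advertisement_interval(5000)
except ValueError as err:
    print(err.args[0]['error-string'])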
|
https://github.com/napalm-automation/napalm-yang/blob/9148e015b086ebe311c07deb92e168ea36fd7771/napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv6/addresses/address/vrrp/vrrp_group/config/__init__.py#L762-L816
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
__slots__ = (
"_path_helper",
"_extmethods",
"__virtual_router_id",
"__virtual_address",
"__priority",
"__preempt",
"__preempt_delay",
"__accept_mode",
"__advertisement_interval",
"__virtual_link_local",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__virtual_router_id = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="virtual-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=True,
)
self.__virtual_address = YANGDynClass(
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
]
),
is_leaf=False,
yang_name="virtual-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=True,
)
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
100
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=True,
)
self.__preempt = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="preempt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=True,
)
self.__preempt_delay = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..3600"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
)(
0
),
is_leaf=True,
yang_name="preempt-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint16",
is_config=True,
)
self.__accept_mode = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="accept-mode",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=True,
)
self.__advertisement_interval = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["1..4095"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
)(
100
),
is_leaf=True,
yang_name="advertisement-interval",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint16",
is_config=True,
)
self.__virtual_link_local = YANGDynClass(
base=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
],
is_leaf=True,
yang_name="virtual-link-local",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"subinterfaces",
"subinterface",
"ipv6",
"addresses",
"address",
"vrrp",
"vrrp-group",
"config",
]
def _get_virtual_router_id(self):
return self.__virtual_router_id
def _set_virtual_router_id(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="virtual-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """virtual_router_id must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=True)""",
}
)
self.__virtual_router_id = t
if hasattr(self, "_set"):
self._set()
def _unset_virtual_router_id(self):
self.__virtual_router_id = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..255"]},
),
is_leaf=True,
yang_name="virtual-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=True,
)
def _get_virtual_address(self):
return self.__virtual_address
def _set_virtual_address(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
]
),
is_leaf=False,
yang_name="virtual-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """virtual_address must be of a type compatible with inet:ip-address""",
"defined-type": "inet:ip-address",
"generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),]), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='inet:ip-address', is_config=True)""",
}
)
self.__virtual_address = t
if hasattr(self, "_set"):
self._set()
def _unset_virtual_address(self):
self.__virtual_address = YANGDynClass(
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
]
),
is_leaf=False,
yang_name="virtual-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ip-address",
is_config=True,
)
def _get_priority(self):
return self.__priority
def _set_priority(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["1..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
100
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """priority must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=True)""",
}
)
self.__priority = t
if hasattr(self, "_set"):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["1..254"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
)(
100
),
is_leaf=True,
yang_name="priority",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint8",
is_config=True,
)
def _get_preempt(self):
return self.__preempt
def _set_preempt(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="preempt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """preempt must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=True)""",
}
)
self.__preempt = t
if hasattr(self, "_set"):
self._set()
def _unset_preempt(self):
self.__preempt = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="preempt",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=True,
)
def _get_preempt_delay(self):
return self.__preempt_delay
def _set_preempt_delay(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..65535"]},
int_size=16,
),
restriction_dict={"range": ["0..3600"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
)(
0
),
is_leaf=True,
yang_name="preempt-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint16",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """preempt_delay must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['0..3600']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(0), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=True)""",
}
)
self.__preempt_delay = t
if hasattr(self, "_set"):
self._set()
def _unset_preempt_delay(self):
self.__preempt_delay = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
restriction_dict={"range": ["0..3600"]},
),
default=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
)(
0
),
is_leaf=True,
yang_name="preempt-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="uint16",
is_config=True,
)
def _get_accept_mode(self):
return self.__accept_mode
def _set_accept_mode(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="accept-mode",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """accept_mode must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=True)""",
}
)
self.__accept_mode = t
if hasattr(self, "_set"):
self._set()
def _unset_accept_mode(self):
self.__accept_mode = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="accept-mode",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="boolean",
is_config=True,
)
def _get_advertisement_interval(self):
return self.__advertisement_interval
|
Apache License 2.0
|
googleapis/python-vision
|
google/cloud/vision_v1p4beta1/services/image_annotator/client.py
|
ImageAnnotatorClient.async_batch_annotate_files
|
python
|
def async_batch_annotate_files(
self,
request: Union[image_annotator.AsyncBatchAnnotateFilesRequest, dict] = None,
*,
requests: Sequence[image_annotator.AsyncAnnotateFileRequest] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
has_flattened_params = any([requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, image_annotator.AsyncBatchAnnotateFilesRequest):
request = image_annotator.AsyncBatchAnnotateFilesRequest(request)
if requests is not None:
for i in range(len(requests)):
requests[i] = image_annotator.AsyncAnnotateFileRequest(requests[i])
if requests is not None:
request.requests = requests
rpc = self._transport._wrapped_methods[
self._transport.async_batch_annotate_files
]
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = operation.from_gapic(
response,
self._transport.operations_client,
image_annotator.AsyncBatchAnnotateFilesResponse,
metadata_type=image_annotator.OperationMetadata,
)
return response
|
r"""Run asynchronous image detection and annotation for a list of
generic files, such as PDF files, which may contain multiple
pages and multiple images per page. Progress and results can be
retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Args:
request (Union[google.cloud.vision_v1p4beta1.types.AsyncBatchAnnotateFilesRequest, dict]):
The request object. Multiple async file annotation
requests are batched into a single service call.
requests (Sequence[google.cloud.vision_v1p4beta1.types.AsyncAnnotateFileRequest]):
Required. Individual async file
annotation requests for this batch.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.vision_v1p4beta1.types.AsyncBatchAnnotateFilesResponse`
Response to an async batch file annotation request.
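A hedged usage sketch (not from the source): the gs:// URIs are placeholders,
and the dict request relies on the client coercing mappings into
AsyncAnnotateFileRequest messages as shown in the method body above.
from google.cloud import vision_v1p4beta1 as vision

client = vision.ImageAnnotatorClient()

request = {
    "input_config": {
        "gcs_source": {"uri": "gs://my-bucket/input/document.pdf"},
        "mime_type": "application/pdf",
    },
    "features": [vision.Feature(type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)],
    "output_config": {
        "gcs_destination": {"uri": "gs://my-bucket/output/"},
        "batch_size": 2,
    },
}

operation = client.async_batch_annotate_files(requests=[request])
response = operation.result(timeout=300)  # blocks until the operation finishes
print(response.responses[0].output_config.gcs_destination.uri)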
|
https://github.com/googleapis/python-vision/blob/baf4a42802479cd9004779ec493016d922addf1c/google/cloud/vision_v1p4beta1/services/image_annotator/client.py#L617-L702
|
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials
from google.auth.transport import mtls
from google.auth.transport.grpc import SslCredentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.api_core import operation
from google.api_core import operation_async
from google.cloud.vision_v1p4beta1.types import image_annotator
from .transports.base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ImageAnnotatorGrpcTransport
from .transports.grpc_asyncio import ImageAnnotatorGrpcAsyncIOTransport
class ImageAnnotatorClientMeta(type):
_transport_registry = (
OrderedDict()
)
_transport_registry["grpc"] = ImageAnnotatorGrpcTransport
_transport_registry["grpc_asyncio"] = ImageAnnotatorGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[ImageAnnotatorTransport]:
if label:
return cls._transport_registry[label]
return next(iter(cls._transport_registry.values()))
class ImageAnnotatorClient(metaclass=ImageAnnotatorClientMeta):
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "vision.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ImageAnnotatorTransport:
return self._transport
@staticmethod
def product_path(project: str, location: str, product: str,) -> str:
return "projects/{project}/locations/{location}/products/{product}".format(
project=project, location=location, product=product,
)
@staticmethod
def parse_product_path(path: str) -> Dict[str, str]:
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/products/(?P<product>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def product_set_path(project: str, location: str, product_set: str,) -> str:
return "projects/{project}/locations/{location}/productSets/{product_set}".format(
project=project, location=location, product_set=product_set,
)
@staticmethod
def parse_product_set_path(path: str) -> Dict[str, str]:
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/productSets/(?P<product_set>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ImageAnnotatorTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
if isinstance(transport, ImageAnnotatorTransport):
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def batch_annotate_images(
self,
request: Union[image_annotator.BatchAnnotateImagesRequest, dict] = None,
*,
requests: Sequence[image_annotator.AnnotateImageRequest] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> image_annotator.BatchAnnotateImagesResponse:
has_flattened_params = any([requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, image_annotator.BatchAnnotateImagesRequest):
request = image_annotator.BatchAnnotateImagesRequest(request)
if requests is not None:
for i in range(len(requests)):
requests[i] = image_annotator.AnnotateImageRequest(requests[i])
if requests is not None:
request.requests = requests
rpc = self._transport._wrapped_methods[self._transport.batch_annotate_images]
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
return response
def batch_annotate_files(
self,
request: Union[image_annotator.BatchAnnotateFilesRequest, dict] = None,
*,
requests: Sequence[image_annotator.AnnotateFileRequest] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> image_annotator.BatchAnnotateFilesResponse:
has_flattened_params = any([requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, image_annotator.BatchAnnotateFilesRequest):
request = image_annotator.BatchAnnotateFilesRequest(request)
if requests is not None:
for i in range(len(requests)):
requests[i] = image_annotator.AnnotateFileRequest(requests[i])
if requests is not None:
request.requests = requests
rpc = self._transport._wrapped_methods[self._transport.batch_annotate_files]
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
return response
def async_batch_annotate_images(
self,
request: Union[image_annotator.AsyncBatchAnnotateImagesRequest, dict] = None,
*,
requests: Sequence[image_annotator.AnnotateImageRequest] = None,
output_config: image_annotator.OutputConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
has_flattened_params = any([requests, output_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
if not isinstance(request, image_annotator.AsyncBatchAnnotateImagesRequest):
request = image_annotator.AsyncBatchAnnotateImagesRequest(request)
if requests is not None:
for i in range(len(requests)):
requests[i] = image_annotator.AnnotateImageRequest(requests[i])
if requests is not None:
request.requests = requests
if output_config is not None:
request.output_config = output_config
rpc = self._transport._wrapped_methods[
self._transport.async_batch_annotate_images
]
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
response = operation.from_gapic(
response,
self._transport.operations_client,
image_annotator.AsyncBatchAnnotateImagesResponse,
metadata_type=image_annotator.OperationMetadata,
)
return response
|
Apache License 2.0
|
uptane/uptane
|
demo/demo_secondary.py
|
clean_up_temp_folder
|
python
|
def clean_up_temp_folder():
if os.path.isdir(CLIENT_DIRECTORY):
shutil.rmtree(CLIENT_DIRECTORY)
|
Deletes the temp directory created by the demo
|
https://github.com/uptane/uptane/blob/ea01d82fcebb1c7446f5a4574a5775286953a0bd/demo/demo_secondary.py#L645-L650
|
from __future__ import print_function
from __future__ import unicode_literals
from io import open
import demo
import uptane
import uptane.common
import uptane.clients.secondary as secondary
from uptane import GREEN, RED, YELLOW, ENDCOLORS
from demo.uptane_banners import *
import tuf.keys
import tuf.repository_tool as rt
import atexit
import os
import shutil
import time
import copy
import json
import canonicaljson
from six.moves import xmlrpc_client
import readline, rlcompleter
readline.parse_and_bind('tab: complete')
uptane.DEMO_MODE = True
CLIENT_DIRECTORY_PREFIX = 'temp_secondary'
CLIENT_DIRECTORY = None
_vin = 'democar'
_ecu_serial = 'TCUdemocar'
_primary_host = demo.PRIMARY_SERVER_HOST
_primary_port = demo.PRIMARY_SERVER_DEFAULT_PORT
firmware_filename = 'secondary_firmware.txt'
current_firmware_fileinfo = {}
secondary_ecu = None
ecu_key = None
nonce = None
attacks_detected = ''
most_recent_signed_ecu_manifest = None
def clean_slate(
use_new_keys=False,
vin=_vin,
ecu_serial=_ecu_serial,
primary_host=None,
primary_port=None):
global secondary_ecu
global _vin
global _ecu_serial
global _primary_host
global _primary_port
global nonce
global CLIENT_DIRECTORY
global attacks_detected
_vin = vin
_ecu_serial = ecu_serial
if primary_host is not None:
_primary_host = primary_host
if primary_port is not None:
_primary_port = primary_port
CLIENT_DIRECTORY = os.path.join(
uptane.WORKING_DIR, CLIENT_DIRECTORY_PREFIX + demo.get_random_string(5))
key_timeserver_pub = demo.import_public_key('timeserver')
factory_firmware_fileinfo = {
'filepath': '/secondary_firmware.txt',
'fileinfo': {
'hashes': {
'sha512': '706c283972c5ae69864b199e1cdd9b4b8babc14f5a454d0fd4d3b35396a04ca0b40af731671b74020a738b5108a78deb032332c36d6ae9f31fae2f8a70f7e1ce',
'sha256': '6b9f987226610bfed08b824c93bf8b2f59521fce9a2adef80c495f363c1c9c44'},
'length': 37}}
load_or_generate_key(use_new_keys)
clock = tuf.formats.unix_timestamp_to_datetime(int(time.time()))
clock = clock.isoformat() + 'Z'
tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(clock)
atexit.register(clean_up_temp_folder)
uptane.common.create_directory_structure_for_client(
CLIENT_DIRECTORY, create_secondary_pinning_file(),
{demo.IMAGE_REPO_NAME: demo.IMAGE_REPO_ROOT_FNAME,
demo.DIRECTOR_REPO_NAME: os.path.join(demo.DIRECTOR_REPO_DIR, vin,
'metadata', 'root' + demo.METADATA_EXTENSION)})
tuf.conf.repository_directory = CLIENT_DIRECTORY
secondary_ecu = secondary.Secondary(
full_client_dir=CLIENT_DIRECTORY,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=_vin,
ecu_serial=_ecu_serial,
ecu_key=ecu_key,
time=clock,
firmware_fileinfo=factory_firmware_fileinfo,
timeserver_public_key=key_timeserver_pub)
try:
register_self_with_director()
except xmlrpc_client.Fault:
print('Registration with Director failed. Now assuming this Secondary is '
'already registered.')
try:
register_self_with_primary()
except xmlrpc_client.Fault:
print('Registration with Primary failed. Now assuming this Secondary is '
'already registered.')
print('\n' + GREEN + ' Now simulating a Secondary that rolled off the '
'assembly line\n and has never seen an update.' + ENDCOLORS)
print("Generating this Secondary's first ECU Version Manifest and sending "
"it to the Primary.")
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
def create_secondary_pinning_file():
pinnings = json.load(
open(demo.DEMO_SECONDARY_PINNING_FNAME, 'r', encoding='utf-8'))
fname_to_create = os.path.join(
demo.DEMO_DIR, 'pinned.json_secondary_' + demo.get_random_string(5))
atexit.register(clean_up_temp_file, fname_to_create)
for repo_name in pinnings['repositories']:
assert 1 == len(pinnings['repositories'][repo_name]['mirrors']), 'Config error.'
mirror = pinnings['repositories'][repo_name]['mirrors'][0]
mirror = mirror.replace('<full_client_dir>', CLIENT_DIRECTORY)
pinnings['repositories'][repo_name]['mirrors'][0] = mirror
with open(fname_to_create, 'wb') as fobj:
fobj.write(canonicaljson.encode_canonical_json(pinnings))
return fname_to_create
def submit_ecu_manifest_to_primary(signed_ecu_manifest=None):
global most_recent_signed_ecu_manifest
if signed_ecu_manifest is None:
signed_ecu_manifest = most_recent_signed_ecu_manifest
if tuf.conf.METADATA_FORMAT == 'der':
signed_ecu_manifest = xmlrpc_client.Binary(signed_ecu_manifest)
else:
uptane.formats.SIGNABLE_ECU_VERSION_MANIFEST_SCHEMA.check_match(
signed_ecu_manifest)
server = xmlrpc_client.ServerProxy(
'http://' + str(_primary_host) + ':' + str(_primary_port))
server.submit_ecu_manifest(
secondary_ecu.vin,
secondary_ecu.ecu_serial,
secondary_ecu.nonce_next,
signed_ecu_manifest)
secondary_ecu.set_nonce_as_sent()
def load_or_generate_key(use_new_keys=False):
global ecu_key
if use_new_keys:
demo.generate_key('secondary')
key_pub = demo.import_public_key('secondary')
key_pri = demo.import_private_key('secondary')
ecu_key = uptane.common.canonical_key_from_pub_and_pri(key_pub, key_pri)
def update_cycle():
global secondary_ecu
global current_firmware_fileinfo
global attacks_detected
pserver = xmlrpc_client.ServerProxy(
'http://' + str(_primary_host) + ':' + str(_primary_port))
time_attestation = pserver.get_time_attestation_for_ecu(_ecu_serial)
if tuf.conf.METADATA_FORMAT == 'der':
time_attestation = time_attestation.data
metadata_archive = pserver.get_metadata(secondary_ecu.ecu_serial)
try:
secondary_ecu.update_time(time_attestation)
except uptane.BadTimeAttestation as e:
print("Timeserver attestation from Primary does not check out: "
"This Secondary's nonce was not found. Not updating this Secondary's "
"time this cycle.")
except tuf.BadSignatureError as e:
print(RED + "Timeserver attestation from Primary did not check out. Bad "
"signature. Not updating this Secondary's time." + ENDCOLORS)
attacks_detected += 'Timeserver attestation had bad signature.\n'
archive_fname = os.path.join(
secondary_ecu.full_client_dir, 'metadata_archive.zip')
with open(archive_fname, 'wb') as fobj:
fobj.write(metadata_archive.data)
secondary_ecu.process_metadata(archive_fname)
if len(secondary_ecu.validated_targets_for_this_ecu) == 0:
print_banner(BANNER_NO_UPDATE, color=WHITE+BLACK_BG,
text='No validated targets were found. Either the Director '
'did not instruct this ECU to install anything, or the target info '
'the Director provided could not be validated.')
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
expected_target_info = secondary_ecu.validated_targets_for_this_ecu[-1]
expected_image_fname = expected_target_info['filepath']
if expected_image_fname[0] == '/':
expected_image_fname = expected_image_fname[1:]
if not pserver.update_exists_for_ecu(secondary_ecu.ecu_serial):
print_banner(BANNER_NO_UPDATE, color=WHITE+BLACK_BG,
text='Primary reports that there is no update for this ECU.')
(image_fname, image) = pserver.get_image(secondary_ecu.ecu_serial)
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
(image_fname, image) = pserver.get_image(secondary_ecu.ecu_serial)
if image is None:
print(YELLOW + 'Requested image from Primary but received none. Update '
'terminated.' + ENDCOLORS)
attacks_detected += 'Requested image from Primary but received none.\n'
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
elif not secondary_ecu.validated_targets_for_this_ecu:
print(RED + 'Requested and received image from Primary, but metadata '
'indicates no valid targets from the Director intended for this ECU. '
'Update terminated.' + ENDCOLORS)
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
elif image_fname != expected_image_fname:
print(RED + 'Requested and received image from Primary, but this '
'Secondary has not validated any target info that matches the given ' +
'filename. Expected: ' + repr(expected_image_fname) + '; received: ' +
repr(image_fname) + '; aborting "install".' + ENDCOLORS)
attacks_detected += 'Received unexpected image from Primary with ' + 'unexpected filename.\n'
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
unverified_targets_dir = os.path.join(CLIENT_DIRECTORY, 'unverified_targets')
if not os.path.exists(unverified_targets_dir):
os.mkdir(unverified_targets_dir)
with open(os.path.join(unverified_targets_dir, image_fname), 'wb') as fobj:
fobj.write(image.data)
try:
secondary_ecu.validate_image(image_fname)
except tuf.DownloadLengthMismatchError:
print_banner(
BANNER_DEFENDED, color=WHITE+DARK_BLUE_BG,
text='Image from Primary failed to validate: length mismatch. Image: ' +
repr(image_fname), sound=TADA)
attacks_detected += 'Image from Primary failed to validate: length ' + 'mismatch.\n'
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
except tuf.BadHashError:
print_banner(
BANNER_DEFENDED, color=WHITE+DARK_BLUE_BG,
text='Image from Primary failed to validate: hash mismatch. Image: ' +
repr(image_fname), sound=TADA)
attacks_detected += 'Image from Primary failed to validate: hash ' + 'mismatch.\n'
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
if secondary_ecu.firmware_fileinfo == expected_target_info:
print_banner(
BANNER_NO_UPDATE_NEEDED, color=WHITE+BLACK_BG,
text='We already have installed the firmware that the Director wants us '
'to install. Image: ' + repr(image_fname))
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
return
image_filepath = os.path.join(CLIENT_DIRECTORY, 'unverified_targets', image_fname)
current_firmware_filepath = os.path.join(CLIENT_DIRECTORY, image_fname)
if os.path.exists(current_firmware_filepath):
os.remove(current_firmware_filepath)
os.rename(image_filepath, current_firmware_filepath)
secondary_ecu.firmware_fileinfo = expected_target_info
with open(current_firmware_filepath, 'rb') as file_object:
if file_object.read() == b'evil content':
print_banner(BANNER_COMPROMISED, color=WHITE+RED_BG,
text='A malicious update has been installed! Arbitrary package attack '
'successful: this Secondary has been compromised! Image: ' +
repr(expected_image_fname), sound=WITCH)
else:
print_banner(
BANNER_UPDATED, color=WHITE+GREEN_BG,
text='Installed firmware received from Primary that was fully '
'validated by the Director and Image Repo. Image: ' +
repr(image_fname), sound=WON)
if expected_target_info['filepath'].endswith('.txt'):
print('The contents of the newly-installed firmware with filename ' +
repr(expected_target_info['filepath']) + ' are:')
print('---------------------------------------------------------')
print(open(os.path.join(CLIENT_DIRECTORY, image_fname)).read())
print('---------------------------------------------------------')
generate_signed_ecu_manifest()
submit_ecu_manifest_to_primary()
def generate_signed_ecu_manifest():
global secondary_ecu
global most_recent_signed_ecu_manifest
global attacks_detected
most_recent_signed_ecu_manifest = secondary_ecu.generate_signed_ecu_manifest(
attacks_detected)
attacks_detected = ''
def ATTACK_send_corrupt_manifest_to_primary():
import copy
corrupt_signed_manifest = copy.copy(most_recent_signed_ecu_manifest)
corrupt_signed_manifest['signed']['attacks_detected'] += 'Everything is great, I PROMISE!'
print(YELLOW + 'ATTACK: Corrupted Manifest (bad signature):' + ENDCOLORS)
print(' Modified the signed manifest as a MITM, simply changing a value:')
print(' The attacks_detected field now reads "' + RED +
repr(corrupt_signed_manifest['signed']['attacks_detected']) + ENDCOLORS)
try:
submit_ecu_manifest_to_primary(corrupt_signed_manifest)
except xmlrpc_client.Fault:
print(GREEN + 'Primary REJECTED the fraudulent ECU manifest.' + ENDCOLORS)
else:
print(RED + 'Primary ACCEPTED the fraudulent ECU manifest!' + ENDCOLORS)
def register_self_with_director():
server = xmlrpc_client.ServerProxy(
'http://' + str(demo.DIRECTOR_SERVER_HOST) + ':' +
str(demo.DIRECTOR_SERVER_PORT))
print('Registering Secondary ECU Serial and Key with Director.')
server.register_ecu_serial(
secondary_ecu.ecu_serial,
uptane.common.public_key_from_canonical(secondary_ecu.ecu_key), _vin,
False)
print(GREEN + 'Secondary has been registered with the Director.' + ENDCOLORS)
def register_self_with_primary():
server = xmlrpc_client.ServerProxy(
'http://' + str(_primary_host) + ':' + str(_primary_port))
print('Registering Secondary ECU Serial and Key with Primary.')
server.register_new_secondary(secondary_ecu.ecu_serial)
print(GREEN + 'Secondary has been registered with the Primary.' + ENDCOLORS)
def enforce_jail(fname, expected_containing_dir):
abs_fname = os.path.abspath(os.path.join(expected_containing_dir, fname))
if not abs_fname.startswith(os.path.abspath(expected_containing_dir)):
raise ValueError('Expected a filename in directory ' +
repr(expected_containing_dir) + '. When appending ' + repr(fname) +
' to the given directory, the result was not in the given directory.')
else:
return abs_fname
def clean_up_temp_file(filename):
if os.path.isfile(filename):
os.remove(filename)
|
MIT License
|
catalyst-team/catalyst
|
tests/catalyst/contrib/datasets/test_movielens.py
|
setup_module
|
python
|
def setup_module():
data_path = "./data"
try:
shutil.rmtree(data_path)
except Exception as e:
print("Error! Code: {c}, Message, {m}".format(c=type(e).__name__, m=str(e)))
|
Remove the temp folder if it exists.
|
https://github.com/catalyst-team/catalyst/blob/a6fc305eaddc499c17584824794fa8d006072842/tests/catalyst/contrib/datasets/test_movielens.py#L14-L22
|
import os
import shutil
import pytest
import torch
from catalyst.settings import SETTINGS
if SETTINGS.ml_required:
from catalyst.contrib.datasets import MovieLens
|
Apache License 2.0
|
cherrypy/cherrypy
|
cherrypy/test/test_wsgi_unix_socket.py
|
USocketHTTPConnection.__call__
|
python
|
def __call__(self, *args, **kwargs):
return self
|
Catch-all method just to present itself as a constructor for the
HTTPConnection.
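A minimal sketch of the effect (reusing names from this test module): helper code that expects a connection class and calls it like a constructor simply gets the pre-configured instance back.
conn = USocketHTTPConnection(USOCKET_PATH)
assert conn('ignored-host') is conn  # "constructing" returns the same instance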
|
https://github.com/cherrypy/cherrypy/blob/a7983fe61f7237f2354915437b04295694100372/cherrypy/test/test_wsgi_unix_socket.py#L32-L37
|
import os
import socket
import atexit
import tempfile
from http.client import HTTPConnection
import pytest
import cherrypy
from cherrypy.test import helper
def usocket_path():
fd, path = tempfile.mkstemp('cp_test.sock')
os.close(fd)
os.remove(path)
return path
USOCKET_PATH = usocket_path()
class USocketHTTPConnection(HTTPConnection):
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
|
BSD 3-Clause New or Revised License
|
naiyt/trellorss-gae
|
lib/trellorss/trello/__init__.py
|
TrelloClient.__init__
|
python
|
def __init__(self, api_key, token = None, api_secret = None, token_secret = None):
if api_key and api_secret and token and token_secret:
self.oauth_consumer = oauth.Consumer(key = api_key, secret = api_secret)
self.oauth_token = oauth.Token(key = token, secret = token_secret)
self.client = oauth.Client(self.oauth_consumer, self.oauth_token)
elif api_key:
self.client = Http()
if token is None:
self.public_only = True
else:
self.public_only = False
self.api_key = api_key
self.auth_token = token
|
Constructor
:api_key: API key generated at https://trello.com/1/appKey/generate
:token: OAuth token generated by the user (optional; without it only public boards are accessible)
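A minimal usage sketch (key and token values are hypothetical placeholders); per the constructor logic, an API key alone yields a public-only client, while adding a member token clears the public_only flag.
public_client = TrelloClient(api_key='YOUR_API_KEY')  # public_only is True
member_client = TrelloClient(api_key='YOUR_API_KEY', token='YOUR_MEMBER_TOKEN')  # public_only is False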
|
https://github.com/naiyt/trellorss-gae/blob/fc9ac51e793f69b641183b0aa6411e29b6fa5b99/lib/trellorss/trello/__init__.py#L28-L52
|
from httplib2 import Http
from urllib import urlencode
from datetime import datetime
import exceptions
import json
import os
import random
import time
import urlparse
class ResourceUnavailable(Exception):
def __init__(self, msg, http_response):
Exception.__init__(self)
self._msg = msg
self._status = http_response.status
def __str__(self):
return "Resource unavailable: %s (HTTP status: %s)" % (self._msg, self._status)
class Unauthorized(ResourceUnavailable):
pass
class TrelloClient(object):
|
MIT License
|
crypt3lx2k/tripcode-dictionary-tools
|
iwi/threading/Pool.py
|
Pool.__init__
|
python
|
def __init__ (self, num_threads=32, use_daemons=True):
self.num_threads = num_threads
self.job_queue = Queue.Queue()
self.res_queue = Queue.Queue()
self.threads = []
self.closed = False
for _ in xrange(self.num_threads):
self.threads.append(Pool.Worker(self))
for thread in self.threads:
thread.daemon = use_daemons
thread.start()
|
Initializes an instance with a certain number of threads.
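A minimal construction sketch: the worker threads are created and started immediately, and with use_daemons=True they will not block interpreter shutdown.
pool = Pool(num_threads=4, use_daemons=True)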
|
https://github.com/crypt3lx2k/tripcode-dictionary-tools/blob/927d5d7a896d5e19bb76aa64175a92bb7308b051/iwi/threading/Pool.py#L57-L72
|
import Queue
import sys
import threading
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
__all__ = ['Pool']
class Pool (object):
class WorkerExit (object):
def __call__ (self):
return None
class Worker (threading.Thread):
def __init__ (self, pool):
super(Pool.Worker, self).__init__()
self.pool = pool
def run (self):
obj = None
while obj is not self.pool.sentinel:
obj, args, kwargs = self.pool.job_queue.get()
try:
res = obj(*args, **kwargs)
if res is not None:
self.pool.res_queue.put(res)
except Exception as e:
logger.error('%s', e)
finally:
self.pool.job_queue.task_done()
sentinel = WorkerExit()
|
MIT License
|
nedbat/coveragepy
|
coverage/misc.py
|
human_sorted
|
python
|
def human_sorted(strings):
return sorted(strings, key=human_key)
|
Sort the given iterable of strings the way that humans expect.
Numeric components in the strings are sorted as numbers.
Returns the sorted list.
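A small example of the effect (assuming the function is imported from its home module, coverage.misc): numeric runs compare as integers, so 'file2.py' sorts before 'file10.py'.
from coverage.misc import human_sorted
print(human_sorted(["file10.py", "file2.py", "file1.py"]))
# ['file1.py', 'file2.py', 'file10.py'] -- plain sorted() would place 'file10.py' before 'file2.py'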
|
https://github.com/nedbat/coveragepy/blob/1677bbb3d8ca809d5c5e25248f08e065af85cde7/coverage/misc.py#L407-L415
|
import contextlib
import errno
import hashlib
import importlib
import importlib.util
import inspect
import locale
import os
import os.path
import random
import re
import socket
import sys
import types
from coverage import env
from coverage.exceptions import CoverageException
from coverage.exceptions import *
ISOLATED_MODULES = {}
def isolate_module(mod):
if mod not in ISOLATED_MODULES:
new_mod = types.ModuleType(mod.__name__)
ISOLATED_MODULES[mod] = new_mod
for name in dir(mod):
value = getattr(mod, name)
if isinstance(value, types.ModuleType):
value = isolate_module(value)
setattr(new_mod, name, value)
return ISOLATED_MODULES[mod]
os = isolate_module(os)
class SysModuleSaver:
def __init__(self):
self.old_modules = set(sys.modules)
def restore(self):
new_modules = set(sys.modules) - self.old_modules
for m in new_modules:
del sys.modules[m]
@contextlib.contextmanager
def sys_modules_saved():
saver = SysModuleSaver()
try:
yield
finally:
saver.restore()
def import_third_party(modname):
with sys_modules_saved():
try:
return importlib.import_module(modname)
except ImportError:
return None
def dummy_decorator_with_args(*args_unused, **kwargs_unused):
def _decorator(func):
return func
return _decorator
USE_CONTRACTS = env.TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0)))
if USE_CONTRACTS:
from contracts import contract
from contracts import new_contract as raw_new_contract
def new_contract(*args, **kwargs):
try:
raw_new_contract(*args, **kwargs)
except ValueError:
pass
new_contract('bytes', lambda v: isinstance(v, bytes))
new_contract('unicode', lambda v: isinstance(v, str))
def one_of(argnames):
def _decorator(func):
argnameset = {name.strip() for name in argnames.split(",")}
def _wrapper(*args, **kwargs):
vals = [kwargs.get(name) for name in argnameset]
assert sum(val is not None for val in vals) == 1
return func(*args, **kwargs)
return _wrapper
return _decorator
else:
contract = dummy_decorator_with_args
one_of = dummy_decorator_with_args
def new_contract(*args_unused, **kwargs_unused):
pass
def nice_pair(pair):
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
def expensive(fn):
if env.TESTING:
attr = "_once_" + fn.__name__
def _wrapper(self):
if hasattr(self, attr):
raise AssertionError(f"Shouldn't have called {fn.__name__} more than once")
setattr(self, attr, True)
return fn(self)
return _wrapper
else:
return fn
def bool_or_none(b):
if b is None:
return None
else:
return bool(b)
def join_regex(regexes):
return "|".join("(?:%s)" % r for r in regexes)
def file_be_gone(path):
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def ensure_dir(directory):
if directory:
os.makedirs(directory, exist_ok=True)
def ensure_dir_for_file(path):
ensure_dir(os.path.dirname(path))
def output_encoding(outfile=None):
if outfile is None:
outfile = sys.stdout
encoding = (
getattr(outfile, "encoding", None) or
getattr(sys.__stdout__, "encoding", None) or
locale.getpreferredencoding()
)
return encoding
def filename_suffix(suffix):
if suffix is True:
dice = random.Random(os.urandom(8)).randint(0, 999999)
suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
return suffix
class Hasher:
def __init__(self):
self.hash = hashlib.new("sha3_256")
def update(self, v):
self.hash.update(str(type(v)).encode("utf-8"))
if isinstance(v, str):
self.hash.update(v.encode("utf-8"))
elif isinstance(v, bytes):
self.hash.update(v)
elif v is None:
pass
elif isinstance(v, (int, float)):
self.hash.update(str(v).encode("utf-8"))
elif isinstance(v, (tuple, list)):
for e in v:
self.update(e)
elif isinstance(v, dict):
keys = v.keys()
for k in sorted(keys):
self.update(k)
self.update(v[k])
else:
for k in dir(v):
if k.startswith('__'):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
self.hash.update(b'.')
def hexdigest(self):
return self.hash.hexdigest()[:32]
def _needs_to_implement(that, func_name):
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = f"{klass.__module__}.{klass.__name__}"
raise NotImplementedError(
f"{thing} {name!r} needs to implement {func_name}()"
)
class DefaultValue:
def __init__(self, display_as):
self.display_as = display_as
def __repr__(self):
return self.display_as
def substitute_variables(text, variables):
dollar_pattern = r"""(?x) # Use extended regex syntax
\$ # A dollar sign,
(?: # then
(?P<dollar>\$) | # a dollar sign, or
(?P<word1>\w+) | # a plain word, or
{ # a {-wrapped
(?P<word2>\w+) # word,
(?:
(?P<strict>\?) | # with a strict marker
-(?P<defval>[^}]*) # or a default value
)? # maybe.
}
)
"""
dollar_groups = ('dollar', 'word1', 'word2')
def dollar_replace(match):
word = next(g for g in match.group(*dollar_groups) if g)
if word == "$":
return "$"
elif word in variables:
return variables[word]
elif match.group('strict'):
msg = f"Variable {word} is undefined: {text!r}"
raise CoverageException(msg)
else:
return match.group('defval')
text = re.sub(dollar_pattern, dollar_replace, text)
return text
def format_local_datetime(dt):
return dt.astimezone().strftime('%Y-%m-%d %H:%M %z')
def import_local_file(modname, modfile=None):
if modfile is None:
modfile = modname + '.py'
spec = importlib.util.spec_from_file_location(modname, modfile)
mod = importlib.util.module_from_spec(spec)
sys.modules[modname] = mod
spec.loader.exec_module(mod)
return mod
def human_key(s):
def tryint(s):
try:
return int(s)
except ValueError:
return s
return [tryint(c) for c in re.split(r"(\d+)", s)]
|
Apache License 2.0
|
alliefitter/boto3_type_annotations
|
boto3_type_annotations_with_docs/boto3_type_annotations/eks/client.py
|
Client.create_cluster
|
python
|
def create_cluster(self, name: str, roleArn: str, resourcesVpcConfig: Dict, version: str = None, logging: Dict = None, clientRequestToken: str = None) -> Dict:
pass
|
Creates an Amazon EKS control plane.
The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, like ``etcd`` and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique, and runs on its own set of Amazon EC2 instances.
The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support ``kubectl exec`` , ``logs`` , and ``proxy`` data flows).
Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.
You can use the ``endpointPublicAccess`` and ``endpointPrivateAccess`` parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled and private access is disabled. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
You can use the ``logging`` parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * .
.. note::
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ .
Cluster creation typically takes between 10 and 15 minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see `Managing Cluster Authentication <https://docs.aws.amazon.com/eks/latest/userguide/managing-auth.html>`__ and `Launching Amazon EKS Worker Nodes <https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html>`__ in the *Amazon EKS User Guide* .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/CreateCluster>`_
**Request Syntax**
::
response = client.create_cluster(
name='string',
version='string',
roleArn='string',
resourcesVpcConfig={
'subnetIds': [
'string',
],
'securityGroupIds': [
'string',
],
'endpointPublicAccess': True|False,
'endpointPrivateAccess': True|False
},
logging={
'clusterLogging': [
{
'types': [
'api'|'audit'|'authenticator'|'controllerManager'|'scheduler',
],
'enabled': True|False
},
]
},
clientRequestToken='string'
)
**Response Syntax**
::
{
'cluster': {
'name': 'string',
'arn': 'string',
'createdAt': datetime(2015, 1, 1),
'version': 'string',
'endpoint': 'string',
'roleArn': 'string',
'resourcesVpcConfig': {
'subnetIds': [
'string',
],
'securityGroupIds': [
'string',
],
'vpcId': 'string',
'endpointPublicAccess': True|False,
'endpointPrivateAccess': True|False
},
'logging': {
'clusterLogging': [
{
'types': [
'api'|'audit'|'authenticator'|'controllerManager'|'scheduler',
],
'enabled': True|False
},
]
},
'status': 'CREATING'|'ACTIVE'|'DELETING'|'FAILED',
'certificateAuthority': {
'data': 'string'
},
'clientRequestToken': 'string',
'platformVersion': 'string'
}
}
**Response Structure**
- *(dict) --*
- **cluster** *(dict) --*
The full description of your new cluster.
- **name** *(string) --*
The name of the cluster.
- **arn** *(string) --*
The Amazon Resource Name (ARN) of the cluster.
- **createdAt** *(datetime) --*
The Unix epoch timestamp in seconds for when the cluster was created.
- **version** *(string) --*
The Kubernetes server version for the cluster.
- **endpoint** *(string) --*
The endpoint for your Kubernetes API server.
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf.
- **resourcesVpcConfig** *(dict) --*
The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* .
- **subnetIds** *(list) --*
The subnets associated with your cluster.
- *(string) --*
- **securityGroupIds** *(list) --*
The security groups associated with the cross-account elastic network interfaces that are used to allow communication between your worker nodes and the Kubernetes control plane.
- *(string) --*
- **vpcId** *(string) --*
The VPC associated with your cluster.
- **endpointPublicAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can only receive requests that originate from within the cluster VPC.
- **endpointPrivateAccess** *(boolean) --*
This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC will use the private VPC endpoint instead of traversing the internet.
- **logging** *(dict) --*
The logging configuration for your cluster.
- **clusterLogging** *(list) --*
The cluster control plane logging configuration for your cluster.
- *(dict) --*
An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
- **types** *(list) --*
The available cluster control plane log types.
- *(string) --*
- **enabled** *(boolean) --*
If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently.
- **status** *(string) --*
The current status of the cluster.
- **certificateAuthority** *(dict) --*
The ``certificate-authority-data`` for your cluster.
- **data** *(string) --*
The base64 encoded certificate data required to communicate with your cluster. Add this to the ``certificate-authority-data`` section of the ``kubeconfig`` file for your cluster.
- **clientRequestToken** *(string) --*
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
- **platformVersion** *(string) --*
The platform version of your Amazon EKS cluster. For more information, see `Platform Versions <https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html>`__ in the * *Amazon EKS User Guide* * .
:type name: string
:param name: **[REQUIRED]**
The unique name to give to your cluster.
:type version: string
:param version:
The desired Kubernetes version for your cluster. If you do not specify a value here, the latest version available in Amazon EKS is used.
:type roleArn: string
:param roleArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see `Amazon EKS Service IAM Role <https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html>`__ in the * *Amazon EKS User Guide* * .
:type resourcesVpcConfig: dict
:param resourcesVpcConfig: **[REQUIRED]**
The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see `Cluster VPC Considerations <https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html>`__ and `Cluster Security Group Considerations <https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html>`__ in the *Amazon EKS User Guide* . You must specify at least two subnets. You may specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.
- **subnetIds** *(list) --*
Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.
- *(string) --*
- **securityGroupIds** *(list) --*
Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you do not specify a security group, the default security group for your VPC is used.
- *(string) --*
- **endpointPublicAccess** *(boolean) --*
Set this value to ``false`` to disable public access for your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is ``true`` , which enables public access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
- **endpointPrivateAccess** *(boolean) --*
Set this value to ``true`` to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC will use the private VPC endpoint. The default value for this parameter is ``false`` , which disables private access for your Kubernetes API server. For more information, see `Amazon EKS Cluster Endpoint Access Control <https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html>`__ in the * *Amazon EKS User Guide* * .
:type logging: dict
:param logging:
Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs are not exported to CloudWatch Logs. For more information, see `Amazon EKS Cluster Control Plane Logs <https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html>`__ in the * *Amazon EKS User Guide* * .
.. note::
CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see `Amazon CloudWatch Pricing <http://aws.amazon.com/cloudwatch/pricing/>`__ .
- **clusterLogging** *(list) --*
The cluster control plane logging configuration for your cluster.
- *(dict) --*
An object representing the enabled or disabled Kubernetes control plane logs for your cluster.
- **types** *(list) --*
The available cluster control plane log types.
- *(string) --*
- **enabled** *(boolean) --*
If a log type is enabled, then that log type exports its control plane logs to CloudWatch Logs. If a log type is not enabled, then that log type does not export its control plane logs. Each individual log type can be enabled or disabled independently.
:type clientRequestToken: string
:param clientRequestToken:
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
This field is autopopulated if not provided.
:rtype: dict
:returns:
|
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/eks/client.py#L25-L201
|
from typing import Optional
from botocore.client import BaseClient
from botocore.waiter import Waiter
from typing import Union
from typing import Dict
from botocore.paginate import Paginator
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
|
MIT License
|
yunify/qingcloud-sdk-python
|
qingcloud/qingstor/acl.py
|
ACL.__init__
|
python
|
def __init__(self, bucket=None, acl=None):
self.bucket = bucket
self.acl = acl or []
self.grants = []
for item in self.acl:
grantee = item["grantee"]
if grantee["type"] == "user":
grant = Grant(
permission=item["permission"],
type=grantee["type"],
id=grantee["id"],
name=grantee["name"]
)
else:
grant = Grant(
permission=item["permission"],
type=grantee["type"],
name=grantee["name"]
)
self.add_grant(grant)
|
@param bucket - The bucket
@param acl - The access control list of the bucket
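A minimal sketch of the acl structure the constructor iterates over (field names taken from the loop above; the bucket object and values are hypothetical).
acl = ACL(bucket=my_bucket, acl=[
    {"permission": "READ",
     "grantee": {"type": "user", "id": "usr-1234", "name": "alice"}},
    {"permission": "WRITE",
     "grantee": {"type": "group", "name": "QS_ALL_USERS"}},
])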
|
https://github.com/yunify/qingcloud-sdk-python/blob/10faf28de8651f3fbfea326552ee19f7519935b1/qingcloud/qingstor/acl.py#L19-L42
|
class ACL(object):
|
Apache License 2.0
|
holoviz/spatialpandas
|
spatialpandas/geometry/_algorithms/intersection.py
|
lines_intersect_bounds
|
python
|
def lines_intersect_bounds(
x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
):
n = len(start_offsets)
result.fill(False)
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
y0, y1, = y1, y0
if x0 == x1 or y0 == y1:
return
for i in range(n):
_perform_line_intersect_bounds(
i, x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
)
return result
|
Test whether each line in a collection of lines intersects with the supplied bounds
Args:
x0, y0, x1, y1: Bounds coordinates
flat_values: Interleaved line coordinates
start_offsets, stop_offsets: start and stop offsets into flat_values
separating individual lines
result: boolean array to be provided by the caller into which intersection
results will be written; must be at least as long as start_offsets
Returns:
the result array (the array passed in, which is also populated in place)
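A minimal call sketch with hypothetical data: two lines, the first crossing the box (0, 0)-(1, 1) and the second lying entirely outside it.
import numpy as np
flat_values = np.array([-1.0, -1.0, 2.0, 2.0,   # line 0: (-1, -1) -> (2, 2)
                         5.0,  5.0, 6.0, 5.0])  # line 1: (5, 5) -> (6, 5)
start_offsets = np.array([0, 4])
stop_offsets = np.array([4, 8])
result = np.zeros(2, dtype=np.bool_)
lines_intersect_bounds(0.0, 0.0, 1.0, 1.0,
                       flat_values, start_offsets, stop_offsets, result)
# result -> array([ True, False])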
|
https://github.com/holoviz/spatialpandas/blob/b6809e79f615e0be6fda6845b9725b5f87529c56/spatialpandas/geometry/_algorithms/intersection.py#L305-L341
|
import numpy as np
from numba import prange
from ...geometry._algorithms.bounds import total_bounds_interleaved
from ...geometry._algorithms.orientation import triangle_orientation
from ...utils import ngjit, ngpjit
@ngjit
def segment_intersects_point(ax0, ay0, ax1, ay1, bx, by):
if bx < min(ax0, ax1) or bx > max(ax0, ax1):
return False
if by < min(ay0, ay1) or by > max(ay0, ay1):
return False
sx = ax1 - ax0
sy = ay1 - ay0
px = bx - ax0
py = by - ay0
sxp = sx * py - sy * px
return sxp == 0
@ngjit
def segments_intersect_1d(ax0, ax1, bx0, bx1):
if ax1 < ax0:
ax0, ax1 = ax1, ax0
if bx1 < bx0:
bx0, bx1 = bx1, bx0
return max(ax0, bx0) <= min(ax1, bx1)
@ngjit
def segments_intersect(ax0, ay0, ax1, ay1, bx0, by0, bx1, by1):
if not segments_intersect_1d(ax0, ax1, bx0, bx1):
return False
if not segments_intersect_1d(ay0, ay1, by0, by1):
return False
a_zero = ax0 == ax1 and ay0 == ay1
b_zero = bx0 == bx1 and by0 == by1
if a_zero and not b_zero and (
ax0 == bx0 and ay0 == by0 or ax0 == bx1 and ay0 == by1
):
return True
elif b_zero and not a_zero and (
bx0 == ax0 and by0 == ay0 or bx0 == ax1 and by0 == ay1
):
return True
elif a_zero or b_zero:
return False
b0_orientation = triangle_orientation(ax0, ay0, ax1, ay1, bx0, by0)
b1_orientation = triangle_orientation(ax0, ay0, ax1, ay1, bx1, by1)
if b0_orientation == 0 and b1_orientation == 0:
return True
elif b0_orientation == b1_orientation:
return False
a0_orientation = triangle_orientation(bx0, by0, bx1, by1, ax0, ay0)
a1_orientation = triangle_orientation(bx0, by0, bx1, by1, ax1, ay1)
if a0_orientation == 0 and a1_orientation == 0:
return True
elif a0_orientation == a1_orientation:
return False
return True
@ngjit
def point_intersects_polygon(x, y, values, value_offsets):
winding_number = 0
for i in range(len(value_offsets) - 1):
start = value_offsets[i]
stop = value_offsets[i + 1]
for k in range(start, stop - 2, 2):
x0 = values[k]
y0 = values[k + 1]
x1 = values[k + 2]
y1 = values[k + 3]
if y1 == y0:
continue
if y1 < y0:
ascending = -1
y0, y1 = y1, y0
x0, x1 = x1, x0
else:
ascending = 1
if y0 >= y or y1 < y or (x0 < x and x1 < x):
continue
if x0 >= x and x1 >= x:
winding_number += ascending
else:
ax = x0 - x
ay = y0 - y
bx = x1 - x
by = y1 - y
axb = ax * by - ay * bx
if axb > 0 or (axb == 0 and ascending):
winding_number += ascending
return winding_number != 0
@ngpjit
def multipoints_intersect_bounds(
x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
):
n = len(start_offsets)
result.fill(False)
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
y0, y1, = y1, y0
for i in prange(n):
start = start_offsets[i]
stop = stop_offsets[i]
point_in_rect = False
for j in range(start, stop, 2):
x = flat_values[j]
y = flat_values[j + 1]
if x0 <= x and x <= x1 and y0 <= y and y <= y1:
point_in_rect = True
break
if point_in_rect:
result[i] = True
continue
return result
@ngjit
def _perform_line_intersect_bounds(
i, x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
):
start = start_offsets[i]
stop = stop_offsets[i]
bounds = total_bounds_interleaved(flat_values[start:stop])
if bounds[0] > x1 or bounds[1] > y1 or bounds[2] < x0 or bounds[3] < y0:
return
if (bounds[0] >= x0 and bounds[2] <= x1 or
bounds[1] >= y0 and bounds[3] <= y1):
result[i] = True
return
vert_in_rect = False
for j in range(start, stop, 2):
x = flat_values[j]
y = flat_values[j + 1]
if x0 <= x and x <= x1 and y0 <= y and y <= y1:
vert_in_rect = True
break
if vert_in_rect:
result[i] = True
return
segment_intersects = False
for j in range(start, stop - 2, 2):
ex0 = flat_values[j]
ey0 = flat_values[j + 1]
ex1 = flat_values[j + 2]
ey1 = flat_values[j + 3]
if segments_intersect(ex0, ey0, ex1, ey1, x0, y1, x1, y1):
segment_intersects = True
break
if segments_intersect(ex0, ey0, ex1, ey1, x0, y0, x1, y0):
segment_intersects = True
break
if segments_intersect(ex0, ey0, ex1, ey1, x0, y0, x0, y1):
segment_intersects = True
break
if segments_intersect(ex0, ey0, ex1, ey1, x1, y0, x1, y1):
segment_intersects = True
break
if segment_intersects:
result[i] = True
@ngjit
|
BSD 2-Clause Simplified License
|
robmarkcole/bme680-mqtt-micropython
|
lib/bme680.py
|
BME680.get_pressure_oversample
|
python
|
def get_pressure_oversample(self):
return (self._get_regs(CONF_T_P_MODE_ADDR, 1) & OSP_MSK) >> OSP_POS
|
Get pressure oversampling
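A minimal read-back sketch (requires the sensor on the I2C bus; OS_4X comes from the constants import above): after the __init__ sequence below, the decoded register value matches the oversample constant that was configured.
sensor = BME680()
assert sensor.get_pressure_oversample() == OS_4X  # set via set_pressure_oversample(OS_4X) in __init__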
|
https://github.com/robmarkcole/bme680-mqtt-micropython/blob/988be02c5e9c76967257e1e9e14dafff700d42f5/lib/bme680.py#L93-L95
|
from constants import *
import math
import time
__version__ = '1.0.2'
class BME680(BME680Data):
def __init__(self, i2c_addr=I2C_ADDR_PRIMARY, i2c_device=None):
BME680Data.__init__(self)
self.i2c_addr = i2c_addr
self._i2c = i2c_device
if self._i2c is None:
import smbus
self._i2c = smbus.SMBus(1)
self.chip_id = self._get_regs(CHIP_ID_ADDR, 1)
if self.chip_id != CHIP_ID:
raise RuntimeError("BME680 Not Found. Invalid CHIP ID: 0x{0:02x}".format(self.chip_id))
self.soft_reset()
self.set_power_mode(SLEEP_MODE)
self._get_calibration_data()
self.set_humidity_oversample(OS_2X)
self.set_pressure_oversample(OS_4X)
self.set_temperature_oversample(OS_8X)
self.set_filter(FILTER_SIZE_3)
self.set_gas_status(ENABLE_GAS_MEAS)
self.get_sensor_data()
def _get_calibration_data(self):
calibration = self._get_regs(COEFF_ADDR1, COEFF_ADDR1_LEN)
calibration += self._get_regs(COEFF_ADDR2, COEFF_ADDR2_LEN)
heat_range = self._get_regs(ADDR_RES_HEAT_RANGE_ADDR, 1)
heat_value = twos_comp(self._get_regs(ADDR_RES_HEAT_VAL_ADDR, 1), bits=8)
sw_error = twos_comp(self._get_regs(ADDR_RANGE_SW_ERR_ADDR, 1), bits=8)
self.calibration_data.set_from_array(calibration)
self.calibration_data.set_other(heat_range, heat_value, sw_error)
def soft_reset(self):
self._set_regs(SOFT_RESET_ADDR, SOFT_RESET_CMD)
time.sleep(RESET_PERIOD / 1000.0)
def set_humidity_oversample(self, value):
self.tph_settings.os_hum = value
self._set_bits(CONF_OS_H_ADDR, OSH_MSK, OSH_POS, value)
def get_humidity_oversample(self):
return (self._get_regs(CONF_OS_H_ADDR, 1) & OSH_MSK) >> OSH_POS
def set_pressure_oversample(self, value):
self.tph_settings.os_pres = value
self._set_bits(CONF_T_P_MODE_ADDR, OSP_MSK, OSP_POS, value)
|
MIT License
|
opencord/xos
|
lib/xos-api/xosapi/chameleon_client/grpc_client.py
|
GrpcClient._get_endpoint_from_consul
|
python
|
def _get_endpoint_from_consul(self, service_name):
host = self.consul_endpoint.split(':')[0].strip()
port = int(self.consul_endpoint.split(':')[1].strip())
while True:
log.debug('consul-lookup', host=host, port=port)
consul = Consul(host=host, port=port)
_, services = consul.catalog.service(service_name)
log.debug('consul-response', services=services)
if services:
break
log.warning('no-service', consul_host=host, consul_port=port,
service_name=service_name)
yield asleep(1.0)
service = services[randint(0, len(services) - 1)]
endpoint = '{}:{}'.format(service['ServiceAddress'],
service['ServicePort'])
returnValue(endpoint)
|
Look up an appropriate grpc endpoint (host, port) from
consul, under the service name given by the service_name argument.
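A minimal sketch of how the lookup is triggered (host names and service name are hypothetical; a running Twisted reactor is assumed): an endpoint starting with '@' is resolved through Consul by connect(), which strips the '@' and passes the rest as the service name.
client = GrpcClient(consul_endpoint='consul:8500',
                    work_dir='/tmp/chameleon',
                    endpoint='@xos-core')
client.start()  # connect() will yield _get_endpoint_from_consul('xos-core')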
|
https://github.com/opencord/xos/blob/e52d3ea83d3a26b8d0a72cccce7898258926f5eb/lib/xos-api/xosapi/chameleon_client/grpc_client.py#L175-L200
|
from __future__ import absolute_import
import os
import sys
import time
from random import randint
from zlib import decompress
import functools
import grpc
from consul import Consul
from grpc._channel import _Rendezvous
from structlog import get_logger
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.error import ConnectError
from .protos.schema_pb2_grpc import SchemaServiceStub
from google.protobuf.empty_pb2 import Empty
from .asleep import asleep
log = get_logger()
class GrpcClient(object):
RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
def __init__(self, consul_endpoint, work_dir, endpoint='localhost:50055',
reconnect_callback=None, credentials=None, restart_on_disconnect=False):
self.consul_endpoint = consul_endpoint
self.endpoint = endpoint
self.work_dir = work_dir
self.reconnect_callback = reconnect_callback
self.credentials = credentials
self.restart_on_disconnect = restart_on_disconnect
self.google_api_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'protos'))
self.plugin_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'protoc_plugins'))
self.channel = None
self.schema = None
self.retries = 0
self.shutting_down = False
self.connected = False
self.was_connected = False
def start(self):
log.debug('starting')
if not self.connected:
reactor.callLater(0, self.connect)
log.info('started')
return self
def stop(self):
log.debug('stopping')
if self.shutting_down:
return
self.shutting_down = True
log.info('stopped')
def set_reconnect_callback(self, reconnect_callback):
self.reconnect_callback = reconnect_callback
return self
def connectivity_callback(self, client, connectivity):
if (self.was_connected) and (connectivity in [connectivity.TRANSIENT_FAILURE, connectivity.SHUTDOWN]):
log.info("connectivity lost -- restarting")
os.execv(sys.executable, ['python'] + sys.argv)
if (connectivity == connectivity.READY):
self.was_connected = True
if (connectivity == connectivity.IDLE) and (self.was_connected):
connectivity = client.channel._channel.check_connectivity_state(True)
@inlineCallbacks
def connect(self):
if self.shutting_down or self.connected:
return
try:
if self.endpoint.startswith('@'):
_endpoint = yield self._get_endpoint_from_consul(
self.endpoint[1:])
else:
_endpoint = self.endpoint
if self.credentials:
log.info('securely connecting', endpoint=_endpoint)
self.channel = grpc.secure_channel(_endpoint, self.credentials)
else:
log.info('insecurely connecting', endpoint=_endpoint)
self.channel = grpc.insecure_channel(_endpoint)
if self.restart_on_disconnect:
connectivity_callback = functools.partial(self.connectivity_callback, self)
self.channel.subscribe(connectivity_callback)
time.sleep(0.5)
swagger_from = self._retrieve_schema()
self._compile_proto_files(swagger_from)
self._clear_backoff()
self.connected = True
if self.reconnect_callback is not None:
reactor.callLater(0, self.reconnect_callback)
return
except _Rendezvous as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
log.info('grpc-endpoint-not-available')
else:
log.exception('rendezvous error', e=e)
yield self._backoff('not-available')
except Exception:
if not self.shutting_down:
log.exception('cannot-connect', endpoint=_endpoint)
yield self._backoff('unknown-error')
reactor.callLater(0, self.connect)
def _backoff(self, msg):
wait_time = self.RETRY_BACKOFF[min(self.retries,
len(self.RETRY_BACKOFF) - 1)]
self.retries += 1
log.error(msg, retry_in=wait_time)
return asleep(wait_time)
def _clear_backoff(self):
if self.retries:
log.info('reconnected', after_retries=self.retries)
self.retries = 0
@inlineCallbacks
|
Apache License 2.0
|
smartelect/smartelect
|
changesets/models.py
|
Changeset.revoke_approval
|
python
|
def revoke_approval(self, user):
if not self.in_approvable_status():
raise NotInApprovableStatus
if not self.is_approved_by(user):
raise NotApprovedBy
self.approvers.remove(user)
if self.status == Changeset.STATUS_APPROVED and self.number_of_approvals < settings.MINIMUM_CHANGESET_APPROVALS:
self.status = Changeset.STATUS_NEW
self.save()
|
If the changeset is still in a state where approvals may be changed, and
the user was an approver, remove their approval.
If that makes the changeset no longer have the minimum number of approvals,
update its state.
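A minimal sketch (changeset and reviewer objects are hypothetical): revoking the approval that brought the changeset to APPROVED drops it back to NEW once the count falls below settings.MINIMUM_CHANGESET_APPROVALS.
changeset.approve(reviewer)          # may move status to STATUS_APPROVED
changeset.revoke_approval(reviewer)  # removes the approval; status can revert to STATUS_NEW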
|
https://github.com/smartelect/smartelect/blob/d6d35f2fa8f60e756ad5247f8f0a5f05830e92f8/changesets/models.py#L333-L348
|
import logging
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from civil_registry.models import Citizen
from libya_elections.abstract import AbstractTimestampModel
from register.models import RegistrationCenter, Registration
from .exceptions import NotPermittedToApprove, NotInApprovableStatus, NotApprovedBy, NotAnAllowedStatus, ChangesetException
logger = logging.getLogger(__name__)
APPROVE_CHANGESET_PERMISSION = 'changesets.approve_changeset'
QUEUE_CHANGESET_PERMISSION = 'changesets.queue_changeset'
EDIT_CHANGESET_PERMISSION = 'changesets.change_changeset'
ADD_CHANGESET_PERMISSION = 'changesets.add_changeset'
READ_CHANGESET_PERMISSION = 'changesets.read_changeset'
BROWSE_CHANGESETS_PERMISSION = 'changesets.browse_changeset'
DELETE_CHANGESET_PERMISSION = 'changesets.delete_changeset'
CHANGE_CHANGESETS_GROUP = "Change Changesets"
APPROVE_CHANGESETS_GROUP = "Approve Changesets"
QUEUE_CHANGESETS_GROUP = "Queue Changesets"
class Changeset(AbstractTimestampModel):
CHANGE_CENTER = 1
CHANGE_BLOCK = 2
CHANGE_UNBLOCK = 3
CHANGE_ROLLBACK = 4
CHANGE_CHOICES = [
(CHANGE_CENTER, _("Move voters to another polling center")),
(CHANGE_BLOCK, _("Block voters")),
(CHANGE_UNBLOCK, _("Unblock voters")),
(CHANGE_ROLLBACK, _("Roll back another changeset")),
]
CHANGE_VALID_VALUES = [value for value, label in CHANGE_CHOICES]
SELECT_CENTERS = 1
SELECT_UPLOADED_NIDS = 2
SELECT_OTHER_CHANGESET = 3
HOW_TO_SELECT_CHOICES = [
(SELECT_CENTERS, _("Select a list of polling centers")),
(SELECT_UPLOADED_NIDS, _("Upload a list of national IDs")),
(SELECT_OTHER_CHANGESET, _("Select another changeset")),
]
STATUS_NEW = 1
STATUS_APPROVED = 2
STATUS_QUEUED = 3
STATUS_EXECUTING = 4
STATUS_FAILED = 5
STATUS_SUCCESSFUL = 6
STATUS_PARTIALLY_SUCCESSFUL = 7
STATUS_ROLLED_BACK = 8
STATUS_CHOICES = [
(STATUS_NEW, _("New - not approved")),
(STATUS_APPROVED, _("Approved - not started")),
(STATUS_QUEUED,
_("Started - start button has been pressed but processing has not begun")),
(STATUS_EXECUTING, _("Executing - being processed")),
(STATUS_FAILED, _("Failed - had errors, changes were not made")),
(STATUS_SUCCESSFUL, _("Successful - completed without errors and not rolled back")),
(STATUS_PARTIALLY_SUCCESSFUL,
_("Partially successful - rollback was not able to rollback all changes")),
(STATUS_ROLLED_BACK, _("Rolled back - some or all changes have been rolled back")),
]
HAS_BEEN_EXECUTED_STATUSES = [STATUS_SUCCESSFUL, STATUS_PARTIALLY_SUCCESSFUL, STATUS_FAILED,
STATUS_ROLLED_BACK]
HAS_BEEN_QUEUED_STATUSES = [STATUS_QUEUED, STATUS_EXECUTING] + HAS_BEEN_EXECUTED_STATUSES
ROLLBACKABLE_STATUSES = [STATUS_SUCCESSFUL, STATUS_PARTIALLY_SUCCESSFUL]
EXECUTABLE_STATUSES = [STATUS_APPROVED, STATUS_QUEUED]
name = models.CharField(_('name'), max_length=256, unique=True, blank=False)
change = models.IntegerField(_('change'), choices=CHANGE_CHOICES, default=CHANGE_CENTER)
how_to_select = models.IntegerField(
_('how to select'),
choices=HOW_TO_SELECT_CHOICES,
default=SELECT_CENTERS,
help_text=_("How to select affected voters. If you select another changeset, "
"it will change the same voters who were changed in the other changeset, "
"which might NOT be the same as using the same rules for selecting "
"voters that the other changeset used.")
)
other_changeset = models.ForeignKey(
'self',
null=True,
blank=True,
verbose_name=_('other changeset'),
related_name='target_changesets',
help_text=_("Another changeset to select voters from or to roll back."),
limit_choices_to={
'status__in': HAS_BEEN_EXECUTED_STATUSES
},
on_delete=models.CASCADE
)
selected_centers = models.ManyToManyField(
RegistrationCenter,
blank=True,
verbose_name=_('selected centers'),
related_name='changesets_from',
limit_choices_to={
'deleted': False,
}
)
selected_citizens = models.ManyToManyField(
'civil_registry.Citizen',
blank=True,
verbose_name=_('selected citizens'),
related_name='changesets_selected',
)
target_center = models.ForeignKey(
RegistrationCenter,
null=True,
blank=True,
verbose_name=_('target center'),
related_name='changesets_to',
limit_choices_to={
'reg_open': True,
'deleted': False,
},
on_delete=models.PROTECT
)
message = models.CharField(
_('message'),
max_length=1024,
default='',
blank=True,
help_text=_("Optional text message to send to affected voters after applying change"),
)
justification = models.TextField(
_('justification'),
help_text=_("Reason for the changes. Include references to legal justification "
"for the changes if possible.")
)
approvers = models.ManyToManyField(
settings.AUTH_USER_MODEL,
verbose_name=_('approvers'),
related_name='changeset_approvals',
)
execution_start_time = models.DateTimeField(
_('start time'),
null=True, blank=True, editable=False,
help_text=_("When execution of the changeset started.")
)
finish_time = models.DateTimeField(_('finish time'), null=True, blank=True, editable=False)
queued_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
on_delete=models.PROTECT,
verbose_name=_('queued by'),
related_name='changesets_queued',
help_text=_("The user who queued the changeset for execution."),
)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
verbose_name=_('created by'),
related_name='changesets_created',
)
status = models.IntegerField(
_('status'),
choices=STATUS_CHOICES,
default=STATUS_NEW,
)
rollback_changeset = models.ForeignKey(
'self',
null=True,
blank=True,
verbose_name=_('rollback changeset'),
help_text=_("If this changeset has been rolled back, this is the changeset that did it."),
on_delete=models.PROTECT
)
error_text = models.TextField(
_('error text'),
blank=True,
default='',
help_text=_("If the changes failed, this will contain the error message(s).")
)
class Meta:
verbose_name = _("changeset")
verbose_name_plural = _("changesets")
ordering = ['-creation_date']
permissions = [
('approve_changeset', _("Approve changeset")),
('queue_changeset', _("Start changeset")),
('browse_changeset', _("Browse changesets")),
('read_changeset', _("Read changeset")),
]
def clean(self):
if (self.change == Changeset.CHANGE_ROLLBACK
and self.how_to_select != Changeset.SELECT_OTHER_CHANGESET):
raise ValidationError("Rollbacks must have how_to_select=OTHER CHANGESET")
if (self.how_to_select == Changeset.SELECT_OTHER_CHANGESET
and not self.other_changeset):
raise ValidationError("how_to_select is SELECT_OTHER_CHANGESET but you have not "
"selected another changeset.")
@property
def number_of_approvals(self):
if self.pk:
return self.approvers.count()
return 0
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('read_changeset', args=[self.pk])
def user_chooses_select_method(self):
return self.change != Changeset.CHANGE_ROLLBACK
def has_been_queued(self):
return self.status in Changeset.HAS_BEEN_QUEUED_STATUSES
def has_been_executed(self):
return self.status in Changeset.HAS_BEEN_EXECUTED_STATUSES
def in_editable_status(self):
return not self.has_been_queued()
def in_approvable_status(self):
return self.in_editable_status()
def in_queueable_status(self):
return self.status == Changeset.STATUS_APPROVED
def in_executable_status(self):
return self.status in Changeset.EXECUTABLE_STATUSES
def in_rollbackable_status(self):
return self.status in Changeset.ROLLBACKABLE_STATUSES
def in_deletable_status(self):
return not self.has_been_queued()
def is_approved_by(self, user):
return self.approvers.filter(pk=user.pk).exists()
def number_affected(self):
if self.has_been_executed():
return self.change_records.filter(changed=True).count()
else:
return self.get_citizens_to_change().count()
def number_not_changed(self):
return self.change_records.filter(changed=False).count()
def may_be_edited_by(self, user):
return user.has_perm(EDIT_CHANGESET_PERMISSION)
def may_be_queued_by(self, user):
return user.has_perm(QUEUE_CHANGESET_PERMISSION)
def may_be_deleted_by(self, user):
return user.has_perm(DELETE_CHANGESET_PERMISSION)
def may_be_approved_by(self, user):
return user.has_perm(APPROVE_CHANGESET_PERMISSION)
def approve(self, user):
if not self.may_be_approved_by(user):
raise NotPermittedToApprove
if not self.in_approvable_status():
raise NotInApprovableStatus
if not self.is_approved_by(user):
self.approvers.add(user)
if self.status < Changeset.STATUS_APPROVED and self.number_of_approvals >= settings.MINIMUM_CHANGESET_APPROVALS:
self.status = Changeset.STATUS_APPROVED
self.save()
|
Apache License 2.0
|
google/nerfies
|
nerfies/model_utils.py
|
sample_pdf
|
python
|
def sample_pdf(key, bins, weights, origins, directions, z_vals,
num_coarse_samples, use_stratified_sampling):
z_samples = piecewise_constant_pdf(key, bins, weights, num_coarse_samples,
use_stratified_sampling)
z_vals = jnp.sort(jnp.concatenate([z_vals, z_samples], axis=-1), axis=-1)
return z_vals, (
origins[..., None, :] + z_vals[..., None] * directions[..., None, :])
|
Hierarchical sampling.
Args:
key: jnp.ndarray(float32), [2,], random number generator.
bins: jnp.ndarray(float32), [batch_size, n_bins + 1].
weights: jnp.ndarray(float32), [batch_size, n_bins].
origins: ray origins.
directions: ray directions.
z_vals: jnp.ndarray(float32), [batch_size, n_coarse_samples].
num_coarse_samples: int, the number of additional (fine) samples to draw.
use_stratified_sampling: bool, whether to use stratified sampling.
Returns:
z_vals: jnp.ndarray(float32),
[batch_size, n_coarse_samples + num_fine_samples].
points: jnp.ndarray(float32),
[batch_size, n_coarse_samples + num_fine_samples, 3].
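A shape sketch under assumed sizes (2 rays, 64 coarse bins, 128 additional samples drawn by this call):
import jax.numpy as jnp
from jax import random
key = random.PRNGKey(0)
bins = jnp.tile(jnp.linspace(2.0, 6.0, 65), (2, 1))        # [2, 65]
weights = jnp.ones((2, 64))                                # [2, 64]
origins = jnp.zeros((2, 3))
directions = jnp.tile(jnp.array([0.0, 0.0, 1.0]), (2, 1))  # [2, 3]
z_vals = jnp.tile(jnp.linspace(2.0, 6.0, 64), (2, 1))      # [2, 64]
z_vals, points = sample_pdf(key, bins, weights, origins, directions,
                            z_vals, 128, use_stratified_sampling=True)
# z_vals: [2, 192], points: [2, 192, 3]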
|
https://github.com/google/nerfies/blob/04623e4474bde3459e2f7f2b5c9174d9e0faa7b1/nerfies/model_utils.py#L190-L215
|
from flax import linen as nn
from flax import optim
from flax import struct
from jax import lax
from jax import random
import jax.numpy as jnp
@struct.dataclass
class TrainState:
optimizer: optim.Optimizer
warp_alpha: jnp.ndarray = 0.0
time_alpha: jnp.ndarray = 0.0
@property
def warp_extra(self):
return {'alpha': self.warp_alpha, 'time_alpha': self.time_alpha}
def sample_along_rays(key, origins, directions, num_coarse_samples, near, far,
use_stratified_sampling, use_linear_disparity):
batch_size = origins.shape[0]
t_vals = jnp.linspace(0., 1., num_coarse_samples)
if not use_linear_disparity:
z_vals = near * (1. - t_vals) + far * t_vals
else:
z_vals = 1. / (1. / near * (1. - t_vals) + 1. / far * t_vals)
if use_stratified_sampling:
mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
upper = jnp.concatenate([mids, z_vals[..., -1:]], -1)
lower = jnp.concatenate([z_vals[..., :1], mids], -1)
t_rand = random.uniform(key, [batch_size, num_coarse_samples])
z_vals = lower + (upper - lower) * t_rand
else:
z_vals = jnp.broadcast_to(z_vals[None, ...],
[batch_size, num_coarse_samples])
return (z_vals, (origins[..., None, :] +
z_vals[..., :, None] * directions[..., None, :]))
def volumetric_rendering(rgb,
sigma,
z_vals,
dirs,
use_white_background,
sample_at_infinity=True,
return_weights=False,
eps=1e-10):
last_sample_z = 1e10 if sample_at_infinity else 1e-19
dists = jnp.concatenate([
z_vals[..., 1:] - z_vals[..., :-1],
jnp.broadcast_to([last_sample_z], z_vals[..., :1].shape)
], -1)
dists = dists * jnp.linalg.norm(dirs[..., None, :], axis=-1)
alpha = 1.0 - jnp.exp(-sigma * dists)
accum_prod = jnp.concatenate([
jnp.ones_like(alpha[..., :1], alpha.dtype),
jnp.cumprod(1.0 - alpha[..., :-1] + eps, axis=-1),
], axis=-1)
weights = alpha * accum_prod
rgb = (weights[..., None] * rgb).sum(axis=-2)
exp_depth = (weights * z_vals).sum(axis=-1)
med_depth = compute_depth_map(weights, z_vals)
acc = weights.sum(axis=-1)
if use_white_background:
rgb = rgb + (1. - acc[..., None])
if sample_at_infinity:
acc = weights[..., :-1].sum(axis=-1)
out = {
'rgb': rgb,
'depth': exp_depth,
'med_depth': med_depth,
'acc': acc,
}
if return_weights:
out['weights'] = weights
return out
def piecewise_constant_pdf(key, bins, weights, num_coarse_samples,
use_stratified_sampling):
eps = 1e-5
weights += eps
pdf = weights / weights.sum(axis=-1, keepdims=True)
cdf = jnp.cumsum(pdf, axis=-1)
cdf = jnp.concatenate([jnp.zeros(list(cdf.shape[:-1]) + [1]), cdf], axis=-1)
if use_stratified_sampling:
u = random.uniform(key, list(cdf.shape[:-1]) + [num_coarse_samples])
else:
u = jnp.linspace(0., 1., num_coarse_samples)
u = jnp.broadcast_to(u, list(cdf.shape[:-1]) + [num_coarse_samples])
mask = (u[..., None, :] >= cdf[..., :, None])
def minmax(x):
x0 = jnp.max(jnp.where(mask, x[..., None], x[..., :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[..., None], x[..., -1:, None]), -2)
x0 = jnp.minimum(x0, x[..., -2:-1])
x1 = jnp.maximum(x1, x[..., 1:2])
return x0, x1
bins_g0, bins_g1 = minmax(bins)
cdf_g0, cdf_g1 = minmax(cdf)
denom = (cdf_g1 - cdf_g0)
denom = jnp.where(denom < eps, 1., denom)
t = (u - cdf_g0) / denom
z_samples = bins_g0 + t * (bins_g1 - bins_g0)
return lax.stop_gradient(z_samples)
|
Apache License 2.0
|
jonathanfeng/new_horizons
|
venv/lib/python3.7/site-packages/jinja2/parser.py
|
Parser.fail_unknown_tag
|
python
|
def fail_unknown_tag(self, name, lineno=None):
return self._fail_ut_eof(name, self._end_token_stack, lineno)
|
Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
|
https://github.com/jonathanfeng/new_horizons/blob/0ec21c8f8423932611e1e0bf24548dcef912bc54/venv/lib/python3.7/site-packages/jinja2/parser.py#L103-L108
|
from . import nodes
from ._compat import imap
from .exceptions import TemplateAssertionError
from .exceptions import TemplateSyntaxError
from .lexer import describe_token
from .lexer import describe_token_expr
_statement_keywords = frozenset(
[
"for",
"if",
"block",
"extends",
"print",
"macro",
"include",
"from",
"import",
"set",
"with",
"autoescape",
]
)
_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
_math_nodes = {
"add": nodes.Add,
"sub": nodes.Sub,
"mul": nodes.Mul,
"div": nodes.Div,
"floordiv": nodes.FloorDiv,
"mod": nodes.Mod,
}
class Parser(object):
def __init__(self, environment, source, name=None, filename=None, state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = " or ".join(
"'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
)
else:
currently_looking = None
if name is None:
message = ["Unexpected end of template."]
else:
message = ["Encountered unknown tag '%s'." % name]
if currently_looking:
if name is not None and name in expected:
message.append(
"You probably made a nesting mistake. Jinja "
"is expecting this tag, but currently looking "
"for %s." % currently_looking
)
else:
message.append(
"Jinja was looking for the following tags: "
"%s." % currently_looking
)
if self._tag_stack:
message.append(
"The innermost block that needs to be "
"closed is '%s'." % self._tag_stack[-1]
)
self.fail(" ".join(message), lineno)
|
MIT License
|
finraos/elasticd
|
elasticd/plugin_manager.py
|
PluginManager.get_resource_locator
|
python
|
def get_resource_locator(self):
return self.plugins[RESOURCE_LOCATOR_KEY]
|
Simple 'getter'
:return: The loaded ResourceLocator plugin implementation
:rtype: elasticd.plugins.ResourceLocator
|
https://github.com/finraos/elasticd/blob/5ca6545cb7ae8b11e5e07ef90e5dc539ea627145/elasticd/plugin_manager.py#L43-L49
|
import logging
import importlib
DATASTORE_KEY = 'datastore'
DRIVER_KEY = 'driver'
RESOURCE_LOCATOR_KEY = 'resource-locator'
required_attributes = {DATASTORE_KEY: ['add_backend'],
DRIVER_KEY: ['update'],
RESOURCE_LOCATOR_KEY: ['get_resources']}
class PluginManager():
plugins = {}
def __init__(self, config):
logging.debug('initializing plugins ')
self._load_plugins(config)
def get_datastore(self):
return self.plugins[DATASTORE_KEY]
def get_driver(self):
return self.plugins[DRIVER_KEY]
|
Apache License 2.0
|
picoctf/picoctf
|
picoCTF-shell/hacksport/problem.py
|
Challenge.generate_flag
|
python
|
def generate_flag(self, random):
token = str(random.randint(1, 1e12))
hash_token = md5(token.encode("utf-8")).hexdigest()
return flag_fmt() % hash_token
|
Default generation of flags.
Args:
random: seeded random module.
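A minimal sketch (the challenge object stands for an instance of a concrete Challenge subclass): seeding the Random instance makes the generated flag reproducible.
import random
flag = challenge.generate_flag(random.Random(1234))  # flag_fmt() % md5(token).hexdigest()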
|
https://github.com/picoctf/picoctf/blob/280dcf21ac024067cf436894947ffffd328be048/picoCTF-shell/hacksport/problem.py#L120-L131
|
import os
from abc import ABCMeta, abstractmethod, abstractproperty
from hashlib import md5
from os.path import join
from shutil import copy2
from hacksport.deploy import give_port, flag_fmt
from hacksport.operations import execute
XINETD_SCRIPT = """#!/bin/bash
cd $(dirname $0)
exec timeout -sKILL 3m %s
"""
XINETD_WEB_SCRIPT = """#!/bin/bash
cd $(dirname $0)
%s
"""
class File(object):
def __init__(self, path, permissions=0o664, user=None, group=None):
self.path = path
self.permissions = permissions
self.user = user
self.group = group
def __repr__(self):
return "{}({},{})".format(
self.__class__.__name__, repr(self.path), oct(self.permissions)
)
def to_dict(self):
return {
"path": self.path,
"permissions": self.permissions,
"user": self.user,
"group": self.group,
}
class Directory(File):
class GroupWriteDirectory(Directory):
def __init__(self, path, permissions=0o770):
super().__init__(path, permissions=permissions)
class PreTemplatedFile(File):
def __init__(self, path, permissions=0o664):
super().__init__(path, permissions=permissions)
class ExecutableFile(File):
def __init__(self, path, permissions=0o2755):
super().__init__(path, permissions=permissions)
class ProtectedFile(File):
def __init__(self, path, permissions=0o0440):
super().__init__(path, permissions=permissions)
def files_from_directory(directory, recurse=True, permissions=0o664):
result = []
for root, dirnames, filenames in os.walk(directory):
for filename in filenames:
result.append(File(join(root, filename), permissions))
if not recurse:
break
return result
class Challenge(metaclass=ABCMeta):
files = []
dont_template = []
|
MIT License
|
romainsacchi/premise
|
premise/inventory_imports.py
|
BaseInventoryImport.prepare_inventory
|
python
|
def prepare_inventory(self):
pass
|
Prepare the inventory for the merger with Ecoinvent.
Modifies :attr:`import_db` in-place.
:returns: Nothing
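A minimal sketch of a concrete importer (class name and filtering rule are hypothetical): subclasses supply load_inventory and override this hook to adjust self.import_db in place.
class CarBatteryImport(BaseInventoryImport):
    def load_inventory(self, path):
        return ExcelImporter(path)
    def prepare_inventory(self):
        # e.g. drop datasets whose codes already exist in the target database
        self.import_db.data = [
            ds for ds in self.import_db.data if ds["code"] not in self.db_code
        ]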
|
https://github.com/romainsacchi/premise/blob/e9ba6d60cf94b63b522c45e24ab70ce9cf5557c2/premise/inventory_imports.py#L106-L114
|
import csv
import pickle
import sys
import uuid
from pathlib import Path
import carculator
import carculator_truck
import numpy as np
import wurst
import xarray as xr
from bw2io import ExcelImporter, Migration
from bw2io.importers.base_lci import LCIImporter
from prettytable import PrettyTable
from wurst import searching as ws
from . import DATA_DIR, INVENTORY_DIR
from .geomap import Geomap
from .utils import *
FILEPATH_BIOSPHERE_FLOWS = DATA_DIR / "dict_biosphere.txt"
FILEPATH_MIGRATION_MAP = INVENTORY_DIR / "migration_map.csv"
def generate_migration_maps(origin, destination):
response = {"fields": ["name", "reference product", "location"], "data": []}
with open(FILEPATH_MIGRATION_MAP, "r") as read_obj:
csv_reader = csv.reader(read_obj, delimiter=";")
next(csv_reader)
for row in csv_reader:
if row[0] == origin and row[1] == destination:
data = {}
if row[5] != "":
data["name"] = row[5]
if row[6] != "":
data["reference product"] = row[6]
if row[7] != "":
data["location"] = row[7]
response["data"].append(((row[2], row[3], row[4]), data))
return response
EI_37_35_MIGRATION_MAP = generate_migration_maps("37", "35")
EI_37_36_MIGRATION_MAP = generate_migration_maps("37", "36")
EI_35_37_MIGRATION_MAP = generate_migration_maps("35", "37")
EI_35_36_MIGRATION_MAP = generate_migration_maps("35", "36")
EI_36_37_MIGRATION_MAP = generate_migration_maps("36", "37")
EI_36_35_MIGRATION_MAP = generate_migration_maps("36", "35")
class BaseInventoryImport:
def __init__(self, database, version, path):
self.db = database
self.db_code = [x["code"] for x in self.db]
self.db_names = [
(x["name"], x["reference product"], x["location"]) for x in self.db
]
self.version = version
self.biosphere_dict = self.get_biosphere_code()
path = Path(path)
if path != Path("."):
if not path.is_file():
raise FileNotFoundError(
"The inventory file {} could not be found.".format(path)
)
self.path = path
self.import_db = self.load_inventory(path)
def load_inventory(self, path):
pass
|
BSD 3-Clause New or Revised License
|
ellmetha/django-machina
|
machina/apps/forum_conversation/forum_polls/views.py
|
TopicPollVoteView.get_success_url
|
python
|
def get_success_url(self):
messages.success(self.request, _('Your vote has been cast.'))
return reverse(
'forum_conversation:topic',
kwargs={
'forum_slug': self.object.topic.forum.slug,
'forum_pk': self.object.topic.forum.pk,
'slug': self.object.topic.slug,
'pk': self.object.topic.pk,
},
)
|
Returns the success URL to redirect the user to.
|
https://github.com/ellmetha/django-machina/blob/876fcb48260e30443f4c8312a208231c54a8f85b/machina/apps/forum_conversation/forum_polls/views.py#L77-L88
|
from django.contrib import messages
from django.forms.forms import NON_FIELD_ERRORS
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import UpdateView
from django.views.generic.edit import ModelFormMixin
from machina.core.db.models import get_model
from machina.core.loading import get_class
TopicPoll = get_model('forum_polls', 'TopicPoll')
TopicPollVote = get_model('forum_polls', 'TopicPollVote')
TopicPollVoteForm = get_class('forum_polls.forms', 'TopicPollVoteForm')
PermissionRequiredMixin = get_class('forum_permission.viewmixins', 'PermissionRequiredMixin')
class TopicPollVoteView(PermissionRequiredMixin, UpdateView):
form_class = TopicPollVoteForm
http_method_names = ['post', ]
model = TopicPoll
def get_form_kwargs(self):
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs['poll'] = self.object
return kwargs
def form_valid(self, form):
user_kwargs = (
{'voter': self.request.user}
if self.request.user.is_authenticated
else {'anonymous_key': self.request.user.forum_key}
)
if self.object.user_changes:
TopicPollVote.objects.filter(poll_option__poll=self.object, **user_kwargs).delete()
options = form.cleaned_data['options']
for option in options:
TopicPollVote.objects.create(poll_option=option, **user_kwargs)
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
messages.error(self.request, form.errors[NON_FIELD_ERRORS])
return redirect(
reverse(
'forum_conversation:topic',
kwargs={
'forum_slug': self.object.topic.forum.slug,
'forum_pk': self.object.topic.forum.pk,
'slug': self.object.topic.slug,
'pk': self.object.topic.pk
},
),
)
|
BSD 3-Clause New or Revised License
|
gabstopper/smc-python
|
smc/policy/layer3.py
|
FirewallRule.fw_ipv6_access_rules
|
python
|
def fw_ipv6_access_rules(self):
return rule_collection(
self.get_relation('fw_ipv6_access_rules'),
IPv6Rule)
|
IPv6 Rule entry point
:rtype: rule_collection(IPv6Rule)
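A hedged usage sketch; FirewallPolicy and the policy name are assumptions not shown in this snippet, and the rule_collection is assumed to expose .all():

from smc.policy.layer3 import FirewallPolicy
policy = FirewallPolicy('Example Policy')        # hypothetical existing policy
for rule in policy.fw_ipv6_access_rules.all():   # iterate the IPv6 rule entries
    print(rule.name)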
|
https://github.com/gabstopper/smc-python/blob/54386c8a710727cc1acf69334a57b155d2f5408c/smc/policy/layer3.py#L77-L85
|
from smc.base.model import ElementCreator
from smc.api.exceptions import CreatePolicyFailed, ElementNotFound, LoadPolicyFailed, CreateElementFailed
from smc.policy.policy import Policy
from smc.policy.rule import IPv4Rule, IPv6Rule
from smc.policy.rule_nat import IPv4NATRule, IPv6NATRule
from smc.base.collection import rule_collection
class FirewallRule(object):
@property
def fw_ipv4_access_rules(self):
return rule_collection(
self.get_relation('fw_ipv4_access_rules'),
IPv4Rule)
@property
def fw_ipv4_nat_rules(self):
return rule_collection(
self.get_relation('fw_ipv4_nat_rules'),
IPv4NATRule)
@property
|
Apache License 2.0
|
jupyter/qtconsole
|
qtconsole/rich_text.py
|
export_xhtml
|
python
|
def export_xhtml(html, filename, image_tag=None):
if image_tag is None:
image_tag = default_image_tag
with io.open(filename, 'w', encoding='utf-8') as f:
offset = html.find("<html>")
assert offset > -1, 'Invalid HTML string: no <html> tag.'
html = ('<html xmlns="http://www.w3.org/1999/xhtml">\n'+
html[offset+6:])
html = fix_html(html)
f.write(IMG_RE.sub(lambda x: image_tag(x, path = None, format = "svg"),
html))
|
Export the contents of the ConsoleWidget as XHTML with inline SVGs.
Parameters
----------
html : unicode
A Python unicode string containing the Qt HTML to export.
filename : str
The file to be saved.
image_tag : callable, optional (default None)
Used to convert images. See ``default_image_tag()`` for information.
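A minimal usage sketch; the HTML string and output filename are illustrative (real input would come from ConsoleWidget.document().toHtml()):

from qtconsole.rich_text import export_xhtml
html = '<html><head></head><body><p>hello</p></body></html>'
export_xhtml(html, 'session.xhtml')   # inlines SVG figures via default_image_tag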
|
https://github.com/jupyter/qtconsole/blob/6ce0c487653447cf3831aa0532d653553040568e/qtconsole/rich_text.py#L156-L183
|
import io
import os
import re
from qtpy import QtWidgets
EMPTY_P_RE = re.compile(r'<p[^/>]*>\s*</p>')
IMG_RE = re.compile(r'<img src="(?P<name>[\d]+)" />')
class HtmlExporter(object):
def __init__(self, control):
assert isinstance(control, (QtWidgets.QPlainTextEdit, QtWidgets.QTextEdit))
self.control = control
self.filename = 'ipython.html'
self.image_tag = None
self.inline_png = None
def export(self):
parent = self.control.window()
dialog = QtWidgets.QFileDialog(parent, 'Save as...')
dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
filters = [
'HTML with PNG figures (*.html *.htm)',
'XHTML with inline SVG figures (*.xhtml *.xml)'
]
dialog.setNameFilters(filters)
if self.filename:
dialog.selectFile(self.filename)
root,ext = os.path.splitext(self.filename)
if ext.lower() in ('.xml', '.xhtml'):
dialog.selectNameFilter(filters[-1])
if dialog.exec_():
self.filename = dialog.selectedFiles()[0]
choice = dialog.selectedNameFilter()
html = self.control.document().toHtml()
if choice.startswith('XHTML'):
exporter = export_xhtml
else:
inline = self.inline_png
if inline is None and IMG_RE.search(html):
dialog = QtWidgets.QDialog(parent)
dialog.setWindowTitle('Save as...')
layout = QtWidgets.QVBoxLayout(dialog)
msg = "Exporting HTML with PNGs"
info = "Would you like inline PNGs (single large html " "file) or external image files?"
checkbox = QtWidgets.QCheckBox("&Don't ask again")
checkbox.setShortcut('D')
ib = QtWidgets.QPushButton("&Inline")
ib.setShortcut('I')
eb = QtWidgets.QPushButton("&External")
eb.setShortcut('E')
box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Question,
dialog.windowTitle(), msg)
box.setInformativeText(info)
box.addButton(ib, QtWidgets.QMessageBox.NoRole)
box.addButton(eb, QtWidgets.QMessageBox.YesRole)
layout.setSpacing(0)
layout.addWidget(box)
layout.addWidget(checkbox)
dialog.setLayout(layout)
dialog.show()
reply = box.exec_()
dialog.hide()
inline = (reply == 0)
if checkbox.checkState():
self.inline_png = inline
exporter = lambda h, f, i: export_html(h, f, i, inline)
try:
return exporter(html, self.filename, self.image_tag)
except Exception as e:
msg = "Error exporting HTML to %s\n" % self.filename + str(e)
reply = QtWidgets.QMessageBox.warning(parent, 'Error', msg,
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
return None
def export_html(html, filename, image_tag = None, inline = True):
if image_tag is None:
image_tag = default_image_tag
if inline:
path = None
else:
root,ext = os.path.splitext(filename)
path = root + "_files"
if os.path.isfile(path):
raise OSError("%s exists, but is not a directory." % path)
with io.open(filename, 'w', encoding='utf-8') as f:
html = fix_html(html)
f.write(IMG_RE.sub(lambda x: image_tag(x, path = path, format = "png"),
html))
|
BSD 3-Clause New or Revised License
|
azure/azure-capi-cli-extension
|
src/capi/azext_capi/_helpers.py
|
ssl_context
|
python
|
def ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == "Windows"):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
|
Returns an SSL context appropriate for the python version and environment.
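A hedged usage sketch; the URL is illustrative only:

from six.moves.urllib.request import urlopen
with urlopen('https://example.com', context=ssl_context()) as response:
    body = response.read()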
|
https://github.com/azure/azure-capi-cli-extension/blob/8dfdb1541c7aca664342d9701a74128e15edf470/src/capi/azext_capi/_helpers.py#L19-L28
|
import platform
import ssl
import sys
from six.moves.urllib.request import urlopen
from azure.cli.core.util import in_cloud_console
|
MIT License
|
openstack/cinder
|
cinder/volume/drivers/hitachi/hbsd_common.py
|
HBSDCommon.set_terminate_target
|
python
|
def set_terminate_target(self, fake_connector, port_hostgroup_map):
raise NotImplementedError()
|
Set the necessary information in the connector when terminating the connection.
|
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/hitachi/hbsd_common.py#L713-L715
|
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import coordination
from cinder import exception
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.volume import volume_utils
VERSION = '2.1.0'
_STR_VOLUME = 'volume'
_STR_SNAPSHOT = 'snapshot'
_INHERITED_VOLUME_OPTS = [
'volume_backend_name',
'volume_driver',
'reserved_percentage',
'use_multipath_for_image_xfer',
'enforce_multipath_for_image_xfer',
'max_over_subscription_ratio',
'use_chap_auth',
'chap_username',
'chap_password',
]
COMMON_VOLUME_OPTS = [
cfg.StrOpt(
'hitachi_storage_id',
default=None,
help='Product number of the storage system.'),
cfg.StrOpt(
'hitachi_pool',
default=None,
help='Pool number or pool name of the DP pool.'),
cfg.StrOpt(
'hitachi_snap_pool',
default=None,
help='Pool number or pool name of the snapshot pool.'),
cfg.StrOpt(
'hitachi_ldev_range',
default=None,
help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that '
'can be used by the driver. Values can be in decimal format '
'(e.g. 1000) or in colon-separated hexadecimal format '
'(e.g. 00:03:E8).'),
cfg.ListOpt(
'hitachi_target_ports',
default=[],
help='IDs of the storage ports used to attach volumes to the '
'controller node. To specify multiple ports, connect them by '
'commas (e.g. CL1-A,CL2-A).'),
cfg.ListOpt(
'hitachi_compute_target_ports',
default=[],
help='IDs of the storage ports used to attach volumes to compute '
'nodes. To specify multiple ports, connect them by commas '
'(e.g. CL1-A,CL2-A).'),
cfg.BoolOpt(
'hitachi_group_create',
default=False,
help='If True, the driver will create host groups or iSCSI targets on '
'storage ports as needed.'),
cfg.BoolOpt(
'hitachi_group_delete',
default=False,
help='If True, the driver will delete host groups or iSCSI targets on '
'storage ports as needed.'),
]
_REQUIRED_COMMON_OPTS = [
'hitachi_storage_id',
'hitachi_pool',
]
CONF = cfg.CONF
CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
def _str2int(num):
if not num:
return None
if num.isdigit():
return int(num)
if not re.match(r'[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F]' +
'[0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]$', num):
return None
try:
return int(num.replace(':', ''), 16)
except ValueError:
return None
class HBSDCommon():
def __init__(self, conf, driverinfo, db):
self.conf = conf
self.db = db
self.ctxt = None
self.lock = {
'do_setup': 'do_setup',
}
self.driver_info = driverinfo
self.storage_info = {
'protocol': driverinfo['proto'],
'pool_id': None,
'snap_pool_id': None,
'ldev_range': [],
'controller_ports': [],
'compute_ports': [],
'wwns': {},
'portals': {},
}
def create_ldev(self, size):
raise NotImplementedError()
def modify_ldev_name(self, ldev, name):
raise NotImplementedError()
def create_volume(self, volume):
try:
ldev = self.create_ldev(volume['size'])
except Exception:
with excutils.save_and_reraise_exception():
utils.output_log(MSG.CREATE_LDEV_FAILED)
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
return {
'provider_location': str(ldev),
}
def get_ldev_info(self, keys, ldev, **kwargs):
raise NotImplementedError()
def create_pair_on_storage(self, pvol, svol, is_snapshot=False):
raise NotImplementedError()
def _copy_on_storage(self, pvol, size, is_snapshot=False):
ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
if ldev_info['status'] != 'NML':
msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
raise utils.HBSDError(msg)
svol = self.create_ldev(size)
try:
self.create_pair_on_storage(pvol, svol, is_snapshot)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.delete_ldev(svol)
except utils.HBSDError:
utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
return svol
def create_volume_from_src(self, volume, src, src_type):
ldev = utils.get_ldev(src)
if ldev is None:
msg = utils.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type=src_type, id=src['id'])
raise utils.HBSDError(msg)
size = volume['size']
new_ldev = self._copy_on_storage(ldev, size)
self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))
return {
'provider_location': str(new_ldev),
}
def create_cloned_volume(self, volume, src_vref):
return self.create_volume_from_src(volume, src_vref, _STR_VOLUME)
def create_volume_from_snapshot(self, volume, snapshot):
return self.create_volume_from_src(volume, snapshot, _STR_SNAPSHOT)
def delete_pair_based_on_svol(self, pvol, svol_info):
raise NotImplementedError()
def get_pair_info(self, ldev):
raise NotImplementedError()
def delete_pair(self, ldev):
pair_info = self.get_pair_info(ldev)
if not pair_info:
return
if pair_info['pvol'] == ldev:
utils.output_log(
MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'])
raise utils.HBSDBusy()
else:
self.delete_pair_based_on_svol(
pair_info['pvol'], pair_info['svol_info'][0])
def find_all_mapped_targets_from_storage(self, targets, ldev):
raise NotImplementedError()
def unmap_ldev(self, targets, ldev):
raise NotImplementedError()
def unmap_ldev_from_storage(self, ldev):
targets = {
'list': [],
}
self.find_all_mapped_targets_from_storage(targets, ldev)
self.unmap_ldev(targets, ldev)
def delete_ldev_from_storage(self, ldev):
raise NotImplementedError()
def delete_ldev(self, ldev):
self.delete_pair(ldev)
self.unmap_ldev_from_storage(ldev)
self.delete_ldev_from_storage(ldev)
def delete_volume(self, volume):
ldev = utils.get_ldev(volume)
if ldev is None:
utils.output_log(
MSG.INVALID_LDEV_FOR_DELETION,
method='delete_volume', id=volume['id'])
return
try:
self.delete_ldev(ldev)
except utils.HBSDBusy:
raise exception.VolumeIsBusy(volume_name=volume['name'])
def create_snapshot(self, snapshot):
src_vref = snapshot.volume
ldev = utils.get_ldev(src_vref)
if ldev is None:
msg = utils.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
type='volume', id=src_vref['id'])
raise utils.HBSDError(msg)
size = snapshot['volume_size']
new_ldev = self._copy_on_storage(ldev, size, True)
return {
'provider_location': str(new_ldev),
}
def delete_snapshot(self, snapshot):
ldev = utils.get_ldev(snapshot)
if ldev is None:
utils.output_log(
MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot',
id=snapshot['id'])
return
try:
self.delete_ldev(ldev)
except utils.HBSDBusy:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
def get_pool_info(self):
raise NotImplementedError()
def update_volume_stats(self):
data = {}
backend_name = (self.conf.safe_get('volume_backend_name') or
self.driver_info['volume_backend_name'])
data = {
'volume_backend_name': backend_name,
'vendor_name': 'Hitachi',
'driver_version': VERSION,
'storage_protocol': self.storage_info['protocol'],
'pools': [],
}
single_pool = {}
single_pool.update(dict(
pool_name=data['volume_backend_name'],
reserved_percentage=self.conf.safe_get('reserved_percentage'),
QoS_support=False,
thick_provisioning_support=False,
multiattach=True,
consistencygroup_support=True,
consistent_group_snapshot_enabled=True
))
try:
(total_capacity, free_capacity,
provisioned_capacity) = self.get_pool_info()
except utils.HBSDError:
single_pool.update(dict(
provisioned_capacity_gb=0,
backend_state='down'))
data["pools"].append(single_pool)
LOG.debug("Updating volume status. (%s)", data)
utils.output_log(
MSG.POOL_INFO_RETRIEVAL_FAILED,
pool=self.conf.hitachi_pool)
return data
single_pool.update(dict(
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
provisioned_capacity_gb=provisioned_capacity,
max_over_subscription_ratio=(
volume_utils.get_max_over_subscription_ratio(
self.conf.safe_get('max_over_subscription_ratio'),
True)),
thin_provisioning_support=True
))
single_pool.update(dict(backend_state='up'))
data["pools"].append(single_pool)
LOG.debug("Updating volume status. (%s)", data)
return data
def discard_zero_page(self, volume):
raise NotImplementedError()
def check_pair_svol(self, ldev):
raise NotImplementedError()
def extend_ldev(self, ldev, old_size, new_size):
raise NotImplementedError()
def extend_volume(self, volume, new_size):
ldev = utils.get_ldev(volume)
if ldev is None:
msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
volume_id=volume['id'])
raise utils.HBSDError(msg)
if self.check_pair_svol(ldev):
msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
volume_id=volume['id'])
raise utils.HBSDError(msg)
self.delete_pair(ldev)
self.extend_ldev(ldev, volume['size'], new_size)
def get_ldev_by_name(self, name):
raise NotImplementedError()
def check_ldev_manageability(self, ldev, existing_ref):
raise NotImplementedError()
def manage_existing(self, volume, existing_ref):
if 'source-name' in existing_ref:
ldev = self.get_ldev_by_name(
existing_ref.get('source-name').replace('-', ''))
elif 'source-id' in existing_ref:
ldev = _str2int(existing_ref.get('source-id'))
self.check_ldev_manageability(ldev, existing_ref)
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
return {
'provider_location': str(ldev),
}
def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
raise NotImplementedError()
def manage_existing_get_size(self, existing_ref):
ldev = None
if 'source-name' in existing_ref:
ldev = self.get_ldev_by_name(
existing_ref.get('source-name').replace("-", ""))
elif 'source-id' in existing_ref:
ldev = _str2int(existing_ref.get('source-id'))
if ldev is None:
msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
return self.get_ldev_size_in_gigabyte(ldev, existing_ref)
def unmanage(self, volume):
ldev = utils.get_ldev(volume)
if ldev is None:
utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
id=volume['id'])
return
if self.check_pair_svol(ldev):
utils.output_log(
MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'],
volume_type=utils.NORMAL_LDEV_TYPE)
raise exception.VolumeIsBusy(volume_name=volume['name'])
try:
self.delete_pair(ldev)
except utils.HBSDBusy:
raise exception.VolumeIsBusy(volume_name=volume['name'])
def _range2list(self, param):
values = [_str2int(value) for value in
self.conf.safe_get(param).split('-')]
if len(values) != 2 or None in values or values[0] > values[1]:
msg = utils.output_log(MSG.INVALID_PARAMETER, param=param)
raise utils.HBSDError(msg)
return values
def check_param_iscsi(self):
if self.conf.use_chap_auth:
if not self.conf.chap_username:
msg = utils.output_log(MSG.INVALID_PARAMETER,
param='chap_username')
raise utils.HBSDError(msg)
if not self.conf.chap_password:
msg = utils.output_log(MSG.INVALID_PARAMETER,
param='chap_password')
raise utils.HBSDError(msg)
def check_param(self):
utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
utils.check_opts(self.conf, COMMON_VOLUME_OPTS)
utils.check_opts(self.conf, self.driver_info['volume_opts'])
if self.conf.hitachi_ldev_range:
self.storage_info['ldev_range'] = self._range2list(
'hitachi_ldev_range')
if (not self.conf.hitachi_target_ports and
not self.conf.hitachi_compute_target_ports):
msg = utils.output_log(
MSG.INVALID_PARAMETER,
param='hitachi_target_ports or '
'hitachi_compute_target_ports')
raise utils.HBSDError(msg)
if (self.conf.hitachi_group_delete and
not self.conf.hitachi_group_create):
msg = utils.output_log(
MSG.INVALID_PARAMETER,
param='hitachi_group_delete or '
'hitachi_group_create')
raise utils.HBSDError(msg)
for opt in _REQUIRED_COMMON_OPTS:
if not self.conf.safe_get(opt):
msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
raise utils.HBSDError(msg)
if self.storage_info['protocol'] == 'iSCSI':
self.check_param_iscsi()
def need_client_setup(self):
raise NotImplementedError()
def setup_client(self):
pass
def enter_keep_session(self):
pass
def check_pool_id(self):
raise NotImplementedError()
def connect_storage(self):
self.check_pool_id()
utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
value=self.storage_info['pool_id'])
self.storage_info['controller_ports'] = []
self.storage_info['compute_ports'] = []
def find_targets_from_storage(self, targets, connector, target_ports):
raise NotImplementedError()
def get_hba_ids_from_connector(self, connector):
if self.driver_info['hba_id'] in connector:
return connector[self.driver_info['hba_id']]
msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
resource=self.driver_info['hba_id_type'])
raise utils.HBSDError(msg)
def create_target_to_storage(self, port, connector, hba_ids):
raise NotImplementedError()
def set_target_mode(self, port, gid):
raise NotImplementedError()
def set_hba_ids(self, port, gid, hba_ids):
raise NotImplementedError()
def delete_target_from_storage(self, port, gid):
raise NotImplementedError()
def _create_target(self, targets, port, connector, hba_ids):
target_name, gid = self.create_target_to_storage(
port, connector, hba_ids)
utils.output_log(MSG.OBJECT_CREATED, object='a target',
details='port: %(port)s, gid: %(gid)s, target_name: '
'%(target)s' %
{'port': port, 'gid': gid, 'target': target_name})
try:
self.set_target_mode(port, gid)
self.set_hba_ids(port, gid, hba_ids)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_target_from_storage(port, gid)
targets['info'][port] = True
targets['list'].append((port, gid))
def create_mapping_targets(self, targets, connector):
hba_ids = self.get_hba_ids_from_connector(connector)
for port in targets['info'].keys():
if targets['info'][port]:
continue
try:
self._create_target(targets, port, connector, hba_ids)
except utils.HBSDError:
utils.output_log(
self.driver_info['msg_id']['target'], port=port)
if not targets['list']:
self.find_targets_from_storage(
targets, connector, targets['info'].keys())
def init_cinder_hosts(self, **kwargs):
targets = kwargs.pop(
'targets', {'info': {}, 'list': [], 'iqns': {}, 'target_map': {}})
connector = volume_utils.brick_get_connector_properties(
multipath=self.conf.use_multipath_for_image_xfer,
enforce_multipath=self.conf.enforce_multipath_for_image_xfer)
target_ports = self.storage_info['controller_ports']
if target_ports:
if (self.find_targets_from_storage(
targets, connector, target_ports) and
self.conf.hitachi_group_create):
self.create_mapping_targets(targets, connector)
utils.require_target_existed(targets)
def do_setup(self, context):
@coordination.synchronized('{self.lock[do_setup]}')
def _with_synchronized(self):
self.connect_storage()
self.init_cinder_hosts()
self.ctxt = context
self.check_param()
if self.need_client_setup():
self.setup_client()
self.enter_keep_session()
_with_synchronized(self)
def check_ports_info(self):
if (self.conf.hitachi_target_ports and
not self.storage_info['controller_ports']):
msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
resource="Target ports")
raise utils.HBSDError(msg)
if (self.conf.hitachi_compute_target_ports and
not self.storage_info['compute_ports']):
msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
resource="Compute target ports")
raise utils.HBSDError(msg)
utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
value=self.storage_info['controller_ports'])
utils.output_log(MSG.SET_CONFIG_VALUE,
object='compute target port list',
value=self.storage_info['compute_ports'])
def attach_ldev(self, volume, ldev, connector, targets):
raise NotImplementedError()
def get_properties_fc(self, targets):
data = {}
data['target_wwn'] = [
self.storage_info['wwns'][target[0]] for target in targets['list']
if targets['lun'][target[0]]]
return data
def get_properties_iscsi(self, targets, multipath):
data = {}
primary_target = targets['list'][0]
if not multipath:
data['target_portal'] = self.storage_info[
'portals'][primary_target[0]]
data['target_iqn'] = targets['iqns'][primary_target]
else:
data['target_portals'] = [
self.storage_info['portals'][target[0]] for target in
targets['list'] if targets['lun'][target[0]]]
data['target_iqns'] = [
targets['iqns'][target] for target in targets['list']
if targets['lun'][target[0]]]
if self.conf.use_chap_auth:
data['auth_method'] = 'CHAP'
data['auth_username'] = self.conf.chap_username
data['auth_password'] = self.conf.chap_password
return data
def get_properties(self, targets, target_lun, connector):
multipath = connector.get('multipath', False)
if self.storage_info['protocol'] == 'FC':
data = self.get_properties_fc(targets)
elif self.storage_info['protocol'] == 'iSCSI':
data = self.get_properties_iscsi(targets, multipath)
data['target_discovered'] = False
if not multipath or self.storage_info['protocol'] == 'FC':
data['target_lun'] = target_lun
else:
target_luns = []
for target in targets['list']:
if targets['lun'][target[0]]:
target_luns.append(target_lun)
data['target_luns'] = target_luns
return data
@coordination.synchronized('hbsd-host-{self.conf.hitachi_storage_id}-'
'{connector[host]}')
def initialize_connection(self, volume, connector):
targets = {
'info': {},
'list': [],
'lun': {},
'iqns': {},
'target_map': {},
}
ldev = utils.get_ldev(volume)
if ldev is None:
msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
volume_id=volume['id'])
raise utils.HBSDError(msg)
target_lun = self.attach_ldev(volume, ldev, connector, targets)
return {
'driver_volume_type': self.driver_info['volume_type'],
'data': self.get_properties(targets, target_lun, connector),
}
def get_target_ports(self, connector):
if 'ip' in connector and connector['ip'] == CONF.my_ip:
return self.storage_info['controller_ports']
return (self.storage_info['compute_ports'] or
self.storage_info['controller_ports'])
def get_port_hostgroup_map(self, ldev_id):
raise NotImplementedError()
|
Apache License 2.0
|
tensorflow/tensor2tensor
|
tensor2tensor/rl/gym_utils.py
|
gym_env_wrapper
|
python
|
def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,
rendered_env_resize_to, sticky_actions, output_dtype,
num_actions):
assert ((not rl_env_max_episode_steps) or
isinstance(rl_env_max_episode_steps, int))
wrap_with_time_limit = ((not rl_env_max_episode_steps) or
rl_env_max_episode_steps >= 0)
if wrap_with_time_limit:
env = remove_time_limit_wrapper(env)
if num_actions is not None:
logging.log_first_n(
logging.INFO, "Number of discretized actions: %d", 1, num_actions)
env = ActionDiscretizeWrapper(env, num_actions=num_actions)
if sticky_actions:
env = StickyActionEnv(env)
if maxskip_env:
env = MaxAndSkipEnv(env)
if rendered_env:
env = RenderedEnv(
env, resize_to=rendered_env_resize_to, output_dtype=output_dtype)
if wrap_with_time_limit and rl_env_max_episode_steps is not None:
env = gym.wrappers.TimeLimit(
env, max_episode_steps=rl_env_max_episode_steps)
return env
|
Wraps a gym environment. See make_gym_env for details.
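A hedged usage sketch; the environment id and the flag values are assumptions (make_gym_env normally supplies them):

import gym
env = gym.make('PongNoFrameskip-v4')
env = gym_env_wrapper(env,
                      rl_env_max_episode_steps=1000,
                      maxskip_env=True,
                      rendered_env=False,
                      rendered_env_resize_to=None,
                      sticky_actions=True,
                      output_dtype=None,
                      num_actions=None)
obs = env.reset()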
|
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/rl/gym_utils.py#L273-L305
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
import gym
import gym.wrappers
import numpy as np
from PIL import Image
class StickyActionEnv(gym.Wrapper):
def __init__(self, env, p=0.25):
gym.Wrapper.__init__(self, env)
self.p = p
self.last_action = 0
def step(self, action):
if np.random.uniform() < self.p:
action = self.last_action
self.last_action = action
obs, reward, done, info = self.env.step(action)
return obs, reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
gym.Wrapper.__init__(self, env)
observation_space = env.observation_space
self._obs_buffer = np.zeros(
(2,) + observation_space.shape, dtype=observation_space.dtype)
self._skip = skip
def __str__(self):
return "MaxAndSkip<%s>" % str(self.env)
def step(self, action):
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ActionDiscretizeWrapper(gym.ActionWrapper):
def __init__(self, env, num_actions):
if not isinstance(env.action_space, gym.spaces.box.Box):
raise ValueError(
"The action space is {}, but gym.spaces.box.Box is expected".format(
env.action_space))
gym.Wrapper.__init__(self, env)
self._num_actions = np.broadcast_to(num_actions, env.action_space.shape)
if env.action_space.shape != self._num_actions.shape:
raise ValueError("Spec {} and limit shape do not match. Got {}".format(
env.action_space.shape, self._num_actions.shape))
self.action_space = gym.spaces.MultiDiscrete(nvec=self._num_actions)
self._action_map = self._discretize_env(env)
def _discretize_env(self, env):
if not np.all(self._num_actions >= 2):
raise ValueError("num_actions should all be at least size 2.")
if (math.isinf(np.min(env.action_space.low)) or
math.isinf(np.max(env.action_space.high))):
raise ValueError(
"""Minimum of boxes is {} and maximum of boxes is {},
but we expect that finite values are provided.""".
format(np.min(env.action_space.low),
np.max(env.action_space.high)))
limits = np.broadcast_to(self._num_actions,
env.action_space.shape)
minimum = np.broadcast_to(np.min(env.action_space.low),
env.action_space.shape)
maximum = np.broadcast_to(np.max(env.action_space.high),
env.action_space.shape)
action_map = [
np.linspace(env_min, env_max, num=n_actions)
for env_min, env_max, n_actions in zip(
np.nditer(minimum), np.nditer(maximum), np.nditer(limits))
]
return action_map
def _map_actions(self, action):
action = np.asarray(action)
if action.shape != self.action_space.shape:
raise ValueError(
"Received action with incorrect shape. Got {}, expected {}".format(
action.shape, self.action_space.shape))
mapped_action = [self._action_map[i][a]
for i, a in enumerate(action.flatten())]
return np.reshape(mapped_action, newshape=action.shape)
def action(self, action):
return self._map_actions(action)
def reverse_action(self, action):
raise NotImplementedError
class RenderedEnv(gym.Wrapper):
def __init__(self,
env,
mode="rgb_array",
low=0,
high=255,
resize_to=None,
output_dtype=None):
gym.Wrapper.__init__(self, env)
self.mode = mode
sample_frame = self.render(mode=self.mode)
assert sample_frame is not None
self.should_resize = False
self.output_dtype = output_dtype
if resize_to is None:
self.observation_space = gym.spaces.Box(
low=low,
high=high,
shape=sample_frame.shape,
dtype=sample_frame.dtype)
else:
assert len(resize_to) == 2
self.should_resize = True
num_channels = sample_frame.shape[-1]
self.observation_space = gym.spaces.Box(
low=low,
high=high,
shape=list(resize_to) + [num_channels],
dtype=sample_frame.dtype)
def _maybe_resize(self, obs):
if not self.should_resize:
return obs
height, width = self.observation_space.shape[:2]
img = Image.fromarray(obs)
img = img.resize([width, height], resample=Image.ANTIALIAS)
if self.output_dtype is None:
return np.array(img)
return np.array(img).astype(self.output_dtype)
def step(self, action):
_, reward, done, info = self.env.step(action)
obs = self._maybe_resize(self.env.render(mode=self.mode))
return obs, reward, done, info
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs = self._maybe_resize(self.env.render(mode=self.mode))
return obs
def remove_time_limit_wrapper(env):
if isinstance(env, gym.wrappers.TimeLimit):
env = env.env
env_ = env
while isinstance(env_, gym.Wrapper):
if isinstance(env_, gym.wrappers.TimeLimit):
raise ValueError("Can remove only top-level TimeLimit gym.Wrapper.")
env_ = env_.env
return env
|
Apache License 2.0
|
pyrustic/dresscode
|
dresscode/app.py
|
App.home
|
python
|
def home(self):
return self._home
|
Return the PID (page ID) of the home page.
|
https://github.com/pyrustic/dresscode/blob/4d741be5177ada4efcec4942913366a081128087/dresscode/app.py#L75-L77
|
import tkinter as tk
from viewable import Viewable
from pyrustic.app import App as PyrusticApp
from cyberpunk_theme import Cyberpunk
from megawidget.scrollbox import Scrollbox
class App:
def __init__(self, title=None, width=900, height=550,
home=None, scrollbar="vertical",
theme=Cyberpunk(), on_exit=None):
self._title = title
self._width = width
self._height = height
self._home = home
self._scrollbar = scrollbar
self._theme = theme
self._on_exit = on_exit
self._pages = {}
self._opened_page = None
self._caching = False
self._cache = {}
self._menubar = None
self._main_view = None
self._menu_map = {}
self._pids = []
self._pids_count = 0
self._pyrustic_app = PyrusticApp()
self._root = self._pyrustic_app.root
self._setup()
@property
def title(self):
return self._title
@title.setter
def title(self, val):
self._title = val
@property
def width(self):
return self._width
@width.setter
def width(self, val):
self._width = val
@property
def height(self):
return self._height
@height.setter
def height(self, val):
self._height = val
@property
|
MIT License
|
yaksok/yaksok
|
yaksok/yacc.py
|
p_function_description_item_identifier
|
python
|
def p_function_description_item_identifier(t):
t[0] = [('IDENTIFIER', t[1])]
|
function_description_item : IDENTIFIER
|
https://github.com/yaksok/yaksok/blob/73f14863d04f054eef2926f25a091f72f60352a5/yaksok/yacc.py#L117-L119
|
import ast
import logging
from .lex import tokens, IndentLexer
from .ast_tool import transform
target_language = 'python'
def set_target(lang):
global target_language
target_language = lang
precedence = (
("nonassoc", "EQ", "GT", "LT", "NE", "GTEQ", "LTEQ"),
)
binop_cls = {
'+': ast.Add,
'-': ast.Sub,
'%': ast.Mod,
'*': ast.Mult,
'/': ast.Div,
'>': ast.Gt,
'<': ast.Lt,
'>=': ast.GtE,
'<=': ast.LtE,
'=': ast.Eq,
'!=': ast.NotEq,
}
_gen_sym_idx = 0
def gen_sym():
global _gen_sym_idx
_gen_sym_idx += 1
return '____{}gs____gs'.format(_gen_sym_idx)
def flatten(l):
def flatten_iter(l):
if isinstance(l, list):
for x in l:
for y in flatten_iter(x):
yield y
else:
yield l
return list(flatten_iter(l))
assert flatten([[1,2,[3,4],5],[6,7]]) == [1,2,3,4,5,6,7]
errors = []
def report_error(t, msg):
try:
errors.append((t.lineno, msg))
except:
errors.append((-1, msg))
def p_file_input_end(t):
t[0] = t[1]
def p_file_input(t):
if isinstance(t[len(t)-1], str):
if len(t) == 3:
t[0] = t[1]
else:
t[0] = []
else:
if len(t) == 3:
t[0] = flatten(t[1]+[t[2]])
else:
t[0] = flatten([t[1]])
def p_stmts(t):
if len(t) == 3:
t[0] = flatten(t[1] + [t[2]])
else:
t[0] = flatten([t[1]])
def p_suite(t):
if len(t) == 2:
t[0] = [t[1]]
else:
t[0] = t[3]
def p_defun_str(t):
if '\n' in t[1]:
        report_error(t, "Syntax error.")
        report_error(t, "\tA line break is not allowed inside a function declaration.")
raise SyntaxError
t[0] = t[1]
def p_function_description_item_ws(t):
t[0] = [('WS', t[1])]
|
BSD 3-Clause New or Revised License
|
ghosttext/ghosttext-for-sublimetext
|
WebSocket/WebSocketServer.py
|
WebSocketServer.start
|
python
|
def start(self):
print('Start')
self._socket.listen(1)
self._port = self._socket.getsockname()[1]
print('Listening on: {}'.format(self._port))
self._running = True
self._conn, self._address = self._socket.accept()
data = self._recv_all()
self._conn.sendall(self._handshake.perform(data).encode("utf-8"))
while self._running:
try:
header = self._conn.recv(24)
except OSError:
self._running = False
continue
            if len(header) > 0:
self._frame = Frame()
try:
self._frame.parse(header)
                except IndexError as e:
print(str(e))
print(traceback.format_exc())
self._running = False
continue
if self._frame.terminate:
self._close_frame_received = True
if not self._close_frame_send:
self._conn.send(self._frame.close())
self._close_frame_send = True
self._running = False
continue
data = bytearray()
data.extend(header)
offset = self._frame.get_payload_offset()
if offset > 0:
try:
for offset_part in range(0, ceil(offset/4096)):
data.extend(self._conn.recv(4096))
except MemoryError as e:
print(str(e))
print(traceback.format_exc())
continue
if self._frame.utf8:
try:
request = self._frame.get_payload(data).decode("utf-8")
self._received_payload += request.lstrip('\x00')
except UnicodeDecodeError:
continue
if self._frame.utf8 and self._frame.fin:
self._on_message_handler.on_message(self._received_payload)
self._received_payload = ''
print('Stop')
self.stop()
|
Starts the server.
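A hedged usage sketch; the host/port values and running start() in a background thread are assumptions, and wiring up the message handler (only the attribute is visible above) is left out:

import threading
server = WebSocketServer('localhost', 0)            # port 0 picks a free port
threading.Thread(target=server.start, daemon=True).start()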
|
https://github.com/ghosttext/ghosttext-for-sublimetext/blob/fb094e4899dd9a091afa677f909400c2fdc450f4/WebSocket/WebSocketServer.py#L37-L102
|
import socket
import traceback
from math import ceil
from .Frame import Frame
from .Handshake import Handshake
class WebSocketServer:
_id = 1
def __init__(self, host='localhost', port=0):
self._handshake = Handshake()
self._frame = Frame()
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((host, port))
self._on_message_handler = None
self._on_close_handler = None
self._running = False
self._conn = None
self._address = None
self._port = 0
self._close_frame_send = False
self._close_frame_received = False
self._received_payload = ''
self._id = WebSocketServer._id
WebSocketServer._id += 1
print("WebSocketServer id: {}".format(self._id))
|
MIT License
|
sage-bionetworks/schematic
|
schematic/schemas/explorer.py
|
SchemaExplorer.generate_class_template
|
python
|
def generate_class_template(self):
template = {
"@id": "uri or curie of the class",
"@type": "rdfs:Class",
"rdfs:comment": "description of the class",
"rdfs:label": "class label, should match @id",
"rdfs:subClassOf": {"@id": "parent class, could be list"},
"schema:isPartOf": {"@id": "http://schema.biothings.io"},
}
return template
|
Generate a template for a schema class.
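A hedged usage sketch; the class values filled into the template are illustrative:

se = SchemaExplorer()                    # loads the default schema
template = se.generate_class_template()
template['@id'] = 'bts:Patient'
template['rdfs:label'] = 'Patient'
template['rdfs:comment'] = 'A person receiving care'
template['rdfs:subClassOf'] = {'@id': 'bts:Thing'}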
|
https://github.com/sage-bionetworks/schematic/blob/33938f9a0786c0bb3b01557c0ba3a46be58c7a51/schematic/schemas/explorer.py#L523-L533
|
import os
import string
import json
import logging
from typing import Any, Dict, Optional, Text, List
import inflection
import networkx as nx
from rdflib import Graph, Namespace, plugin, query
from networkx.algorithms.cycles import find_cycle
from networkx.readwrite import json_graph
from schematic.utils.curie_utils import (
expand_curies_in_schema,
uri2label,
extract_name_from_uri_or_curie,
)
from schematic.utils.general import find_duplicates
from schematic.utils.io_utils import load_default, load_json, load_schemaorg
from schematic.utils.schema_utils import (
load_schema_into_networkx,
node_attrs_cleanup,
class_to_node,
relationship_edges,
)
from schematic.utils.general import dict2list, unlist
from schematic.utils.viz_utils import visualize
from schematic.utils.validate_utils import (
validate_class_schema,
validate_property_schema,
validate_schema,
)
from schematic.schemas.curie import uri2curie, curie2uri
namespaces = dict(rdf=Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#"))
logger = logging.getLogger(__name__)
class SchemaExplorer:
def __init__(self):
self.load_default_schema()
def load_schema(self, schema):
self.schema = load_json(schema)
self.schema_nx = load_schema_into_networkx(self.schema)
def export_schema(self, file_path):
with open(file_path, "w") as f:
json.dump(self.schema, f, sort_keys=True, indent=4, ensure_ascii=False)
def load_default_schema(self):
self.schema = load_default()
self.schema_nx = load_schema_into_networkx(self.schema)
def get_nx_schema(self):
return self.schema_nx
def get_edges_by_relationship(
self, class_label: str, relationship: str
) -> List[str]:
edges = []
mm_graph = self.get_nx_schema()
        for (u, v, key, c) in mm_graph.out_edges(class_label, data=True, keys=True):
if key == relationship:
edges.append((u, v))
return edges
def get_descendants_by_edge_type(
self,
source_node: str,
relationship: str,
connected: bool = True,
ordered: bool = False
) -> List[str]:
mm_graph = self.get_nx_schema()
root_descendants = nx.descendants(mm_graph, source_node)
subgraph_nodes = list(root_descendants)
subgraph_nodes.append(source_node)
descendants_subgraph = mm_graph.subgraph(subgraph_nodes)
rel_edges = []
for (u, v, key, c) in descendants_subgraph.edges(data=True, keys=True):
if key == relationship:
rel_edges.append((u, v))
relationship_subgraph = nx.DiGraph()
relationship_subgraph.add_edges_from(rel_edges)
descendants = relationship_subgraph.nodes()
if not descendants:
return []
if connected and ordered:
descendants = nx.descendants(relationship_subgraph, source_node)
descendants.add(source_node)
descendants = nx.topological_sort(
relationship_subgraph.subgraph(descendants)
)
elif connected:
descendants = nx.descendants(relationship_subgraph, source_node)
descendants.add(source_node)
elif ordered:
descendants = nx.topological_sort(relationship_subgraph)
return list(descendants)
def get_adjacent_nodes_by_relationship(
self, node: str, relationship: str
) -> List[str]:
nodes = set()
mm_graph = self.get_nx_schema()
for (u, v, key, c) in mm_graph.out_edges(node, data=True, keys=True):
if key == relationship:
nodes.add(v)
return list(nodes)
def is_class_in_schema(self, class_label):
if self.schema_nx.nodes[class_label]:
return True
else:
return False
def full_schema_graph(self, size=None):
edges = self.schema_nx.edges()
return visualize(edges, size=size)
def sub_schema_graph(self, source, direction, size=None):
if direction == "down":
edges = list(nx.edge_bfs(self.schema_nx, [source]))
return visualize(edges, size=size)
elif direction == "up":
paths = self.find_parent_classes(source)
edges = []
for _path in paths:
_path.append(source)
for i in range(0, len(_path) - 1):
edges.append((_path[i], _path[i + 1]))
return visualize(edges, size=size)
elif direction == "both":
paths = self.find_parent_classes(source)
edges = list(nx.edge_bfs(self.schema_nx, [source]))
for _path in paths:
_path.append(source)
for i in range(0, len(_path) - 1):
edges.append((_path[i], _path[i + 1]))
return visualize(edges, size=size)
def find_parent_classes(self, schema_class):
digraph = self.get_digraph_by_edge_type("parentOf")
root_node = list(nx.topological_sort(digraph))[0]
paths = nx.all_simple_paths(
self.schema_nx, source=root_node, target=schema_class
)
return [_path[:-1] for _path in paths]
def find_class_specific_properties(self, schema_class):
schema_uri = self.schema_nx.nodes[schema_class]["uri"]
properties = []
for record in self.schema["@graph"]:
if record["@type"] == "rdf:Property":
if (
type(record["schema:domainIncludes"]) == dict
and record["schema:domainIncludes"]["@id"] == schema_uri
):
properties.append(record["rdfs:label"])
elif (
type(record["schema:domainIncludes"]) == list
and [
item
for item in record["schema:domainIncludes"]
if item["@id"] == schema_uri
]
!= []
):
properties.append(record["rdfs:label"])
return properties
def find_all_class_properties(self, schema_class, display_as_table=False):
parents = self.find_parent_classes(schema_class)
properties = [
{
"class": schema_class,
"properties": self.find_class_specific_properties(schema_class),
}
]
for path in parents:
path.reverse()
for _parent in path:
properties.append(
{
"class": _parent,
"properties": self.find_class_specific_properties(_parent),
}
)
if not display_as_table:
return properties
else:
content = [["Property", "Expected Type", "Description", "Class"]]
for record in properties:
for _property in record["properties"]:
property_info = self.explore_property(_property)
if "range" in property_info:
content.append(
[
_property,
property_info["range"],
property_info["description"],
record["class"],
]
)
else:
content.append(
[_property, property_info["description"], record["class"]]
)
def find_class_usages(self, schema_class):
usages = []
schema_uri = self.schema_nx.nodes[schema_class]["uri"]
for record in self.schema["@graph"]:
usage = {}
if record["@type"] == "rdf:Property":
if "schema:rangeIncludes" in record:
p_range = dict2list(record["schema:rangeIncludes"])
for _doc in p_range:
if _doc["@id"] == schema_uri:
usage["property"] = record["rdfs:label"]
p_domain = dict2list(record["schema:domainIncludes"])
usage["property_used_on_class"] = unlist(
[self.uri2label(record["@id"]) for record in p_domain]
)
usage["description"] = record["rdfs:comment"]
if usage:
usages.append(usage)
return usages
def find_child_classes(self, schema_class):
return unlist(list(self.schema_nx.successors(schema_class)))
def find_adjacent_child_classes(self, schema_class):
return self.get_adjacent_nodes_by_relationship(schema_class, "parentOf")
def explore_class(self, schema_class):
parents = []
if "subClassOf" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["subClassOf"]
parents_list = []
if isinstance(schema_node_val, dict):
parents_list.append(self.schema_nx.nodes[schema_class]["subClassOf"])
else:
parents_list = schema_node_val
for parent in parents_list:
parents.append(extract_name_from_uri_or_curie(parent["@id"]))
requires_range = []
if "rangeIncludes" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["rangeIncludes"]
if isinstance(schema_node_val, dict):
subclass_list = []
subclass_list.append(
self.schema_nx.nodes[schema_class]["rangeIncludes"]
)
else:
subclass_list = schema_node_val
for range_class in subclass_list:
requires_range.append(
extract_name_from_uri_or_curie(range_class["@id"])
)
requires_dependencies = []
if "requiresDependency" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["requiresDependency"]
if isinstance(schema_node_val, dict):
subclass_list = []
subclass_list.append(
self.schema_nx.nodes[schema_class]["requiresDependency"]
)
else:
subclass_list = schema_node_val
for dep_class in subclass_list:
requires_dependencies.append(
extract_name_from_uri_or_curie(dep_class["@id"])
)
requires_components = []
if "requiresComponent" in self.schema_nx.nodes[schema_class]:
schema_node_val = self.schema_nx.nodes[schema_class]["requiresComponent"]
if isinstance(schema_node_val, dict):
subclass_list = []
subclass_list.append(
self.schema_nx.nodes[schema_class]["requiresComponent"]
)
else:
subclass_list = schema_node_val
for comp_dep_class in subclass_list:
requires_components.append(
extract_name_from_uri_or_curie(comp_dep_class["@id"])
)
required = False
if "required" in self.schema_nx.nodes[schema_class]:
required = self.schema_nx.nodes[schema_class]["required"]
validation_rules = []
if "validationRules" in self.schema_nx.nodes[schema_class]:
validation_rules = self.schema_nx.nodes[schema_class]["validationRules"]
class_info = {
"properties": self.find_class_specific_properties(schema_class),
"description": self.schema_nx.nodes[schema_class]["description"],
"uri": curie2uri(self.schema_nx.nodes[schema_class]["uri"], namespaces),
"usage": "NA",
"child_classes": self.find_adjacent_child_classes(schema_class),
"subClassOf": parents,
"range": requires_range,
"dependencies": requires_dependencies,
"validation_rules": validation_rules,
"required": required,
"component_dependencies": requires_components,
"parent_classes": parents
}
if "displayName" in self.schema_nx.nodes[schema_class]:
class_info["displayName"] = self.schema_nx.nodes[schema_class][
"displayName"
]
return class_info
def get_property_label_from_display_name(self, display_name):
display_name = display_name.translate({ord(c): None for c in string.whitespace})
label = inflection.camelize(display_name.strip(), uppercase_first_letter=False)
return label
def get_class_label_from_display_name(self, display_name):
display_name = display_name.translate({ord(c): None for c in string.whitespace})
label = inflection.camelize(display_name.strip(), uppercase_first_letter=True)
return label
def get_class_by_property(self, property_display_name):
schema_property = self.get_property_label_from_display_name(
property_display_name
)
for record in self.schema["@graph"]:
if record["@type"] == "rdf:Property":
if record["rdfs:label"] == schema_property:
p_domain = dict2list(record["schema:domainIncludes"])
return unlist(
[
self.uri2label(schema_class["@id"])
for schema_class in p_domain
]
)
return None
def uri2label(self, uri):
return uri.split(":")[1]
def explore_property(self, schema_property):
property_info = {}
for record in self.schema["@graph"]:
if record["@type"] == "rdf:Property":
if record["rdfs:label"] == schema_property:
property_info["id"] = record["rdfs:label"]
property_info["description"] = record["rdfs:comment"]
property_info["uri"] = curie2uri(record["@id"], namespaces)
p_domain = dict2list(record["schema:domainIncludes"])
property_info["domain"] = unlist(
[self.uri2label(record["@id"]) for record in p_domain]
)
if "schema:rangeIncludes" in record:
p_range = dict2list(record["schema:rangeIncludes"])
property_info["range"] = [
self.uri2label(record["@id"]) for record in p_range
]
else:
property_info["range"] = []
if "sms:required" in record:
if "sms:true" == record["sms:required"]:
property_info["required"] = True
else:
property_info["required"] = False
validation_rules = []
if "sms:validationRules" in record:
property_info["validation_rules"] = record[
"sms:validationRules"
]
if "sms:requiresDependency" in record:
p_dependencies = dict2list(record["sms:requiresDependency"])
property_info["dependencies"] = [
self.uri2label(record["@id"]) for record in p_dependencies
]
else:
property_info["dependencies"] = []
if "sms:displayName" in record:
property_info["displayName"] = record["sms:displayName"]
break
return property_info
|
MIT License
|
fatescript/nnprof
|
nnprof/info_table.py
|
InfoTable.average
|
python
|
def average(self, average_key="hits"):
hits = self.info[average_key]
for i, header in enumerate(self.headers):
if header.endswith("time") or header.endswith("mem"):
self.info[header + "_avg"] = self.info[header] / hits
self.headers[i] += "_avg"
del self.info[header]
|
Average the time and memory columns of the table by the given key.
Args:
    average_key: name of the column used as the divisor (defaults to "hits").
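A small illustration of the averaging step; the header names and values are made up:

import numpy as np
table = InfoTable(headers=['name', 'hits', 'cpu_time'],
                  data=[np.array(['conv1']), np.array([4.0]), np.array([8.0])])
table.average()                      # cpu_time is divided by hits and renamed cpu_time_avg
print(table.headers)                 # ['name', 'hits', 'cpu_time_avg']
print(table.info['cpu_time_avg'])    # [2.]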
|
https://github.com/fatescript/nnprof/blob/cc446055a0f9d9db0a7d9690cdba80ce6ca21d45/nnprof/info_table.py#L88-L100
|
import numpy as np
import tabulate
from torch.autograd.profiler import format_time
try:
from torch.autograd.profiler import format_memory
except ImportError:
from .utils import format_memory
class InfoTable:
def __init__(self, headers, data, with_percent=False):
assert len(headers) == len(data), "length of headers and data are not matched"
self.headers = headers
self.info = {key: value for key, value in zip(headers, data)}
self.with_percent = with_percent
def insert(self, header, data, position=-1):
        self.info[header] = data
        if header in self.headers:
            index = self.headers.index(header)
            # swap in place; a local swap(a, b) helper would not modify the list
            self.headers[index], self.headers[position] = (
                self.headers[position], self.headers[index])
else:
self.headers.insert(position, header)
def sorted_by(self, keyname=None, descending=True):
if keyname is None:
return self
if keyname not in self.info:
keyname += "_avg"
assert keyname in self.info
sort_index = np.argsort(self.info[keyname], axis=0).reshape(-1)
if descending:
sort_index = sort_index[::-1]
for header in self.headers:
self.info[header] = self.info[header][sort_index]
return self
def filter(self, filter_list=None):
self.headers = [header for header in self.headers if header not in filter_list]
def filter_zeros(self):
filter_list = []
for header in self.headers:
data = self.info[header]
if "float" in data.dtype.name or "int" in data.dtype.name:
if data.sum() == 0:
filter_list.append(header)
self.filter(filter_list)
|
Apache License 2.0
|
rapid7/vm-console-client-python
|
rapid7vmconsole/models/pci.py
|
PCI.special_notes
|
python
|
def special_notes(self):
return self._special_notes
|
Gets the special_notes of this PCI. # noqa: E501
Any special notes or remarks about the vulnerability that pertain to PCI compliance. # noqa: E501
:return: The special_notes of this PCI. # noqa: E501
:rtype: str
|
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/pci.py#L138-L146
|
import pprint
import re
import six
class PCI(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'adjusted_cvss_score': 'int',
'adjusted_severity_score': 'int',
'fail': 'bool',
'special_notes': 'str',
'status': 'str'
}
attribute_map = {
'adjusted_cvss_score': 'adjustedCVSSScore',
'adjusted_severity_score': 'adjustedSeverityScore',
'fail': 'fail',
'special_notes': 'specialNotes',
'status': 'status'
}
def __init__(self, adjusted_cvss_score=None, adjusted_severity_score=None, fail=None, special_notes=None, status=None):
self._adjusted_cvss_score = None
self._adjusted_severity_score = None
self._fail = None
self._special_notes = None
self._status = None
self.discriminator = None
if adjusted_cvss_score is not None:
self.adjusted_cvss_score = adjusted_cvss_score
if adjusted_severity_score is not None:
self.adjusted_severity_score = adjusted_severity_score
if fail is not None:
self.fail = fail
if special_notes is not None:
self.special_notes = special_notes
if status is not None:
self.status = status
@property
def adjusted_cvss_score(self):
return self._adjusted_cvss_score
@adjusted_cvss_score.setter
def adjusted_cvss_score(self, adjusted_cvss_score):
self._adjusted_cvss_score = adjusted_cvss_score
@property
def adjusted_severity_score(self):
return self._adjusted_severity_score
@adjusted_severity_score.setter
def adjusted_severity_score(self, adjusted_severity_score):
self._adjusted_severity_score = adjusted_severity_score
@property
def fail(self):
return self._fail
@fail.setter
def fail(self, fail):
self._fail = fail
@property
|
MIT License
|
robotcaresystems/roboticslanguage
|
RoboticsLanguage/Outputs/Developer/Output.py
|
prepareGroups
|
python
|
def prepareGroups(parameters):
groups = {}
for keyword, value in parameters['language'].iteritems():
group, module = tuple(value['package'].split(':'))
if not Utilities.isDefined(groups, '/' + group + '/' + module):
dpath.util.new(groups, '/' + group + '/' + module, [])
groups[group][module].append(keyword)
return groups
|
Group keywords by package (group and module).
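A hedged illustration of the grouping this produces; the keywords and package names below are invented:

parameters = {'language': {
    'print': {'package': 'Core:Basic'},
    'if': {'package': 'Core:ControlFlow'},
    'node': {'package': 'Transformers:ROS'}}}
# prepareGroups(parameters) would return:
# {'Core': {'Basic': ['print'], 'ControlFlow': ['if']},
#  'Transformers': {'ROS': ['node']}}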
|
https://github.com/robotcaresystems/roboticslanguage/blob/3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed/RoboticsLanguage/Outputs/Developer/Output.py#L41-L52
|
import os
import re
import sys
import subprocess
import dpath.util
from RoboticsLanguage.Base import Utilities
from RoboticsLanguage.Tools import Templates
ignored_files = ['.DS_Store']
include_template = """
{{% set {} %}}
{{% endset %}}
"""
@Utilities.cache_in_disk
|
Apache License 2.0
|
bitmovin/bitmovin-api-sdk-python
|
bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/representations/sprite/sprite_api.py
|
SpriteApi.list
|
python
|
def list(self, manifest_id, period_id, adaptationset_id, query_params=None, **kwargs):
return self.api_client.get(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite',
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id},
query_params=query_params,
pagination_response=True,
type=SpriteRepresentation,
**kwargs
)
|
List all Sprite Representations
:param manifest_id: Id of the manifest
:type manifest_id: string_types, required
:param period_id: Id of the period
:type period_id: string_types, required
:param adaptationset_id: Id of the adaptation set
:type adaptationset_id: string_types, required
:param query_params: Query parameters
:type query_params: SpriteRepresentationListQueryParams
:return: List of Sprite Representations
:rtype: SpriteRepresentation
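A hedged usage sketch; the API key and resource ids are placeholders, and the paginated response is assumed to expose .items:

sprite_api = SpriteApi(api_key='<API_KEY>')
page = sprite_api.list(manifest_id='<MANIFEST_ID>',
                       period_id='<PERIOD_ID>',
                       adaptationset_id='<ADAPTATIONSET_ID>')
for representation in page.items:
    print(representation.id)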
|
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/representations/sprite/sprite_api.py#L96-L119
|
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.sprite_representation import SpriteRepresentation
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.sprite.sprite_representation_list_query_params import SpriteRepresentationListQueryParams
class SpriteApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
super(SpriteApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, manifest_id, period_id, adaptationset_id, sprite_representation, **kwargs):
return self.api_client.post(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite',
sprite_representation,
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id},
type=SpriteRepresentation,
**kwargs
)
def delete(self, manifest_id, period_id, adaptationset_id, representation_id, **kwargs):
return self.api_client.delete(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite/{representation_id}',
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id, 'representation_id': representation_id},
type=BitmovinResponse,
**kwargs
)
def get(self, manifest_id, period_id, adaptationset_id, representation_id, **kwargs):
return self.api_client.get(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite/{representation_id}',
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id, 'representation_id': representation_id},
type=SpriteRepresentation,
**kwargs
)
|
MIT License
|
worstcase/blockade
|
blockade/chaos.py
|
BlockadeChaos._sm_cleanup
|
python
|
def _sm_cleanup(self, *args, **kwargs):
if self._done_notification_func is not None:
self._done_notification_func()
self._timer.cancel()
|
Delete all state associated with the chaos session
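For orientation, a hedged sketch of how this cleanup callback is reached through the public API; the ``blockade`` object is assumed to come from the blockade core and the timing values are arbitrary.
# Hedged sketch: `blockade` is an assumed blockade core object; times are in milliseconds.
chaos = BlockadeChaos(blockade, "example_blockade",
                      min_start_delay=1000, max_start_delay=5000,
                      min_run_time=1000, max_run_time=5000,
                      min_containers_at_once=1, max_containers_at_once=2,
                      event_set=None)  # None selects all known chaos events
chaos.stop()    # HEALTHY or DEGRADED -> STOPPED
chaos.delete()  # STOPPED -> DONE, running _sm_cleanup on the way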
|
https://github.com/worstcase/blockade/blob/2e9cd5676987a80d4f758dda39d15de4ff83bd96/blockade/chaos.py#L333-L339
|
import logging
import random
import threading
from blockade import errors
from blockade import state_machine
_logger = logging.getLogger(__name__)
def _flaky(blockade, targets, all_containers):
target_names = [t.name for t in targets]
_logger.info(
"Chaos making the network drop packets for %s" % str(target_names))
blockade.flaky(target_names)
def _partition(blockade, targets, all_containers):
parts = []
for t in targets:
parts.append([t.name])
target_names = [t.name for t in targets]
_logger.info("Putting %s in their own partitions: %s"
% (str(target_names), str(parts)))
blockade.partition(parts)
def _slow(blockade, targets, all_containers):
target_names = [t.name for t in targets]
_logger.info("Chaos making the network slow for %s" % str(target_names))
blockade.slow(target_names)
def _duplicate(blockade, targets, all_containers):
target_names = [t.name for t in targets]
_logger.info("Chaos adding duplicate packets for %s" % str(target_names))
blockade.duplicate(target_names)
def _stop(blockade, targets, all_containers):
target_names = [t.name for t in targets]
_logger.info("Chaos stopping %s" % str(target_names))
blockade.stop(target_names)
_g_blockade_event_handlers = {
'PARTITION': _partition,
'STOP': _stop,
'FLAKY': _flaky,
'SLOW': _slow,
'DUPLICATE': _duplicate,
}
def get_all_event_names():
return list(_g_blockade_event_handlers.keys())
class ChaosStates(object):
NEW = "NEW"
HEALTHY = "HEALTHY"
DEGRADED = "DEGRADED"
STOPPED = "STOPPED"
FAILED_WHILE_HEALTHY = "FAILED_WHILE_HEALTHY"
FAILED_WHILE_DEGRADED = "FAILED_WHILE_DEGRADED"
DONE = "DONE"
class ChaosEvents(object):
START = "START"
STOP = "STOP"
TIMER = "TIMER"
DELETE = "DELETE"
class BlockadeChaos(object):
def __init__(self, blockade, blockade_name,
min_start_delay, max_start_delay,
min_run_time, max_run_time,
min_containers_at_once, max_containers_at_once,
event_set,
done_notification_func=None):
valid_events = get_all_event_names()
if event_set is None:
event_set = valid_events
else:
for e in event_set:
if e not in valid_events:
raise errors.BlockadeUsageError(
"%s is an unknown event." % e)
self._blockade = blockade
self._blockade_name = blockade_name
self._start_min_delay = min_start_delay
self._start_max_delay = max_start_delay
self._run_min_time = min_run_time
self._run_max_time = max_run_time
self._min_containers_at_once = min_containers_at_once
self._max_containers_at_once = max_containers_at_once
self._chaos_events = event_set[:]
self._done_notification_func = done_notification_func
self._timer = None
self._mutex = threading.Lock()
self._create_state_machine()
self._mutex.acquire()
try:
self._sm.event_occurred(ChaosEvents.START)
finally:
self._mutex.release()
def change_events(self,
min_start_delay=None, max_start_delay=None,
min_run_time=None, max_run_time=None,
min_containers_at_once=1,
max_containers_at_once=1,
event_set=None):
self._mutex.acquire()
try:
if min_start_delay is not None:
self._start_min_delay = min_start_delay
if max_start_delay is not None:
self._start_max_delay = max_start_delay
if min_run_time is not None:
self._run_min_time = min_run_time
if max_run_time is not None:
self._run_max_time = max_run_time
if min_containers_at_once is not None:
self._min_containers_at_once = min_containers_at_once
if max_containers_at_once is not None:
self._max_containers_at_once = max_containers_at_once
if event_set is not None:
self._chaos_events = event_set
finally:
self._mutex.release()
def _do_reset_all(self):
container_list = self._blockade.status()
container_names = [t.name for t in container_list]
self._blockade.start(container_names)
self._blockade.fast(container_names)
self._blockade.join()
def _do_blockade_event(self):
container_list = self._blockade.status()
random.shuffle(container_list)
count = random.randint(self._min_containers_at_once,
self._max_containers_at_once)
targets = container_list[:count]
partition_list = []
for t in targets:
e = random.choice(self._chaos_events)
if e == 'PARTITION':
partition_list.append(t)
else:
_g_blockade_event_handlers[e](
self._blockade, [t], container_list)
if len(partition_list) > 0:
_partition(self._blockade, partition_list, container_list)
def print_state_machine(self):
self._sm.draw_mapping()
def _create_state_machine(self):
self._sm = state_machine.StateMachine(ChaosStates.NEW)
self._sm.add_transition(
ChaosStates.NEW, ChaosEvents.START, ChaosStates.HEALTHY,
self._sm_start, ChaosStates.FAILED_WHILE_HEALTHY)
self._sm.add_transition(
ChaosStates.HEALTHY, ChaosEvents.TIMER,
ChaosStates.DEGRADED,
self._sm_to_pain, ChaosStates.FAILED_WHILE_HEALTHY,
error_trans_func=self._sm_panic_handler_stop_timer)
self._sm.add_transition(
ChaosStates.HEALTHY, ChaosEvents.STOP,
ChaosStates.STOPPED,
self._sm_stop_from_no_pain, ChaosStates.FAILED_WHILE_HEALTHY,
error_trans_func=self._sm_panic_handler_stop_timer)
self._sm.add_transition(
ChaosStates.DEGRADED, ChaosEvents.TIMER,
ChaosStates.HEALTHY,
self._sm_relieve_pain, ChaosStates.FAILED_WHILE_DEGRADED,
error_trans_func=self._sm_panic_handler_stop_timer)
self._sm.add_transition(
ChaosStates.DEGRADED, ChaosEvents.STOP,
ChaosStates.STOPPED,
self._sm_stop_from_pain, ChaosStates.FAILED_WHILE_DEGRADED,
error_trans_func=self._sm_panic_handler_stop_timer)
self._sm.add_transition(
ChaosStates.STOPPED, ChaosEvents.START,
ChaosStates.HEALTHY,
self._sm_start, ChaosStates.FAILED_WHILE_HEALTHY)
self._sm.add_transition(
ChaosStates.STOPPED, ChaosEvents.DELETE,
ChaosStates.DONE,
self._sm_cleanup, ChaosStates.FAILED_WHILE_HEALTHY)
self._sm.add_transition(
ChaosStates.STOPPED, ChaosEvents.TIMER,
ChaosStates.STOPPED,
self._sm_stale_timer, ChaosStates.STOPPED)
self._sm.add_transition(
ChaosStates.FAILED_WHILE_DEGRADED, ChaosEvents.TIMER,
ChaosStates.FAILED_WHILE_DEGRADED,
self._sm_stale_timer, ChaosStates.FAILED_WHILE_DEGRADED)
self._sm.add_transition(
ChaosStates.FAILED_WHILE_DEGRADED, ChaosEvents.DELETE,
ChaosStates.DONE,
self._sm_cleanup, ChaosStates.FAILED_WHILE_DEGRADED)
self._sm.add_transition(
ChaosStates.FAILED_WHILE_HEALTHY, ChaosEvents.TIMER,
ChaosStates.FAILED_WHILE_HEALTHY,
self._sm_stale_timer, ChaosStates.FAILED_WHILE_HEALTHY)
self._sm.add_transition(
ChaosStates.FAILED_WHILE_HEALTHY, ChaosEvents.DELETE,
ChaosStates.DONE,
self._sm_cleanup, ChaosStates.FAILED_WHILE_HEALTHY)
self._sm.add_transition(
ChaosStates.DONE, ChaosEvents.TIMER,
ChaosStates.DONE,
self._sm_stale_timer, ChaosStates.DONE)
def status(self):
self._mutex.acquire()
try:
return {"state": self._sm.get_state()}
finally:
self._mutex.release()
def event_timeout(self):
self._mutex.acquire()
try:
self._sm.event_occurred(ChaosEvents.TIMER)
finally:
self._mutex.release()
def start(self):
self._mutex.acquire()
try:
self._sm.event_occurred(ChaosEvents.START)
finally:
self._mutex.release()
def stop(self):
self._mutex.acquire()
try:
self._sm.event_occurred(ChaosEvents.STOP)
finally:
self._mutex.release()
def delete(self):
self._mutex.acquire()
try:
self._sm.event_occurred(ChaosEvents.DELETE)
finally:
self._mutex.release()
def _sm_start(self, *args, **kwargs):
millisec = random.randint(self._start_min_delay, self._start_max_delay)
self._timer = threading.Timer(millisec / 1000.0, self.event_timeout)
self._timer.start()
def _sm_to_pain(self, *args, **kwargs):
_logger.info("Starting chaos for blockade %s" % self._blockade_name)
self._do_blockade_event()
millisec = random.randint(self._run_min_time, self._run_max_time)
self._timer = threading.Timer(millisec / 1000.0, self.event_timeout)
self._timer.start()
def _sm_stop_from_no_pain(self, *args, **kwargs):
_logger.info("Stopping chaos for blockade %s" % self._blockade_name)
self._timer.cancel()
def _sm_relieve_pain(self, *args, **kwargs):
_logger.info(
"Ending the degradation for blockade %s" % self._blockade_name)
self._do_reset_all()
millisec = random.randint(self._start_min_delay, self._start_max_delay)
self._timer = threading.Timer(millisec/1000.0, self.event_timeout)
self._timer.start()
def _sm_stop_from_pain(self, *args, **kwargs):
_logger.info("Stopping chaos for blockade %s" % self._blockade_name)
self._do_reset_all()
|
Apache License 2.0
|
microsoft/qdk-python
|
azure-quantum/azure/quantum/target/microsoft/qio/tabu.py
|
Tabu.__init__
|
python
|
def __init__(
self,
workspace: Workspace,
name: str = "microsoft.tabu.cpu",
sweeps: Optional[int] = None,
tabu_tenure: Optional[int] = None,
timeout: Optional[int] = None,
seed: Optional[int] = None,
restarts: Optional[int] = None,
**kwargs
):
if sweeps is None and tabu_tenure is None and restarts is None:
name = "microsoft.tabu-parameterfree.cpu"
super().__init__(
workspace=workspace,
provider_id="Microsoft",
name=name,
input_data_format="microsoft.qio.v2",
output_data_format="microsoft.qio-results.v2",
**kwargs
)
self.set_one_param("sweeps", sweeps)
self.set_one_param("tabu_tenure", tabu_tenure)
self.set_one_param("timeout", timeout)
self.set_one_param("seed", seed)
self.set_one_param("restarts", restarts)
|
The constructor of a Tabu Search solver.
Multi-core Tabu Search solver for binary optimization problems
with k-local interactions on an all-to-all graph topology with double
precision support for the coupler weights.
This solver is CPU only.
:param sweeps:
specifies the number of sweeps.
:param tabu_tenure:
specifies the tabu tenure.
:param restarts:
specifies how many runs the solver will execute.
:param timeout:
specifies maximum number of seconds to run the core solver
loop. Initialization time does not respect this value, so the
solver may run longer than the value specified.
:param seed:
specifies a random seed value.
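A hedged construction sketch follows; the workspace identifiers are placeholders and problem submission is only hinted at in a comment.
# Hedged sketch: workspace identifiers below are placeholders.
workspace = Workspace(
    subscription_id="<subscription-id>",
    resource_group="<resource-group>",
    name="<workspace-name>",
    location="<location>")
solver = Tabu(workspace, sweeps=100, tabu_tenure=10, timeout=60, seed=42)
# solver.optimize(problem) would submit a Problem instance to the service.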
|
https://github.com/microsoft/qdk-python/blob/d0a87fda57dc360c96d9ce9772b71406d9b29ebe/azure-quantum/azure/quantum/target/microsoft/qio/tabu.py#L19-L67
|
import logging
from typing import Optional
from azure.quantum.target.solvers import Solver
from azure.quantum.workspace import Workspace
logger = logging.getLogger(__name__)
class Tabu(Solver):
target_names = (
"microsoft.tabu.cpu",
"microsoft.tabu-parameterfree.cpu",
)
|
MIT License
|
xilinx/pyxir
|
python/pyxir/contrib/dpuv1/dpuv1_op_support.py
|
pooling_op_support
|
python
|
def pooling_op_support(X, bXs, tXs):
padding = X.attrs['padding']
return True
|
Check whether we can execute the provided Pooling operator
on the dpuv1 target
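The registration pattern used throughout this module can be sketched as below; ``Upsampling2D`` is a purely hypothetical operator name and the check body is illustrative, not a real dpuv1 constraint.
# Illustrative sketch: 'Upsampling2D' is a hypothetical operator name.
@pyxir.register_op_support_check('dpuv1', 'Upsampling2D')
def upsampling_op_support(X, bXs, tXs):
    # Accept every such operator on the dpuv1 target (illustrative only).
    return True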
|
https://github.com/xilinx/pyxir/blob/bef661d6d77adcdbd2cf4163f2cf3a1d31d40406/python/pyxir/contrib/dpuv1/dpuv1_op_support.py#L167-L179
|
import math
import pyxir
import logging
logger = logging.getLogger('pyxir')
@pyxir.register_op_support_check('dpuv1', 'BatchNorm')
def batchnorm_op_support(X, bXs, tXs):
axis = X.attrs['axis']
channels = X.shapes[axis]
return channels >= 1 and channels <= 4096
@pyxir.register_op_support_check('dpuv1', 'BiasAdd')
def biasadd_op_support(X, bXs, tXs):
axis = X.attrs['axis']
channels = X.shapes[axis]
return channels >= 1 and channels <= 4096
@pyxir.register_op_support_check('dpuv1', 'Cast')
def cast_op_support(X, bXs, tXs):
dtype = X.attrs['dtype']
return dtype == 'float32'
@pyxir.register_op_support_check('dpuv1', 'Concat')
def concat_op_support(X, bXs, tXs):
axis = X.attrs['axis']
channels = X.shapes[axis]
return channels >= 1 and channels <= 4096
@pyxir.register_op_support_check('dpuv1', 'Convolution')
def conv2d_op_support(X, bXs, tXs):
data_layout = X.attrs['data_layout']
kernel_h, kernel_w = X.attrs['kernel_size']
stride_h, stride_w = X.attrs['strides']
dilation_h, dilation_w = X.attrs['dilation']
padding_h, padding_w = X.attrs['padding'][data_layout.index('H')], X.attrs['padding'][data_layout.index('W')]
padding_h_top, padding_h_bot = padding_h
padding_w_left, padding_w_right = padding_w
ch_in, ch_out = X.attrs['channels']
groups = X.attrs['groups']
return groups == 1 and kernel_h >= 1 and kernel_h <= 15 and kernel_w >= 1 and kernel_w <= 15 and stride_h in [1, 2, 4, 8] and stride_w in [1, 2, 4, 8] and ch_in >= 1 and ch_in <= 4096 and ch_out >= 1 and ch_out <= 4096 and dilation_h in [1, 2, 4] and dilation_w in [1, 2, 4]
@pyxir.register_op_support_check('dpuv1', 'Conv2DTranspose')
def conv2d_transpose_op_support(X, bXs, tXs):
data_layout = X.attrs['data_layout']
kernel_h, kernel_w = X.attrs['kernel_size']
stride_h, stride_w = X.attrs['strides']
dilation_h, dilation_w = X.attrs['dilation']
padding_h, padding_w = X.attrs['padding'][data_layout.index('H')], X.attrs['padding'][data_layout.index('W')]
padding_h_top, padding_h_bot = padding_h
padding_w_left, padding_w_right = padding_w
padding = X.attrs['padding']
ch_in, ch_out = X.attrs['channels']
groups = X.attrs['groups']
return groups == 1 and kernel_h >= 1 and kernel_h <= 15 and kernel_w >= 1 and kernel_w <= 15 and stride_h in [1, 2, 4, 8] and stride_w in [1, 2, 4, 8] and ch_in >= 1 and ch_in <= 4096 and ch_out >= 1 and ch_out <= 4096 and dilation_h in [1, 2, 4] and dilation_w in [1, 2, 4]
@pyxir.register_op_support_check('dpuv1', 'DPU')
def dpu_op_support(X, bXs, tXs):
return True
@pyxir.register_op_support_check('dpuv1', 'Eltwise')
def eltwise_op_support(X, bXs, tXs):
return True
@pyxir.register_op_support_check('dpuv1', 'Pad')
|
Apache License 2.0
|
vinzeebreak/ironcar
|
main.py
|
mode_update
|
python
|
def mode_update():
print('Sending the current state of the car')
all_state = dict()
all_state['mode'] = ironcar.mode
all_state['speed_mode'] = ironcar.speed_mode
all_state['started'] = ironcar.started
all_state['current_model'] = ironcar.current_model
all_state['max_speed_rate'] = ironcar.max_speed_rate
all_state['commands'] = ironcar.commands
return jsonify(all_state)
|
Sends the state of the car
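A hedged client-side sketch, assuming the handler is mounted at the ``/car_state`` route shown in the surrounding context and the server listens on localhost port 5000.
# Hedged sketch: host and port are assumptions about the local deployment.
import requests
state = requests.get("http://localhost:5000/car_state").json()
print(state["mode"], state["speed_mode"], state["started"])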
|
https://github.com/vinzeebreak/ironcar/blob/a6baed94d68de4ed775a27a50f9edcd997b2758c/main.py#L59-L72
|
import socket
import json
from flask import Flask, render_template, send_file, jsonify
from app import app, socketio
from ironcar import *
with open(CONFIG) as json_file:
config = json.load(json_file)
MODELS_PATH = config['models_path']
@app.route('/')
def main():
models = []
if os.path.isdir(MODELS_PATH):
models = [os.path.join(MODELS_PATH, f) for f in os.listdir(MODELS_PATH) if f.endswith('.hdf5')]
print('SERVER : models : ', models)
return render_template('index.html', models=models)
@app.route('/commands')
def commands():
commands = ironcar.commands
print('SERVER : commands : ', commands)
return render_template('commands.html', commands=commands)
@app.route('/help')
def help():
return render_template('help.html')
@app.route('/picture')
def picture():
path_picture = ironcar.picture()
print('path_picture : ', path_picture)
if path_picture:
r = send_file(path_picture, as_attachment=True)
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
return None
@app.route('/car_state')
|
MIT License
|
ngalongc/openapi_security_scanner
|
schemathesis/cli/__init__.py
|
replay
|
python
|
def replay(
cassette_path: str,
id_: Optional[str],
status: Optional[str] = None,
uri: Optional[str] = None,
method: Optional[str] = None,
) -> None:
click.secho(f"{bold('Replaying cassette')}: {cassette_path}")
with open(cassette_path) as fd:
cassette = yaml.load(fd, Loader=SafeLoader)
click.secho(f"{bold('Total interactions')}: {len(cassette['http_interactions'])}\n")
for replayed in cassettes.replay(cassette, id_=id_, status=status, uri=uri, method=method):
click.secho(f" {bold('ID')} : {replayed.interaction['id']}")
click.secho(f" {bold('URI')} : {replayed.interaction['request']['uri']}")
click.secho(f" {bold('Old status code')} : {replayed.interaction['response']['status']['code']}")
click.secho(f" {bold('New status code')} : {replayed.response.status_code}\n")
|
Replay a cassette.
Cassettes in VCR-compatible format can be replayed.
For example, ones recorded with the ``--store-network-log`` option of the ``schemathesis run`` command.
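A hedged invocation sketch using Click's test runner; ``cassette.yaml`` is a placeholder for a previously recorded cassette file.
# Hedged sketch: cassette.yaml is a placeholder path to an existing cassette.
from click.testing import CliRunner
runner = CliRunner()
result = runner.invoke(schemathesis, ["replay", "cassette.yaml", "--status", "500"])
print(result.output)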
|
https://github.com/ngalongc/openapi_security_scanner/blob/9ba2244bf0e52db6f149243de403c8c7c157216f/schemathesis/cli/__init__.py#L457-L477
|
import os
import sys
import traceback
from enum import Enum
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union
import click
import hypothesis
import yaml
from .. import checks as checks_module
from .. import runner
from .. import targets as targets_module
from ..constants import DEFAULT_DATA_GENERATION_METHODS, DEFAULT_STATEFUL_RECURSION_LIMIT, DataGenerationMethod
from ..fixups import ALL_FIXUPS
from ..hooks import GLOBAL_HOOK_DISPATCHER, HookContext, HookDispatcher, HookScope
from ..models import CheckFunction
from ..runner import events
from ..stateful import Stateful
from ..targets import Target
from ..types import Filter
from . import callbacks, cassettes, output
from .constants import DEFAULT_WORKERS, MAX_WORKERS, MIN_WORKERS
from .context import ExecutionContext
from .handlers import EventHandler
from .junitxml import JunitXMLHandler
from .options import CSVOption, CustomHelpMessageChoice, NotSet, OptionalInt
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
def _get_callable_names(items: Tuple[Callable, ...]) -> Tuple[str, ...]:
return tuple(item.__name__ for item in items)
CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}
DEFAULT_CHECKS_NAMES = _get_callable_names(checks_module.DEFAULT_CHECKS)
ALL_CHECKS_NAMES = _get_callable_names(checks_module.ALL_CHECKS)
CHECKS_TYPE = click.Choice((*ALL_CHECKS_NAMES, "all"))
DEFAULT_TARGETS_NAMES = _get_callable_names(targets_module.DEFAULT_TARGETS)
ALL_TARGETS_NAMES = _get_callable_names(targets_module.ALL_TARGETS)
TARGETS_TYPE = click.Choice((*ALL_TARGETS_NAMES, "all"))
def register_target(function: Target) -> Target:
targets_module.ALL_TARGETS += (function,)
TARGETS_TYPE.choices += (function.__name__,)
return function
def register_check(function: CheckFunction) -> CheckFunction:
checks_module.ALL_CHECKS += (function,)
CHECKS_TYPE.choices += (function.__name__,)
return function
def reset_checks() -> None:
checks_module.ALL_CHECKS = checks_module.DEFAULT_CHECKS + checks_module.OPTIONAL_CHECKS
CHECKS_TYPE.choices = _get_callable_names(checks_module.ALL_CHECKS) + ("all",)
def reset_targets() -> None:
targets_module.ALL_TARGETS = targets_module.DEFAULT_TARGETS + targets_module.OPTIONAL_TARGETS
TARGETS_TYPE.choices = _get_callable_names(targets_module.ALL_TARGETS) + ("all",)
class DeprecatedOption(click.Option):
def __init__(self, *args: Any, removed_in: str, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.removed_in = removed_in
def handle_parse_result(self, ctx: click.Context, opts: Dict[str, Any], args: List[str]) -> Tuple[Any, List[str]]:
if self.name in opts:
opt_names = "/".join(f"`{name}`" for name in self.opts)
verb = "is" if len(self.opts) == 1 else "are"
click.secho(
f"\nWARNING: {opt_names} {verb} deprecated and will be removed in Schemathesis {self.removed_in}\n",
fg="yellow",
)
return super().handle_parse_result(ctx, opts, args)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option("--pre-run", help="A module to execute before the running the tests.", type=str)
@click.version_option()
def schemathesis(pre_run: Optional[str] = None) -> None:
if pre_run:
load_hook(pre_run)
@schemathesis.command(short_help="Perform schemathesis test.")
@click.argument("schema", type=str, callback=callbacks.validate_schema)
@click.option(
"--checks", "-c", multiple=True, help="List of checks to run.", type=CHECKS_TYPE, default=DEFAULT_CHECKS_NAMES
)
@click.option(
"--data-generation-method",
"-D",
"data_generation_methods",
help="Defines how Schemathesis generates data for tests.",
type=CSVOption(DataGenerationMethod),
default=DataGenerationMethod.default(),
)
@click.option(
"--max-response-time",
help="A custom check that will fail if the response time is greater than the specified one in milliseconds.",
type=click.IntRange(min=1),
)
@click.option(
"--target",
"-t",
"targets",
multiple=True,
help="Targets for input generation.",
type=TARGETS_TYPE,
default=DEFAULT_TARGETS_NAMES,
)
@click.option(
"-x", "--exitfirst", "exit_first", is_flag=True, default=False, help="Exit instantly on first error or failed test."
)
@click.option(
"--auth", "-a", help="Server user and password. Example: USER:PASSWORD", type=str, callback=callbacks.validate_auth
)
@click.option(
"--auth-type",
"-A",
type=click.Choice(["basic", "digest"], case_sensitive=False),
default="basic",
help="The authentication mechanism to be used. Defaults to 'basic'.",
)
@click.option(
"--header",
"-H",
"headers",
help=r"Custom header in a that will be used in all requests to the server. Example: Authorization: Bearer\ 123",
multiple=True,
type=str,
callback=callbacks.validate_headers,
)
@click.option(
"--endpoint",
"-E",
"endpoints",
type=str,
multiple=True,
help=r"Filter schemathesis test by endpoint pattern. Example: users/\d+",
callback=callbacks.validate_regex,
)
@click.option(
"--method",
"-M",
"methods",
type=str,
multiple=True,
help="Filter schemathesis test by HTTP method.",
callback=callbacks.validate_regex,
)
@click.option(
"--tag",
"-T",
"tags",
type=str,
multiple=True,
help="Filter schemathesis test by schema tag pattern.",
callback=callbacks.validate_regex,
)
@click.option(
"--operation-id",
"-O",
"operation_ids",
type=str,
multiple=True,
help="Filter schemathesis test by operationId pattern.",
callback=callbacks.validate_regex,
)
@click.option(
"--workers",
"-w",
"workers_num",
help="Number of workers to run tests.",
type=CustomHelpMessageChoice(
["auto"] + list(map(str, range(MIN_WORKERS, MAX_WORKERS + 1))),
choices_repr=f"[auto|{MIN_WORKERS}-{MAX_WORKERS}]",
),
default=str(DEFAULT_WORKERS),
callback=callbacks.convert_workers,
)
@click.option(
"--base-url",
"-b",
help="Base URL address of the API, required for SCHEMA if specified by file.",
type=str,
callback=callbacks.validate_base_url,
)
@click.option("--app", help="WSGI/ASGI application to test.", type=str, callback=callbacks.validate_app)
@click.option(
"--request-timeout",
help="Timeout in milliseconds for network requests during the test run.",
type=click.IntRange(1),
)
@click.option(
"--request-tls-verify",
help="Controls whether Schemathesis verifies the server's TLS certificate. "
"You can also pass the path to a CA_BUNDLE file for private certs.",
type=str,
default="true",
callback=callbacks.convert_request_tls_verify,
)
@click.option("--validate-schema", help="Enable or disable validation of input schema.", type=bool, default=True)
@click.option(
"--skip-deprecated-endpoints",
help="Skip testing of deprecated endpoints.",
is_flag=True,
is_eager=True,
default=False,
)
@click.option(
"--junit-xml", help="Create junit-xml style report file at given path.", type=click.File("w", encoding="utf-8")
)
@click.option(
"--show-errors-tracebacks",
help="Show full tracebacks for internal errors.",
is_flag=True,
is_eager=True,
default=False,
)
@click.option(
"--store-network-log", help="Store requests and responses into a file.", type=click.File("w", encoding="utf-8")
)
@click.option(
"--fixups",
help="Install specified compatibility fixups.",
multiple=True,
type=click.Choice(list(ALL_FIXUPS) + ["all"]),
)
@click.option(
"--stateful",
help="Utilize stateful testing capabilities.",
type=click.Choice([item.name for item in Stateful]),
callback=callbacks.convert_stateful,
)
@click.option(
"--stateful-recursion-limit",
help="Limit recursion depth for stateful testing.",
default=DEFAULT_STATEFUL_RECURSION_LIMIT,
type=click.IntRange(1, 100),
cls=DeprecatedOption,
removed_in="3.0",
)
@click.option(
"--force-schema-version",
help="Force Schemathesis to parse the input schema with the specified spec version.",
type=click.Choice(["20", "30"]),
)
@click.option(
"--hypothesis-deadline",
help="Duration in milliseconds that each individual example with a test is not allowed to exceed.",
type=OptionalInt(1, 999999999 * 24 * 3600 * 1000),
)
@click.option("--hypothesis-derandomize", help="Use Hypothesis's deterministic mode.", is_flag=True, default=None)
@click.option(
"--hypothesis-max-examples",
help="Maximum number of generated examples per each method/endpoint combination.",
type=click.IntRange(1),
)
@click.option("--hypothesis-phases", help="Control which phases should be run.", type=CSVOption(hypothesis.Phase))
@click.option(
"--hypothesis-report-multiple-bugs", help="Raise only the exception with the smallest minimal example.", type=bool
)
@click.option("--hypothesis-seed", help="Set a seed to use for all Hypothesis tests.", type=int)
@click.option(
"--hypothesis-suppress-health-check",
help="Comma-separated list of health checks to disable.",
type=CSVOption(hypothesis.HealthCheck),
)
@click.option(
"--hypothesis-verbosity",
help="Verbosity level of Hypothesis messages.",
type=click.Choice([item.name for item in hypothesis.Verbosity]),
callback=callbacks.convert_verbosity,
)
@click.option("--verbosity", "-v", help="Reduce verbosity of error output.", count=True)
def run(
schema: str,
auth: Optional[Tuple[str, str]],
auth_type: str,
headers: Dict[str, str],
checks: Iterable[str] = DEFAULT_CHECKS_NAMES,
data_generation_methods: Tuple[DataGenerationMethod, ...] = DEFAULT_DATA_GENERATION_METHODS,
max_response_time: Optional[int] = None,
targets: Iterable[str] = DEFAULT_TARGETS_NAMES,
exit_first: bool = False,
endpoints: Optional[Filter] = None,
methods: Optional[Filter] = None,
tags: Optional[Filter] = None,
operation_ids: Optional[Filter] = None,
workers_num: int = DEFAULT_WORKERS,
base_url: Optional[str] = None,
app: Optional[str] = None,
request_timeout: Optional[int] = None,
request_tls_verify: bool = True,
validate_schema: bool = True,
skip_deprecated_endpoints: bool = False,
junit_xml: Optional[click.utils.LazyFile] = None,
show_errors_tracebacks: bool = False,
store_network_log: Optional[click.utils.LazyFile] = None,
fixups: Tuple[str] = (),
stateful: Optional[Stateful] = None,
stateful_recursion_limit: int = DEFAULT_STATEFUL_RECURSION_LIMIT,
force_schema_version: Optional[str] = None,
hypothesis_deadline: Optional[Union[int, NotSet]] = None,
hypothesis_derandomize: Optional[bool] = None,
hypothesis_max_examples: Optional[int] = None,
hypothesis_phases: Optional[List[hypothesis.Phase]] = None,
hypothesis_report_multiple_bugs: Optional[bool] = None,
hypothesis_suppress_health_check: Optional[List[hypothesis.HealthCheck]] = None,
hypothesis_seed: Optional[int] = None,
hypothesis_verbosity: Optional[hypothesis.Verbosity] = None,
verbosity: int = 0,
) -> None:
selected_targets = tuple(target for target in targets_module.ALL_TARGETS if target.__name__ in targets)
if "all" in checks:
selected_checks = checks_module.ALL_CHECKS
else:
selected_checks = tuple(check for check in checks_module.ALL_CHECKS if check.__name__ in checks)
prepared_runner = runner.prepare(
schema,
auth=auth,
auth_type=auth_type,
headers=headers,
request_timeout=request_timeout,
request_tls_verify=request_tls_verify,
base_url=base_url,
endpoint=endpoints,
method=methods,
tag=tags,
operation_id=operation_ids,
app=app,
seed=hypothesis_seed,
exit_first=exit_first,
store_interactions=store_network_log is not None,
checks=selected_checks,
data_generation_methods=data_generation_methods,
max_response_time=max_response_time,
targets=selected_targets,
workers_num=workers_num,
validate_schema=validate_schema,
skip_deprecated_endpoints=skip_deprecated_endpoints,
fixups=fixups,
stateful=stateful,
stateful_recursion_limit=stateful_recursion_limit,
force_schema_version=force_schema_version,
hypothesis_deadline=hypothesis_deadline,
hypothesis_derandomize=hypothesis_derandomize,
hypothesis_max_examples=hypothesis_max_examples,
hypothesis_phases=hypothesis_phases,
hypothesis_report_multiple_bugs=hypothesis_report_multiple_bugs,
hypothesis_suppress_health_check=hypothesis_suppress_health_check,
hypothesis_verbosity=hypothesis_verbosity,
)
execute(prepared_runner, workers_num, show_errors_tracebacks, store_network_log, junit_xml, verbosity)
def get_output_handler(workers_num: int) -> EventHandler:
if workers_num > 1:
output_style = OutputStyle.short
else:
output_style = OutputStyle.default
return output_style.value()
def load_hook(module_name: str) -> None:
try:
sys.path.append(os.getcwd())
__import__(module_name)
except Exception as exc:
click.secho("An exception happened during the hook loading:\n", fg="red")
message = traceback.format_exc()
click.secho(message, fg="red")
raise click.Abort() from exc
class OutputStyle(Enum):
default = output.default.DefaultOutputStyleHandler
short = output.short.ShortOutputStyleHandler
def execute(
prepared_runner: Generator[events.ExecutionEvent, None, None],
workers_num: int,
show_errors_tracebacks: bool,
store_network_log: Optional[click.utils.LazyFile],
junit_xml: Optional[click.utils.LazyFile],
verbosity: int,
) -> None:
handlers: List[EventHandler] = []
if junit_xml is not None:
handlers.append(JunitXMLHandler(junit_xml))
if store_network_log is not None:
handlers.append(cassettes.CassetteWriter(store_network_log))
handlers.append(get_output_handler(workers_num))
execution_context = ExecutionContext(
workers_num=workers_num,
show_errors_tracebacks=show_errors_tracebacks,
cassette_file_name=store_network_log.name if store_network_log is not None else None,
junit_xml_file=junit_xml.name if junit_xml is not None else None,
verbosity=verbosity,
)
GLOBAL_HOOK_DISPATCHER.dispatch("after_init_cli_run_handlers", HookContext(), handlers, execution_context)
try:
for event in prepared_runner:
for handler in handlers:
handler.handle_event(execution_context, event)
except click.exceptions.Exit:
raise
except Exception as exc:
for handler in handlers:
handler.shutdown()
if isinstance(exc, click.Abort):
sys.exit(1)
raise
@schemathesis.command(short_help="Replay requests from a saved cassette.")
@click.argument("cassette_path", type=click.Path(exists=True))
@click.option("--id", "id_", help="ID of interaction to replay.", type=str)
@click.option("--status", help="Status of interactions to replay.", type=str)
@click.option("--uri", help="A regexp that filters interactions by their request URI.", type=str)
@click.option("--method", help="A regexp that filters interactions by their request method.", type=str)
|
Apache License 2.0
|
mwcvitkovic/open-vocabulary-learning-on-source-code-with-a-graph-structured-cache
|
models/Model.py
|
Model.preprocess_task
|
python
|
def preprocess_task(cls,
task: Task,
output_dir: str,
n_jobs: int,
excluded_edge_types=frozenset(),
data_encoder: DataEncoder = None,
data_encoder_kwargs: dict = None,
instance_to_datapoints_kwargs: dict = None):
if data_encoder is None:
raise ValueError('''You must set data_encoder to either:
1) The string "new" to initialize a DataEncoder based on this task,
2) A DataEncoder instance to encode the data with a pre-trained encoder''')
graphs_and_instances = task.graphs_and_instances
n_datapoints = sum(len(i[1]) for i in graphs_and_instances)
logger.info('Preprocessing {} graphs with {} datapoints'.format(len(graphs_and_instances), n_datapoints))
logger.info('Removing excluded edge types and adding reverse subgraph')
with Parallel(n_jobs=n_jobs, verbose=50) as parallel:
graphs_and_instances = parallel(
delayed(cls.fix_up_edges)(graph, instances, excluded_edge_types) for
graph, instances in
graphs_and_instances)
if data_encoder == 'new':
logger.info('Initializing a DataEncoder based on this task')
de = cls.DataEncoder(graphs_and_instances, instance_to_datapoints_kwargs=instance_to_datapoints_kwargs,
excluded_edge_types=excluded_edge_types, **data_encoder_kwargs)
else:
assert type(data_encoder) == cls.DataEncoder
de = data_encoder
logger.info('Using a pre-existing {}'.format(type(de)))
logger.info('Doing optional extra graph processing')
with Parallel(n_jobs=n_jobs, verbose=50) as parallel:
graphs_and_instances = parallel(
delayed(cls.extra_graph_processing)(graph, instances, de) for graph, instances in
graphs_and_instances)
logger.info('Process graphs into DataPoints and saving to {}'.format(output_dir))
os.makedirs(output_dir, exist_ok=True)
batched_graphs_and_instances = []
n_batch = math.ceil(max(len(i[1]) for i in task.graphs_and_instances if i) / n_jobs)
for graph, instances in graphs_and_instances:
for i in range(math.ceil(len(instances) / n_batch)):
batched_graphs_and_instances.append((graph, instances[i * n_batch: (i + 1) * n_batch]))
filenames = []
for i, (graph, instances) in enumerate(batched_graphs_and_instances):
filename = os.path.join(output_dir, str(i) + '.pkl')
filenames.append(filename)
with open(filename, 'wb') as f:
pickle.dump((graph, instances, de, output_dir, de.instance_to_datapoints_kwargs), f)
cls.process_graph_to_datapoints_with_xargs(filenames, output_dir, n_jobs)
logger.info('Preprocessed {} datapoints'.format(len(os.listdir(output_dir))))
if data_encoder == 'new':
de.save(output_dir)
|
Converts a task into a set of preprocessed DataPoints appropriate for this model, which are ultimately saved to disk
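A hedged call sketch; ``MyModel`` and ``task`` stand in for a concrete Model subclass and a Task instance built elsewhere in the repository.
# Hedged sketch: MyModel and task are placeholders for a concrete subclass and Task.
MyModel.preprocess_task(
    task=task,
    output_dir='preprocessed/train',
    n_jobs=8,
    data_encoder='new',  # build a fresh DataEncoder from this task
    data_encoder_kwargs={},
    instance_to_datapoints_kwargs={})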
|
https://github.com/mwcvitkovic/open-vocabulary-learning-on-source-code-with-a-graph-structured-cache/blob/fc2faa84a34090cc90290897669bbb32d8f89f68/models/Model.py#L31-L90
|
import logging
import math
import os
import pickle
import subprocess
import sys
from typing import List
import mxnet as mx
from joblib import Parallel, delayed
from mxnet import gluon
from tqdm import tqdm
from data import AugmentedAST, project_root_path
from data.BaseDataEncoder import BaseDataEncoder
from data.Tasks import Task
logger = logging.getLogger()
class Model(gluon.HybridBlock):
DataEncoder = None
DataClass = None
@classmethod
|
MIT License
|
parsl/parsl
|
parsl/tests/low_latency/utils.py
|
ping_time
|
python
|
def ping_time(ip, n=5):
cmd = "ping {} -c {}".format(ip, n)
p = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
output = p.communicate()[0].decode()
stats = output.split("\n")[-1].split(" = ")[-1].split("/")
avg_ping_time = float(stats[1])
return avg_ping_time * 1000
|
Returns the average ping time in microseconds.
Note: This function is inherently platform specific.
It currently works on Midway.
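A small usage sketch; the target address is a placeholder.
# Placeholder address; any host reachable by `ping` works.
latency_us = ping_time("192.168.1.1", n=3)
print("average ping: %.1f microseconds" % latency_us)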
|
https://github.com/parsl/parsl/blob/0b38750cdedb10869cd03f34bfde8390017fa256/parsl/tests/low_latency/utils.py#L4-L16
|
import subprocess
|
Apache License 2.0
|
yoshida-lab/xenonpy
|
xenonpy/model/training/trainer.py
|
Trainer.__call__
|
python
|
def __call__(self,
x_train: Union[torch.Tensor, Tuple[torch.Tensor]] = None,
y_train: Any = None,
x_val: Union[Any, Tuple[Any]] = None,
y_val: Any = None,
*,
training_dataset: DataLoader = None,
validation_dataset: DataLoader = None,
epochs: int = None,
checkpoint: Union[bool, int, Callable[[int], bool]] = None,
**model_params):
if self._model is None:
raise RuntimeError(
'no model for training, use `trainer.model = <model>` or `trainer.reset(to=<model>)` to set one')
if self._loss_func is None:
raise RuntimeError('no loss function for training, use `trainer.loss_func = <loss_func>` to set one')
if self._optimizer is None:
raise RuntimeError('no optimizer for training, use `trainer.optimizer = <optimizer>` to set one')
if epochs is None:
epochs = self._epochs
if training_dataset is not None:
if y_train is not None or x_train is not None:
raise RuntimeError('parameter <training_dataset> is exclusive of <x_train> and <y_train>')
else:
if y_train is None or x_train is None:
raise RuntimeError('missing parameter <x_train> or <y_train>')
def _step(x_, y_, i_b=0):
def closure():
self._optimizer.zero_grad()
y_p_ = self._model(*x_, **model_params)
y_p_, y_t_ = self.output_proc(y_p_, y_, trainer=self, is_training=True)
loss_ = self._loss_func(y_p_, y_t_)
loss_.backward()
if self._clip_grad is not None:
self._clip_grad(self._model.parameters())
return loss_
if not self._model.training:
self._model.train()
train_loss = self._optimizer.step(closure).item()
step_info = OrderedDict(
total_iters=self._total_its,
i_epoch=self._total_epochs,
i_batch=i_b + 1,
)
step_info[self._loss_type] = train_loss
self._total_its += 1
self._step_forward(step_info=step_info, trainer=self, is_training=True)
self._training_info.append(step_info)
if self._lr_scheduler is not None:
if isinstance(self._lr_scheduler, ReduceLROnPlateau):
self._lr_scheduler.step(train_loss)
else:
self._lr_scheduler.step()
return step_info
def _snapshot():
if checkpoint is not None:
if isinstance(checkpoint, bool) and checkpoint:
self.set_checkpoint()
if isinstance(checkpoint, int):
if self._total_epochs % checkpoint == 0:
self.set_checkpoint()
if callable(checkpoint):
flag, msg = checkpoint(self._total_epochs)
if flag:
self.set_checkpoint(msg)
if validation_dataset is not None:
if y_val is not None or x_val is not None:
raise RuntimeError('parameter <validation_dataset> is exclusive of <x_val> and <y_val>')
else:
self._validate_dataset = validation_dataset
else:
if y_val is not None and x_val is not None:
self._x_val, self._y_val = self.input_proc(x_val, y_val, trainer=self, is_training=False)
self._before_proc(trainer=self, is_training=True)
if training_dataset:
for i_epoch in range(self._total_epochs, epochs + self._total_epochs):
self._total_epochs += 1
for i_batch, (x_train, y_train) in enumerate(training_dataset):
x_train, y_train = self.input_proc(x_train, y_train, trainer=self, is_training=True)
if not isinstance(x_train, tuple):
x_train = (x_train,)
yield _step(x_train, y_train, i_batch)
if self._early_stopping[0]:
print(f'Early stopping is applied: {self._early_stopping[1]}')
self._after_proc(trainer=self, is_training=True)
self._model.eval()
return
_snapshot()
else:
x_train, y_train = self.input_proc(x_train, y_train, trainer=self, is_training=True)
if not isinstance(x_train, tuple):
x_train = (x_train,)
for i_epoch in range(self._total_epochs, epochs + self._total_epochs):
self._total_epochs += 1
yield _step(x_train, y_train)
if self._early_stopping[0]:
print(f'Early stopping is applied: {self._early_stopping[1]}.')
self._after_proc(trainer=self, is_training=True)
self._model.eval()
return
_snapshot()
self._after_proc(trainer=self, is_training=True)
self._model.eval()
|
Train the Neural Network model
Parameters
----------
x_train
Training data. Will be ignored when ``training_dataset`` is given.
y_train
Training target data. Will be ignored when ``training_dataset`` is given.
training_dataset: DataLoader
Torch DataLoader. If given, will only use this as training dataset.
When loop over this dataset, it should yield a tuple contains ``x_train`` and ``y_train`` in order.
x_val : Union[Any, Tuple[Any]]
Data for validation.
y_val : Any
Data for validation.
validation_dataset : DataLoader
epochs : int
Epochs. If not ``None``, it will overwrite ``self.epochs`` temporarily.
checkpoint: Union[bool, int, Callable[[int], bool]]
If ``True``, will save model states at each step.
If ``int``, will save model states every `checkpoint` steps.
If ``Callable``, the function should take current ``total_epochs`` as input return ``bool``.
model_params: dict
Other model parameters.
Yields
------
namedtuple
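A hedged training-loop sketch; the model, loss function, optimizer wrapper, and data loader are assumed to be constructed elsewhere.
# Hedged sketch: model, loss_func, optimizer, and train_loader are assumed objects.
trainer = Trainer(model=model, loss_func=loss_func, optimizer=optimizer, epochs=5)
for step_info in trainer(training_dataset=train_loader):
    if step_info['i_batch'] == 1:  # report once per epoch
        print(dict(step_info))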
|
https://github.com/yoshida-lab/xenonpy/blob/244b241ea23dcf03af34d86b0ae68f911ae301a5/xenonpy/model/training/trainer.py#L352-L509
|
from collections import OrderedDict, namedtuple
from copy import deepcopy
from pathlib import Path
from typing import Union, Tuple, List, Any, Dict, Callable
import numpy as np
import pandas as pd
import torch
from torch.nn import Module
from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau
from torch.utils.data import DataLoader
from deprecated import deprecated
from xenonpy.model.training import ClipValue, ClipNorm, Checker
from xenonpy.model.training.base import BaseOptimizer, BaseLRScheduler, BaseRunner
from xenonpy.utils import camel_to_snake
__all__ = ['Trainer']
class Trainer(BaseRunner):
checkpoint_tuple = namedtuple('checkpoint', 'id iterations model_state')
results_tuple = namedtuple('results', 'total_epochs device training_info checkpoints model')
def __init__(
self,
*,
loss_func: torch.nn.Module = None,
optimizer: BaseOptimizer = None,
model: Module = None,
lr_scheduler: BaseLRScheduler = None,
clip_grad: Union[ClipNorm, ClipValue] = None,
epochs: int = 200,
cuda: Union[bool, str, torch.device] = False,
non_blocking: bool = False,
):
super().__init__(cuda=cuda)
self._clip_grad = clip_grad
self._epochs = epochs
self._non_blocking = non_blocking
self._loss_func = None
self._loss_type = None
self._model = None
self._init_states = None
self._optim = None
self._optimizer = None
self._optimizer_state = None
self._scheduler = None
self._lr_scheduler = None
self._early_stopping: Tuple[bool, str] = (False, '')
self._checkpoints: Dict[Union[int, str], Trainer.checkpoint_tuple] = OrderedDict()
self._training_info: List[OrderedDict] = []
self._total_its: int = 0
self._total_epochs: int = 0
self._x_val = None
self._y_val = None
self._validate_dataset = None
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.loss_func = loss_func
@property
def epochs(self):
return self._epochs
@property
def non_blocking(self):
return self._non_blocking
@property
def loss_type(self):
return self._loss_type
@property
def total_epochs(self):
return self._total_epochs
@property
def total_iterations(self):
return self._total_its
@property
def x_val(self):
return self._x_val
@property
def y_val(self):
return self._y_val
@property
def validate_dataset(self):
return self._validate_dataset
@property
def loss_func(self):
return self._loss_func
@loss_func.setter
def loss_func(self, loss_func):
if loss_func is not None:
self._loss_func = loss_func
self._loss_type = 'train_' + camel_to_snake(loss_func.__class__.__name__)
@property
def training_info(self):
if len(self._training_info) > 0:
return pd.DataFrame(data=self._training_info)
return None
@property
def device(self):
return self._device
@device.setter
def device(self, v):
self._device = self.check_device(v)
self.model = None
@property
def model(self):
return self._model
@model.setter
def model(self, model):
if model is not None:
if isinstance(model, torch.nn.Module):
self.reset(to=model)
else:
raise TypeError('parameter `m` must be a instance of <torch.nn.modules> but got %s' % type(model))
@property
def optimizer(self):
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
if optimizer is not None:
self._optim = optimizer
if self._optim is not None and self._model is not None:
self._optimizer = self._optim(self._model.parameters())
self._optimizer_state = deepcopy(self._optimizer.state_dict())
self.lr_scheduler = None
@property
def lr_scheduler(self):
return self._lr_scheduler
@lr_scheduler.setter
def lr_scheduler(self, scheduler):
if scheduler is not None:
self._scheduler = scheduler
if self._scheduler is not None and self._optimizer is not None:
self._lr_scheduler: Union[_LRScheduler, None] = self._scheduler(self._optimizer)
@property
def clip_grad(self):
return self._clip_grad
@clip_grad.setter
def clip_grad(self, fn):
self._clip_grad = fn
@property
def checkpoints(self):
return self._checkpoints
def get_checkpoint(self, checkpoint: Union[int, str] = None):
if checkpoint is None:
return list(self._checkpoints.keys())
if isinstance(checkpoint, int):
id_ = f'cp_{checkpoint}'
return self._checkpoints[id_]
if isinstance(checkpoint, str):
return self._checkpoints[checkpoint]
raise TypeError(f'parameter <cp> must be str or int but got {checkpoint.__class__}')
def set_checkpoint(self, id_: str = None):
if id_ is None:
id_ = f'cp_{self._total_its}'
cp = self.checkpoint_tuple(
id=id_,
iterations=self._total_its,
model_state=deepcopy(self._model.state_dict()),
)
self._checkpoints[id_] = cp
self._on_checkpoint(checkpoint=cp, trainer=self, is_training=True)
def early_stop(self, msg: str):
self._early_stopping = (True, msg)
def reset(self, *, to: Union[Module, int, str] = None, remove_checkpoints: bool = True):
self._training_info = []
self._total_its = 0
self._total_epochs = 0
self._early_stopping = (False, '')
if isinstance(to, Module):
self._model = to.to(self._device, non_blocking=self._non_blocking)
self._init_states = deepcopy(to.state_dict())
self.optimizer = None
self.lr_scheduler = None
elif isinstance(to, (int, str)):
cp = self.get_checkpoint(to)
self._model.load_state_dict(cp.model_state)
elif to is None:
self._model.load_state_dict(self._init_states)
self._optimizer.load_state_dict(self._optimizer_state)
else:
raise TypeError(f'parameter <to> must be torch.nnModule, int, or str but got {type(to)}')
if remove_checkpoints:
self._checkpoints = OrderedDict()
self._on_reset(trainer=self, is_training=True)
def fit(self,
x_train: Union[Any, Tuple[Any]] = None,
y_train: Any = None,
x_val: Union[Any, Tuple[Any]] = None,
y_val: Any = None,
*,
training_dataset: DataLoader = None,
validation_dataset: DataLoader = None,
epochs: int = None,
checkpoint: Union[bool, int, Callable[[int], Tuple[bool, str]]] = None,
progress_bar: Union[str, None] = 'auto',
**model_params):
if epochs is None:
epochs = self._epochs
prob = self._total_epochs
if progress_bar is not None:
if progress_bar == 'auto':
from tqdm.auto import tqdm
else:
from tqdm import tqdm
with tqdm(total=epochs, desc='Training') as pbar:
for _ in self(x_train=x_train,
y_train=y_train,
x_val=x_val,
y_val=y_val,
training_dataset=training_dataset,
validation_dataset=validation_dataset,
epochs=epochs,
checkpoint=checkpoint,
**model_params):
delta = self._total_epochs - prob
if delta:
prob = self._total_epochs
pbar.update(delta)
else:
for _ in self(x_train=x_train,
y_train=y_train,
x_val=x_val,
y_val=y_val,
training_dataset=training_dataset,
validation_dataset=validation_dataset,
epochs=epochs,
checkpoint=checkpoint,
**model_params):
pass
|
BSD 3-Clause New or Revised License
|
atomtoast/voice-of-light
|
ext/surrenderat20.py
|
SurrenderAt20.subscribe
|
python
|
async def subscribe(self, ctx, *, categories=None):
async with self.bot.pool.acquire() as db:
rows = await db.fetch("SELECT SurrenderAt20NotifChannel FROM Guilds WHERE ID=$1", ctx.guild.id)
if len(rows) == 0 or rows[0][0] is None:
await ctx.send("You need to set up a notifications channel before subscribing! \nUse either ;setchannel or ;surrenderat20 setchannel")
return
results = await db.fetch("SELECT * FROM SurrenderAt20Subscriptions WHERE Guild=$1", ctx.guild.id)
if len(results) == 1:
if categories is None:
categories = "all categories"
redposts = True
pbe = True
rotations = True
esports = True
releases = True
other = True
else:
categories = categories.lower()
if "red posts" not in categories and "pbe" not in categories and "rotations" not in categories and "esports" not in categories and "releases" not in categories and "other" not in categories:
await ctx.send("No categories found, potentially check for typos")
return
result = results[0]
redposts, pbe, rotations, esports, releases, other = result[1:7]
categories = categories.lower()
redposts = "red posts" in categories
pbe = "pbe" in categories
rotations = "rotations" in categories
esports = "esports" in categories
releases = "releases" in categories
other = "other" in categories
await db.execute("UPDATE SurrenderAt20Subscriptions \
SET RedPosts=$1, PBE=$2, Rotations=$3, Esports=$4, Releases=$5, Other=$6 \
WHERE Guild=$7",
redposts, pbe, rotations, esports, releases, other, ctx.guild.id)
else:
if categories is None:
categories = "all categories"
redposts = True
pbe = True
rotations = True
esports = True
releases = True
other = True
else:
categories = categories.lower()
redposts = "red posts" in categories
pbe = "pbe" in categories
rotations = "rotations" in categories
esports = "esports" in categories
releases = "releases" in categories
other = "other" in categories
if not redposts and not pbe and not rotations and not esports and not releases and not other:
await ctx.send("No categories found, potentially check for typos")
return
await db.execute("INSERT INTO SurrenderAt20Subscriptions (Guild, RedPosts, PBE, Rotations, Esports, Releases, Other) \
VALUES ($1, $2, $3, $4, $5, $6, $7)",
ctx.guild.id, redposts, pbe, rotations, esports, releases, other)
emb = discord.Embed(title="Successfully subscribed to " + categories.title(),
color=discord.Colour.green())
emb.set_thumbnail(
url="https://images-ext-2.discordapp.net/external/p4GLboECWMVLnDH-Orv6nkWm3OG8uLdI2reNRQ9RX74/http/3.bp.blogspot.com/-M_ecJWWc5CE/Uizpk6U3lwI/AAAAAAAACLo/xyh6eQNRzzs/s640/sitethumb.jpg")
await ctx.send(embed=emb)
|
Subscribes to Surrender@20
You can specify the categories you want to subscribe to or name none and subscribe to all.
The possible categories are:
- Red Posts
- PBE
- Rotations
- Esports
- Releases
- Other
|
https://github.com/atomtoast/voice-of-light/blob/7aaf56e90f8f6337dccc1f468a9fc776debd9f4c/ext/surrenderat20.py#L55-L145
|
import discord
from discord.ext import commands
import auth_token
import datetime
import re
class SurrenderAt20(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.cleanr = re.compile('<.*?>')
@commands.has_permissions(manage_messages=True)
@commands.guild_only()
@commands.group(aliases=["ff20"])
async def surrenderat20(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send("You need to specify an action \n(use 'help surrenderat20' for more information)")
@surrenderat20.command()
async def setchannel(self, ctx, channel=None):
if len(ctx.message.channel_mentions) > 0:
channel_obj = ctx.message.channel_mentions[0]
elif channel is not None:
channel_obj = discord.utils.get(
ctx.guild.channels, name=channel.replace("#", ""))
if channel_obj is None:
await ctx.send(f"No channel named {channel}")
return
else:
await ctx.send("Missing channel parameter")
return
bot_id = ctx.guild.get_member(self.bot.user.id)
permissions = channel_obj.permissions_for(bot_id)
if not permissions.send_messages or not permissions.embed_links:
await ctx.send("Command failed, please make sure that the bot has both permissions for sending messages and using embeds in the specified channel!")
return
async with self.bot.pool.acquire() as db:
await db.execute("UPDATE Guilds SET SurrenderAt20NotifChannel=$1 WHERE ID=$2",
channel_obj.id, ctx.guild.id)
await ctx.send("Successfully set Surrender@20 notifications to " + channel_obj.mention)
@surrenderat20.command(aliases=["sub"])
|
MIT License
|
chovanecm/sacredboard
|
sacredboard/app/data/datastorage.py
|
DataStorage.get_run_dao
|
python
|
def get_run_dao(self) -> RunDAO:
raise NotImplementedError(
"Run Data Access Object must be implemented.")
|
Return a data access object for Runs.
:return: RunDAO
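A hedged sketch of a backend filling in this hook; ``MyRunDAO`` is a placeholder implementation.
# Hedged sketch: MyRunDAO stands in for a concrete RunDAO implementation.
class MyDataStorage(DataStorage):
    def get_run_dao(self) -> RunDAO:
        return MyRunDAO()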
|
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/datastorage.py#L47-L54
|
from sacredboard.app.data.rundao import RunDAO
from .errors import NotFoundError
from .filesdao import FilesDAO
from .metricsdao import MetricsDAO
class Cursor:
def __init__(self):
pass
def count(self):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
class DataStorage:
def __init__(self):
pass
def get_metrics_dao(self) -> MetricsDAO:
return DummyMetricsDAO()
|
MIT License
|
ulikoehler/uliengineering
|
UliEngineering/Math/Coordinates.py
|
BoundingBox.aspect_ratio
|
python
|
def aspect_ratio(self):
return self.width / self.height
|
width / height
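A small, self-contained usage example:
import numpy as np
points = np.array([[0.0, 0.0], [4.0, 0.0], [4.0, 2.0]])
bbox = BoundingBox(points)
print(bbox.aspect_ratio)  # width 4.0 / height 2.0 -> 2.0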
|
https://github.com/ulikoehler/uliengineering/blob/58eadc8a848854b154a4be20ee2d2d1c614d802f/UliEngineering/Math/Coordinates.py#L46-L48
|
import numpy as np
__all__ = ["BoundingBox"]
class BoundingBox(object):
def __init__(self, points):
if len(points.shape) != 2 or points.shape[1] != 2:
raise ValueError("Points must be a (n,2), array but it has shape {}".format(
points.shape))
if points.shape[0] < 1:
raise ValueError("Can't compute bounding box for empty coordinates")
self.minx, self.miny = np.min(points, axis=0)
self.maxx, self.maxy = np.max(points, axis=0)
@property
def width(self):
return self.maxx - self.minx
@property
def height(self):
return self.maxy - self.miny
@property
def area(self):
return self.width * self.height
@property
|
Apache License 2.0
|
inveniosoftware/training
|
08-data-models-from-scratch/solution/my-site/my_site/authors/ext.py
|
Authors.init_app
|
python
|
def init_app(self, app):
self.init_config(app)
app.extensions['my-site-authors'] = self
|
Flask application initialization.
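A minimal wiring sketch, assuming ``init_config`` is defined on the extension as in the full module.
# Minimal sketch of registering the extension on a Flask app.
from flask import Flask
app = Flask(__name__)
Authors(app)  # __init__ calls init_app(app)
assert 'my-site-authors' in app.extensions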
|
https://github.com/inveniosoftware/training/blob/f9caea9c3d3a8849f18d34cc89fce7d5ed751b3b/08-data-models-from-scratch/solution/my-site/my_site/authors/ext.py#L23-L26
|
from __future__ import absolute_import, print_function
from . import config
class Authors(object):
def __init__(self, app=None):
if app:
self.init_app(app)
|
MIT License
|
biolink/ontobio
|
ontobio/__init__.py
|
configure_logging
|
python
|
def configure_logging():
dictConfig(DEFAULT_LOGGING)
default_formatter = logging.Formatter(
"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s",
"%Y-%m-%d %H:%M:%S")
if len(logging.getLogger().handlers) > 0:
for h in logging.getLogger().handlers:
if isinstance(h, logging.StreamHandler):
h.setLevel(logging.DEBUG)
h.setFormatter(default_formatter)
else:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(default_formatter)
logging.root.addHandler(console_handler)
logging.root.setLevel(logging.WARNING)
|
Initialize logging defaults for the project.
This function:
- assigns the DEBUG level and a default formatter to any existing console (stream) handlers, or adds a console handler if none exist
- sets the root logger level to WARNING
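A small usage sketch:
import logging
configure_logging()
logging.getLogger("ontobio").warning("logging configured")  # emitted via the console handler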
|
https://github.com/biolink/ontobio/blob/da9c5ff912785ee4ab98a8a39585562ecd2bdef5/ontobio/__init__.py#L22-L57
|
from __future__ import absolute_import
__version__ = '2.7.12'
from .ontol_factory import OntologyFactory
from .ontol import Ontology, Synonym, TextDefinition
from .assoc_factory import AssociationSetFactory
from .io.ontol_renderers import GraphRenderer
import logging
import logging.handlers
from logging.config import dictConfig
logging.getLogger("ontobio")
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
}
|
BSD 3-Clause New or Revised License
|
checkpointsw/cp_mgmt_api_python_sdk
|
cpapi/cli.py
|
simple_yaml
|
python
|
def simple_yaml(root, as_string=True):
if as_string:
return '\n'.join(simple_yaml(root, False) + [''])
if not isinstance(root, (dict, list)) or not root:
return [safe_string(root)]
if isinstance(root, dict):
items = root.items()
else:
items = ((None, v) for v in root)
lines = []
for k, v in items:
v_lines = simple_yaml(v, False)
indent = ' '
if k is None:
lines.append('- ' + v_lines.pop(0))
else:
lines.append(safe_string(k) + ':')
if isinstance(v, list):
indent = ''
if not v or not isinstance(v, (dict, list)):
lines[-1] += ' ' + v_lines.pop(0)
lines.extend([indent + line for line in v_lines])
return lines
|
Render the configuration in a user-friendly, YAML-like format.
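A small usage sketch; the configuration dictionary is made up for illustration.
# Made-up configuration for illustration.
config = {'name': 'host1', 'tags': ['mgmt', 'gw'], 'nat': {'auto': True}}
print(simple_yaml(config))
# name: host1
# tags:
# - mgmt
# - gw
# nat:
#   auto: true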
|
https://github.com/checkpointsw/cp_mgmt_api_python_sdk/blob/7294417a702a06ef910ca012d9b347a5908b599a/cpapi/cli.py#L122-L149
|
import argparse
import collections
import json
import os
import re
import sys
import traceback
from cpapi.utils import compatible_loads
from . import APIClient, APIClientArgs
if sys.version_info < (3,):
string_type = basestring
else:
string_type = str
def log(msg):
msg = '%s' % msg
sys.stderr.write(msg)
sys.stderr.flush()
log.debug = os.environ.get('MGMT_CLI_DEBUG') == 'on'
def debug(*args, **kwargs):
if log.debug:
log(*args, **kwargs)
class Pairs(object):
NO_KEY = None
def __init__(self, pair_list=None):
if pair_list is None:
pair_list = []
self.list = list(pair_list)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, repr(self.list))
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def __iter__(self):
return ((k, v) for k, v in self.list)
def prefixes(self):
prefixes = collections.OrderedDict()
for k, _ in self:
prefix = k.partition('.')[0]
prefixes[prefix] = None
return prefixes.keys()
def get(self, prefix):
found = Pairs()
suffixes = collections.OrderedDict()
for k, v in self:
if k == prefix:
suffix = self.NO_KEY
elif k.startswith(prefix + '.'):
suffix = k[len(prefix) + 1:]
if not suffix:
raise ValueError('empty suffix: "%s"' % k)
else:
continue
if suffix in suffixes:
raise ValueError('duplicate key: "%s"' % k)
suffixes[suffix] = None
found.add(suffix, v)
if self.NO_KEY in suffixes and len(suffixes) != 1:
suffixes.pop(self.NO_KEY)
raise ValueError('mixed keys: ["%s" "%s"]' % (
prefix, '" "'.join(['%s.%s' % (prefix, s) for s in suffixes])))
return found
def add(self, key, val):
self.list.append((key, val))
def to_obj(self):
if len(self) == 1 and self[0][0] is Pairs.NO_KEY:
val = self[0][1]
if val in {'null', 'true', 'false'} or val[0] in '"{[':
return compatible_loads(val)
elif re.match(r'\d+$', val):
return int(val, 10)
return val
pairs = Pairs()
all_nums = True
any_nums = False
for prefix in self.prefixes():
vals = self.get(prefix)
if re.match(r'\d+$', prefix):
prefix = int(prefix, 10)
any_nums = True
else:
all_nums = False
pairs.add(prefix, vals.to_obj())
if not all_nums:
if any_nums:
raise ValueError('mixed (sub)keys: ["%s"]' % '" "'.join(
str(i[0]) for i in pairs))
return collections.OrderedDict(pairs)
return [i[1] for i in sorted(pairs)]
def safe_string(v):
if isinstance(v, string_type) and re.match(
r'[A-Za-z_][-0-9A-Za-z_]*$', v) and v.lower() not in {
'null', 'true', 'yes', 'on', 'false', 'no', 'off',
'infinity', 'nan', '---', '...'} and not re.match(
r'[0-9][0-9][0-9][0-9]-', v):
return v
return json.dumps(v)
|
Apache License 2.0
|
datapane/datapane
|
src/datapane/common/df_processor.py
|
timedelta_to_str
|
python
|
def timedelta_to_str(df: pd.DataFrame):
df_td = df.select_dtypes("timedelta")
df[df_td.columns] = np.where(pd.isnull(df_td), pd.NA, df_td.astype("string"))
|
convert timedelta to str
NOTE - only until arrow.js supports Duration type
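A quick sketch of the in-place conversion; the column name and values are illustrative:
import pandas as pd
df = pd.DataFrame({"elapsed": pd.to_timedelta(["1 days", None, "2 hours"])})
timedelta_to_str(df)   # mutates df in place
print(df["elapsed"])   # string values, with NaT replaced by pd.NA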
|
https://github.com/datapane/datapane/blob/859c3cc76eb8da123139ba2bba09da81983ffd75/src/datapane/common/df_processor.py#L62-L68
|
import datetime
from numbers import Number
from typing import Any
import numpy as np
import pandas as pd
from packaging.specifiers import SpecifierSet
from packaging.version import Version
PD_VERSION = Version(pd.__version__)
PD_1_3_GREATER = SpecifierSet(">=1.3.0")
PD_1_2_x = SpecifierSet("~=1.2.0")
PD_1_1_x = SpecifierSet("~=1.1.0")
def convert_axis(df: pd.DataFrame):
if df.columns.nlevels > 1:
df.columns = ["/".join(a) for a in df.columns.to_flat_index()]
df.columns = df.columns.astype("string")
if isinstance(df.index, pd.RangeIndex):
pass
elif isinstance(df.index, pd.Int64Index):
df.reset_index(inplace=True, drop=True)
else:
df.reset_index(inplace=True)
def downcast_numbers(data: pd.DataFrame):
def downcast_ints(ser: pd.Series) -> pd.Series:
try:
ser = pd.to_numeric(ser, downcast="signed")
ser = pd.to_numeric(ser, downcast="unsigned")
except Exception:
pass
return ser
df_num = data.select_dtypes("integer", exclude=["timedelta"])
data[df_num.columns] = df_num.apply(downcast_ints)
def downcast_floats(ser: pd.Series) -> pd.Series:
ser = pd.to_numeric(ser, downcast="float", errors="ignore")
return ser
|
Apache License 2.0
|
agermanidis/autosub
|
autosub/formatters.py
|
raw_formatter
|
python
|
def raw_formatter(subtitles):
return ' '.join(text for (_rng, text) in subtitles)
|
Serialize a list of subtitles as a newline-delimited string.
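A tiny sketch with the ((start, end), text) tuples the formatters expect; the values are illustrative:
subs = [((0.0, 1.5), "Hello"), ((1.5, 3.0), "world")]
print(raw_formatter(subs))   # -> 'Hello world'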
|
https://github.com/agermanidis/autosub/blob/d32389cb76e63ec6959111c3f989a72f36f726fe/autosub/formatters.py#L54-L58
|
from __future__ import unicode_literals
import json
import pysrt
import six
def srt_formatter(subtitles, padding_before=0, padding_after=0):
sub_rip_file = pysrt.SubRipFile()
for i, ((start, end), text) in enumerate(subtitles, start=1):
item = pysrt.SubRipItem()
item.index = i
item.text = six.text_type(text)
item.start.seconds = max(0, start - padding_before)
item.end.seconds = end + padding_after
sub_rip_file.append(item)
return '\n'.join(six.text_type(item) for item in sub_rip_file)
def vtt_formatter(subtitles, padding_before=0, padding_after=0):
text = srt_formatter(subtitles, padding_before, padding_after)
text = 'WEBVTT\n\n' + text.replace(',', '.')
return text
def json_formatter(subtitles):
subtitle_dicts = [
{
'start': start,
'end': end,
'content': text,
}
for ((start, end), text)
in subtitles
]
return json.dumps(subtitle_dicts)
|
MIT License
|
tensorflow/transform
|
tensorflow_transform/test_case.py
|
_graph_function_handler
|
python
|
def _graph_function_handler(input_signature):
def wrapper(fn):
def _run_graph(*inputs):
with context.graph_mode():
assert len(input_signature) == len(inputs)
placeholders = list(map(_make_placeholder, input_signature))
output_tensor = fn(*placeholders)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.tables_initializer())
return sess.run(output_tensor,
feed_dict=dict(zip(placeholders, inputs)))
return _run_graph
return wrapper
|
Run the given function in graph mode, utilizing placeholders.
Args:
input_signature: A possibly nested sequence of `tf.TensorSpec` objects
specifying the shapes and dtypes of the Tensors that will be supplied to
this function.
Returns:
A wrapper function that accepts arguments specified by `input_signature`.
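A hedged sketch mirroring how the test harness applies the decorator; the op and inputs are illustrative:
import tensorflow as tf
@_graph_function_handler([tf.TensorSpec(shape=[None], dtype=tf.float32)])
def double(x):
    return x * 2.0
print(double([1.0, 2.0, 3.0]))  # feeds the input through placeholders and a v1 Session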
|
https://github.com/tensorflow/transform/blob/6349d7f6d847cb8979f31b9b315981d79ffba3e5/tensorflow_transform/test_case.py#L127-L151
|
from builtins import zip
import functools
import inspect
import itertools
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import unittest
from tensorflow.python import tf2
from tensorflow.python.eager import context
main = tf.test.main
named_parameters = parameterized.named_parameters
SkipTest = unittest.SkipTest
def is_tf_api_version_1():
return hasattr(tf, 'Session')
def cross_named_parameters(*args):
def _cross_test_cases(parameters_list):
crossed_parameters = parameters_list[0].copy()
for current_parameters in parameters_list[1:]:
for name, value in current_parameters.items():
if name == 'testcase_name':
crossed_parameters[name] = '{}_{}'.format(
crossed_parameters[name], value)
else:
assert name not in crossed_parameters, name
crossed_parameters[name] = value
return crossed_parameters
return list(map(_cross_test_cases, itertools.product(*args)))
def parameters(*testcases):
def wrapper(fn):
arg_names = inspect.getargspec(fn).args
if arg_names[0] != 'self':
raise ValueError(
'First argument to test is expected to be "self", but is {}'.format(
arg_names[0]))
arg_names = arg_names[1:]
def to_arg_dict(testcase):
testcase = tuple(testcase)
if len(testcase) != len(arg_names):
raise ValueError(
'The number of arguments to parameterized test do not match the '
'number of expected arguments: {} != {}, arguments: {}, names: {}'.
format(len(testcase), len(arg_names), testcase, arg_names))
return dict(zip(arg_names, testcase))
testcases_with_names = [to_arg_dict(testcase) for testcase in testcases]
return parameterized.parameters(*testcases_with_names)(fn)
return wrapper
def cross_parameters(*args):
for p in itertools.product(*args):
yield functools.reduce(lambda x, y: x + y, p)
def _make_placeholder(tensor_spec):
if isinstance(tensor_spec, tf.SparseTensorSpec):
return tf.compat.v1.sparse_placeholder(
shape=tensor_spec.shape, dtype=tensor_spec.dtype)
if isinstance(tensor_spec, tf.RaggedTensorSpec):
return tf.compat.v1.ragged.placeholder(
tensor_spec._dtype, tensor_spec._ragged_rank, value_shape=())
else:
return tf.compat.v1.placeholder(
shape=tensor_spec.shape, dtype=tensor_spec.dtype)
|
Apache License 2.0
|
bioconda/bioconda-utils
|
bioconda_utils/cran_skeleton.py
|
main
|
python
|
def main():
setup_logger()
parser = argparse.ArgumentParser()
parser.add_argument('package', help='name of the cran package')
parser.add_argument('output_dir', help='output directory for the recipe')
parser.add_argument('--no-win', action="store_true",
help='runs the skeleton and removes windows specific information')
parser.add_argument('--force', action='store_true',
help='If a directory exists for any recipe, overwrite it')
args = parser.parse_args()
write_recipe(args.package, args.output_dir, no_windows=args.no_win,
force=args.force)
|
Command-line entry point: parse the CLI arguments and write the recipe for the given CRAN package.
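The CLI built here maps directly onto write_recipe(); a rough programmatic equivalent (package name and output directory are illustrative):
write_recipe('ggplot2', 'recipes/', no_windows=True, force=False)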
|
https://github.com/bioconda/bioconda-utils/blob/df49b2169672255d5937b181cb86fbe08f7ebaaa/bioconda_utils/cran_skeleton.py#L287-L300
|
import os
import re
from itertools import zip_longest
import argparse
import logging
from conda_build.api import skeletonize
from .utils import run, setup_logger
logger = logging.getLogger(__name__)
INVALID_NAME_MAP = {
'r-edger': 'bioconductor-edger',
}
gpl2_short = r" license_family: GPL2"
gpl2_long = r"""
license_family: GPL2
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-2' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\GPL-2' # [win]
""".strip('\n')
gpl3_short = r" license_family: GPL3"
gpl3_long = r"""
license_family: GPL3
license_file: '{{ environ["PREFIX"] }}/lib/R/share/licenses/GPL-3' # [unix]
license_file: '{{ environ["PREFIX"] }}\\R\\share\\licenses\\GPL-3' # [win]
""".strip('\n')
win32_string = 'number: 0\n skip: true # [win32]'
def write_recipe(package, recipe_dir='.', recursive=False, force=False,
no_windows=False, **kwargs):
logger.debug('Building skeleton for %s', package)
conda_version = package.startswith('r-')
if not conda_version:
outdir = os.path.join(
recipe_dir, 'r-' + package.lower())
else:
outdir = os.path.join(
recipe_dir, package)
if os.path.exists(outdir):
if force:
logger.warning('Removing %s', outdir)
run(['rm', '-r', outdir], mask=False)
else:
logger.warning('%s exists, skipping', outdir)
return
try:
skeletonize(
package, repo='cran', output_dir=recipe_dir, version=None, recursive=recursive)
clean_skeleton_files(
package=os.path.join(recipe_dir, 'r-' + package.lower()),
no_windows=no_windows)
except NotImplementedError as e:
logger.error('%s had dependencies that specified versions: skipping.', package)
def clean_skeleton_files(package, no_windows=True):
clean_yaml_file(package, no_windows)
clean_build_file(package, no_windows)
clean_bld_file(package, no_windows)
def clean_yaml_file(package, no_windows):
path = os.path.join(package, 'meta.yaml')
with open(path, 'r') as yaml:
lines = list(yaml.readlines())
lines = filter_lines_regex(lines, r'^\s*#.*$', '')
lines = remove_empty_lines(lines)
lines = filter_lines_regex(lines, r' [+|] file LICEN[SC]E', '')
lines = filter_lines_regex(lines, r'^\s+fn:\s*.*$', '')
lines = filter_lines_regex(lines, gpl2_short, gpl2_long)
lines = filter_lines_regex(lines, gpl3_short, gpl3_long)
if no_windows:
lines = filter_lines_regex(lines, r'number: 0', win32_string)
add_maintainers(lines)
with open(path, 'w') as yaml:
out = "".join(lines)
out = out.replace('{indent}', '\n - ')
for wrong, correct in INVALID_NAME_MAP.items():
out = out.replace(wrong, correct)
yaml.write(out)
def clean_build_file(package, no_windows=False):
path = os.path.join(package, 'build.sh')
with open(path, 'r') as build:
lines = list(build.readlines())
lines = filter_lines_regex(lines, r'^mv\s.*$', '')
lines = filter_lines_regex(lines, r'^grep\s.*$', '')
lines = filter_lines_regex(lines, r'^\s*#.*$', '')
lines = remove_empty_lines(lines)
with open(path, 'w') as build:
build.write("".join(lines))
def clean_bld_file(package, no_windows):
path = os.path.join(package, 'bld.bat')
if not os.path.exists(path):
return
if no_windows:
os.unlink(path)
return
with open(path, 'r') as bld:
lines = list(bld.readlines())
lines = filter_lines_regex(lines, r'^@.*$', '')
lines = remove_empty_lines(lines)
with open(path, 'w') as bld:
bld.write("".join(lines))
def filter_lines_regex(lines, regex, substitute):
return [re.sub(regex, substitute, line) for line in lines]
def remove_empty_lines(lines):
cleaned_lines = []
for line, next_line in zip_longest(lines, lines[1:]):
if (
(line.isspace() and next_line is None) or
(line.isspace() and next_line.isspace())
):
pass
else:
cleaned_lines.append(line)
if cleaned_lines[0].isspace():
cleaned_lines = cleaned_lines[1:]
return cleaned_lines
def add_maintainers(lines):
HERE = os.path.abspath(os.path.dirname(__file__))
maintainers_yaml = os.path.join(HERE, 'maintainers.yaml')
with open(maintainers_yaml, 'r') as yaml:
extra_lines = list(yaml.readlines())
lines.extend(extra_lines)
|
MIT License
|
yyhaker/machinecomprehension
|
src/trainer/base_trainer.py
|
BaseTrainer.train
|
python
|
def train(self):
self.logger.info("start training.....")
for epoch in range(self.start_epoch, self.epochs + 1):
result = self._train_epoch(epoch)
log = {'epoch': epoch}
for key, value in result.items():
if key == 'train_metrics':
log.update({mtr.__name__: value[i] for i, mtr in enumerate(self.metrics)})
elif key == 'val_metrics':
log.update({'val_' + mtr.__name__: value[i] for i, mtr in enumerate(self.metrics)})
else:
log[key] = value
best = False
if self.monitor_mode != 'off':
try:
if (self.monitor_mode == 'min' and log[self.monitor] < self.monitor_best) or (self.monitor_mode == 'max' and log[self.monitor] > self.monitor_best):
self.monitor_best = log[self.monitor]
best = True
except KeyError:
if epoch == 1:
msg = "Warning: Can\'t recognize metric named '{}' ".format(self.monitor) + "for performance monitoring. model_best checkpoint won\'t be updated."
self.logger.warning(msg)
if best:
self._save_best_model(epoch)
self.logger.info("training is done, the best val {} is: {}".format(self.monitor, self.monitor_best))
|
Full training logic.
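A hedged sketch of the contract train() relies on: concrete trainers implement _train_epoch() and return a dict that is folded into the epoch log (the class and keys here are illustrative):
class MyTrainer(BaseTrainer):
    def _train_epoch(self, epoch):
        # ... one pass over the training (and validation) data ...
        return {'loss': 0.5}   # merged into `log` and compared against the monitor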
|
https://github.com/yyhaker/machinecomprehension/blob/d92d39f43f72df8bac29e8947d7457b19e088b43/src/trainer/base_trainer.py#L102-L142
|
import datetime
import json
import logging
import math
import os
import fnmatch
import torch
from utils import ensure_dir
from tensorboardX import SummaryWriter
class BaseTrainer(object):
def __init__(self, model, loss, metrics, optimizer, resume, config):
self.config = config
self.logger = logging.getLogger('MC')
self.device, device_ids = self._prepare_device(config['n_gpu'])
self.model = model.to(self.device)
if len(device_ids) > 1:
self.model = torch.nn.DataParallel(model, device_ids=device_ids)
self.loss = loss
self.metrics = metrics
self.optimizer = optimizer
self.epochs = config['trainer']['epochs']
self.save_freq = config['trainer']['save_freq']
self.verbosity = config['trainer']['verbosity']
self.monitor = config['trainer']['monitor']
self.monitor_mode = config['trainer']['monitor_mode']
assert self.monitor_mode in ['min', 'max', 'off']
self.monitor_best = math.inf if self.monitor_mode == 'min' else -math.inf
self.start_epoch = 1
self.log_step = config['trainer']['log_step']
self.checkpoint_dir = os.path.join(config['trainer']['save_dir'], config['arch']['type'], config["name"])
writer_dir = os.path.join(self.checkpoint_dir, config['visualization']['log_dir'])
self.writer = SummaryWriter(log_dir=writer_dir)
ensure_dir(self.checkpoint_dir)
config_save_path = os.path.join(self.checkpoint_dir, 'config.json')
with open(config_save_path, 'w') as f:
json.dump(config, f, indent=4, sort_keys=False)
if resume:
self._resume_checkpoint(resume)
def _prepare_device(self, n_gpu_use):
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning(
"Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
msg = "Warning: The number of GPU\'s configured to use is {}, but only {} are available on this machine.".format(
n_gpu_use, n_gpu)
self.logger.warning(msg)
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
|
MIT License
|
google/floq-client
|
floq/client/errors.py
|
ServiceError.__init__
|
python
|
def __init__(self, status_code: int, message: str) -> None:
super().__init__(f"API service error: {message}")
self.message = message
self.status_code = status_code
|
Creates ServiceError class instance.
Args:
status_code: HTTP error code.
message: API error message.
|
https://github.com/google/floq-client/blob/c784c321b801cfa74a25d92d41f31c1ae3e0ac3e/floq/client/errors.py#L42-L51
|
import uuid
class FloqError(Exception):
class SerializationError(FloqError):
def __init__(self) -> None:
super().__init__("cirq encountered a serialization error.")
class ServiceError(FloqError):
|
Apache License 2.0
|
hail-is/hail
|
hail/python/hail/expr/functions.py
|
literal
|
python
|
def literal(x: Any, dtype: Optional[Union[HailType, str]] = None):
wrapper = {'has_expr': False}
def typecheck_expr(t, x):
if isinstance(x, Expression):
wrapper['has_expr'] = True
if x.dtype != t:
raise TypeError(f"'literal': type mismatch: expected '{t}', found '{x.dtype}'")
elif x._indices.source is not None:
if x._indices.axes:
raise ExpressionException(f"'literal' can only accept scalar or global expression arguments,"
f" found indices {x._indices.axes}")
return False
elif x is None:
return False
else:
t._typecheck_one_level(x)
return True
if dtype is None:
dtype = impute_type(x)
if isinstance(x, np.generic):
x = x.item()
elif isinstance(x, np.ndarray):
pass
else:
try:
dtype._traverse(x, typecheck_expr)
except TypeError as e:
raise TypeError("'literal': object did not match the passed type '{}'"
.format(dtype)) from e
if wrapper['has_expr']:
return literal(hl.eval(to_expr(x, dtype)), dtype)
if x is None:
return hl.missing(dtype)
elif is_primitive(dtype):
if dtype == tint32:
assert isinstance(x, builtins.int)
assert tint32.min_value <= x <= tint32.max_value
return construct_expr(ir.I32(x), tint32)
elif dtype == tint64:
assert isinstance(x, builtins.int)
assert tint64.min_value <= x <= tint64.max_value
return construct_expr(ir.I64(x), tint64)
elif dtype == tfloat32:
assert isinstance(x, (builtins.float, builtins.int))
return construct_expr(ir.F32(x), tfloat32)
elif dtype == tfloat64:
assert isinstance(x, (builtins.float, builtins.int))
return construct_expr(ir.F64(x), tfloat64)
elif dtype == tbool:
assert isinstance(x, builtins.bool)
return construct_expr(ir.TrueIR() if x else ir.FalseIR(), tbool)
else:
assert dtype == tstr
assert isinstance(x, builtins.str)
return construct_expr(ir.Str(x), tstr)
else:
return construct_expr(ir.Literal(dtype, x), dtype)
|
Captures and broadcasts a Python variable or object as an expression.
Examples
--------
>>> table = hl.utils.range_table(8)
>>> greetings = hl.literal({1: 'Good morning', 4: 'Good afternoon', 6 : 'Good evening'})
>>> table.annotate(greeting = greetings.get(table.idx)).show()
+-------+------------------+
| idx | greeting |
+-------+------------------+
| int32 | str |
+-------+------------------+
| 0 | NA |
| 1 | "Good morning" |
| 2 | NA |
| 3 | NA |
| 4 | "Good afternoon" |
| 5 | NA |
| 6 | "Good evening" |
| 7 | NA |
+-------+------------------+
Notes
-----
Use this function to capture large Python objects for use in expressions. This
function provides an alternative to adding an object as a global annotation on a
:class:`.Table` or :class:`.MatrixTable`.
Parameters
----------
x
Object to capture and broadcast as an expression.
Returns
-------
:class:`.Expression`
|
https://github.com/hail-is/hail/blob/6d6c87149d62d6ad2c148d9c8e56d1a2626079df/hail/python/hail/expr/functions.py#L177-L276
|
import operator
import builtins
import functools
from typing import Union, Optional, Any, Callable, Iterable, TypeVar
from deprecated import deprecated
import hail
import hail as hl
from hail.expr.expressions import (Expression, ArrayExpression, SetExpression,
Int32Expression, Int64Expression, Float32Expression, Float64Expression,
DictExpression, StructExpression, LocusExpression, StringExpression,
IntervalExpression, ArrayNumericExpression, BooleanExpression,
CallExpression, TupleExpression, ExpressionException, NumericExpression,
unify_all, construct_expr, to_expr, unify_exprs, impute_type,
construct_variable, apply_expr, coercer_from_dtype, unify_types_limited,
expr_array, expr_any, expr_struct, expr_int32, expr_int64, expr_float32,
expr_float64, expr_oneof, expr_bool, expr_tuple, expr_dict, expr_str,
expr_set, expr_call, expr_locus, expr_interval, expr_ndarray, expr_numeric,
cast_expr)
from hail.expr.types import (HailType, hail_type, tint32, tint64, tfloat32,
tfloat64, tstr, tbool, tarray, tset, tdict,
tstruct, tlocus, tinterval, tcall, ttuple,
tndarray, is_primitive, is_numeric)
from hail.genetics.reference_genome import reference_genome_type, ReferenceGenome
import hail.ir as ir
from hail.typecheck import (typecheck, nullable, anytype, enumeration, tupleof,
func_spec, oneof, arg_check, args_check, anyfunc)
from hail.utils.java import Env, warning
from hail.utils.misc import plural
import numpy as np
Coll_T = TypeVar('Collection_T', ArrayExpression, SetExpression)
Num_T = TypeVar('Numeric_T', Int32Expression, Int64Expression, Float32Expression, Float64Expression)
def _func(name, ret_type, *args, type_args=()):
indices, aggregations = unify_all(*args)
return construct_expr(ir.Apply(name, ret_type, *(a._ir for a in args), type_args=type_args), ret_type, indices, aggregations)
def _seeded_func(name, ret_type, seed, *args):
seed = seed if seed is not None else Env.next_seed()
indices, aggregations = unify_all(*args)
return construct_expr(ir.ApplySeeded(name, seed, ret_type, *(a._ir for a in args)), ret_type, indices, aggregations)
@typecheck(a=expr_array(), x=expr_any)
def _lower_bound(a, x):
if a.dtype.element_type != x.dtype:
raise TypeError(f"_lower_bound: incompatible types: {a.dtype}, {x.dtype}")
indices, aggregations = unify_all(a, x)
return construct_expr(ir.LowerBoundOnOrderedCollection(a._ir, x._ir, on_key=False), tint32, indices, aggregations)
@typecheck(cdf=expr_struct(), q=expr_oneof(expr_float32, expr_float64))
def _quantile_from_cdf(cdf, q):
def compute(cdf):
n = cdf.ranks[cdf.ranks.length() - 1]
pos = hl.int64(q * n) + 1
idx = hl.max(0, hl.min(cdf.values.length() - 1, _lower_bound(cdf.ranks, pos) - 1))
res = hl.if_else(n == 0,
hl.missing(cdf.values.dtype.element_type),
cdf.values[idx])
return res
return hl.rbind(cdf, compute)
@typecheck(cdf=expr_struct(), failure_prob=expr_oneof(expr_float32, expr_float64), all_quantiles=bool)
def _error_from_cdf(cdf, failure_prob, all_quantiles=False):
def compute_sum(cdf):
s = hl.sum(hl.range(0, hl.len(cdf._compaction_counts)).map(lambda i: cdf._compaction_counts[i] * (2 ** (2 * i))))
return s / (cdf.ranks[-1] ** 2)
def update_grid_size(p, s):
return 4 * hl.sqrt(hl.log(2 * p / failure_prob) / (2 * s))
def compute_grid_size(s):
return hl.fold(lambda p, i: update_grid_size(p, s), 1 / failure_prob, hl.range(0, 5))
def compute_single_error(s, failure_prob=failure_prob):
return hl.sqrt(hl.log(2 / failure_prob) * s / 2)
def compute_global_error(s):
return hl.rbind(compute_grid_size(s), lambda p: 1 / p + compute_single_error(s, failure_prob / p))
if all_quantiles:
return hl.rbind(cdf, lambda cdf: hl.rbind(compute_sum(cdf), compute_global_error))
else:
return hl.rbind(cdf, lambda cdf: hl.rbind(compute_sum(cdf), compute_single_error))
@typecheck(t=hail_type)
def missing(t: Union[HailType, str]):
return construct_expr(ir.NA(t), t)
@deprecated(version="0.2.62", reason="Replaced by hl.missing")
@typecheck(t=hail_type)
def null(t: Union[HailType, str]):
return missing(t)
@typecheck(x=anytype, dtype=nullable(hail_type))
|
MIT License
|
morepath/dectate
|
dectate/config.py
|
Configurable.get_action_group
|
python
|
def get_action_group(self, action_class):
return self._action_groups.get(action_class, None)
|
Return ActionGroup for ``action_class`` or ``None`` if not found.
:param action_class: the action class to find the action group of.
:return: an ``ActionGroup`` instance.
|
https://github.com/morepath/dectate/blob/511920acbeba5b070c532c9b0fa54b53c2baeb0a/dectate/config.py#L189-L195
|
import abc
import logging
import sys
import inspect
from .error import (
ConflictError,
ConfigError,
DirectiveError,
DirectiveReportError,
)
from .toposort import topological_sort
from .sentinel import NOT_FOUND
order_count = 0
class Configurable:
app_class = None
def __init__(self, extends, config):
self.extends = extends
self.config = config
self._action_classes = {}
self._directives = []
self.committed = False
def register_directive(self, directive, obj):
self._directives.append((directive, obj))
def _fixup_directive_names(self):
app_class = self.app_class
for name, method in app_class.get_directive_methods():
func = method.__func__
func.__name__ = name
if hasattr(func, "__qualname__"):
func.__qualname__ = type(app_class).__name__ + "." + name
def get_action_classes(self):
result = {}
app_class = self.app_class
for name, method in app_class.get_directive_methods():
result[method.__func__.action_factory] = name
for configurable in self.extends:
for action_class, name in configurable._action_classes.items():
if action_class not in result:
result[action_class] = name
return result
def setup(self):
self._fixup_directive_names()
self._action_classes = self.get_action_classes()
grouped_action_classes = sort_action_classes(
group_action_classes(self._action_classes.keys())
)
for action_class in grouped_action_classes:
self.delete_config(action_class)
self._action_groups = d = {}
self._factories_seen = {}
for action_class in grouped_action_classes:
self.setup_config(action_class)
d[action_class] = ActionGroup(
action_class, self.action_extends(action_class)
)
def setup_config(self, action_class):
items = topological_sort(action_class.config.items(), factory_key)
seen = self._factories_seen
config = self.config
for name, factory in items:
configured = getattr(config, name, None)
if configured is not None:
if seen[name] is not factory:
raise ConfigError(
"Inconsistent factories for config %r (%r and %r)"
% ((name, seen[name], factory))
)
continue
seen[name] = factory
kw = get_factory_arguments(
action_class, config, factory, self.app_class
)
setattr(config, name, factory(**kw))
def delete_config(self, action_class):
config = self.config
for name, factory in action_class.config.items():
if hasattr(config, name):
delattr(config, name)
factory_arguments = getattr(factory, "factory_arguments", None)
if factory_arguments is None:
continue
for name in factory_arguments.keys():
if hasattr(config, name):
delattr(config, name)
def group_actions(self):
actions = [
(directive.action(), obj) for (directive, obj) in self._directives
]
d = self._action_groups
for action, obj in expand_actions(actions):
action_class = action.group_class
if action_class is None:
action_class = action.__class__
d[action_class].add(action, obj)
|
BSD 3-Clause New or Revised License
|
testsmt/yinyang
|
yinyang/src/mutators/SemanticFusion/VariableFusion.py
|
get_first_assert_idx
|
python
|
def get_first_assert_idx(template):
for first_ass_idx, cmd in enumerate(template.commands):
if isinstance(cmd, Assert):
return first_ass_idx
return -1
|
Find first assert's idx.
|
https://github.com/testsmt/yinyang/blob/e3fca4a6a3c5a2662cbf24aaaeb0a277be6e57eb/yinyang/src/mutators/SemanticFusion/VariableFusion.py#L40-L47
|
import random
import copy
import string
from yinyang.src.parsing.Ast import (
Const, Var, Expr, Assert, DeclareConst, DeclareFun, Script
)
def gen_random_string(length):
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(length))
|
MIT License
|
dawsonjon/chips-2.0
|
chips/api/api.py
|
Chip.generate_verilog
|
python
|
def generate_verilog(self):
for component in self.components.values():
component.generate_verilog()
for i in self.wires:
if i.source is None:
raise C2CHIPError(
"wire %s has no source" % i.name, i.filename, i.lineno)
if i.sink is None:
raise C2CHIPError(
"wire %s has no sink" % i.name, i.filename, i.lineno)
for i in self.inputs.values():
if i.sink is None:
raise C2CHIPError(
"input %s has no sink" % i.name, i.filename, i.lineno)
for i in self.outputs.values():
if i.source is None:
raise C2CHIPError(
"output %s has no source" % i.name, i.filename, i.lineno)
ports = ["clk", "rst", "exception"]
ports += ["%s" % i.name for i in self.inputs.values()]
ports += ["%s_stb" % i.name for i in self.inputs.values()]
ports += ["%s_ack" % i.name for i in self.inputs.values()]
ports += ["%s" % i.name for i in self.outputs.values()]
ports += ["%s_stb" % i.name for i in self.outputs.values()]
ports += ["%s_ack" % i.name for i in self.outputs.values()]
ports = ", ".join(ports)
output_file = open(self.name + ".v", "w")
output_file.write("module %s(%s);\n" % (self.name, ports))
output_file.write(" input clk;\n")
output_file.write(" input rst;\n")
output_file.write(" output exception;\n")
for i in self.inputs.values():
output_file.write(" input [31:0] %s;\n" % i.name)
output_file.write(" input %s_stb;\n" % i.name)
output_file.write(" output %s_ack;\n" % i.name)
for i in self.outputs.values():
output_file.write(" output [31:0] %s;\n" % i.name)
output_file.write(" output %s_stb;\n" % i.name)
output_file.write(" input %s_ack;\n" % i.name)
for i in self.wires:
output_file.write(" wire [31:0] %s;\n" % i.name)
output_file.write(" wire %s_stb;\n" % i.name)
output_file.write(" wire %s_ack;\n" % i.name)
for instance in self.instances:
output_file.write(" wire exception_%s;\n" % (id(instance)))
for instance in self.instances:
component = instance.component_name
output_file.write(
" %s %s_%s(\n " % (component, component, id(instance)))
ports = []
ports.append(".clk(clk)")
ports.append(".rst(rst)")
ports.append(".exception(exception_%s)" % id(instance))
for name, i in instance.inputs.iteritems():
ports.append(".input_%s(%s)" % (name, i.name))
ports.append(".input_%s_stb(%s_stb)" % (name, i.name))
ports.append(".input_%s_ack(%s_ack)" % (name, i.name))
for name, i in instance.outputs.iteritems():
ports.append(".output_%s(%s)" % (name, i.name))
ports.append(".output_%s_stb(%s_stb)" % (name, i.name))
ports.append(".output_%s_ack(%s_ack)" % (name, i.name))
output_file.write(",\n ".join(ports))
output_file.write(");\n")
output_file.write(" assign exception = %s;\n" % (
" || ".join(["exception_" + str(id(i)) for i in self.instances])
))
output_file.write("endmodule\n")
output_file.close()
|
Synopsis:
.. code-block:: python
chip.generate_verilog(name)
Description:
Generate synthesisable Verilog output.
Arguments:
None
Returns:
None
|
https://github.com/dawsonjon/chips-2.0/blob/57a986b8df36248bb4736bd84e3e68046b8665af/chips/api/api.py#L291-L385
|
import os
import itertools
import tempfile
import shutil
import inspect
import textwrap
import subprocess
from chips.compiler.exceptions import C2CHIPError
from chips.compiler.python_model import StopSim
from chips_c import bits_to_float, float_to_bits, bits_to_double, double_to_bits, join_words, high_word, low_word
import chips.compiler.compiler
class Chip:
def __init__(self, name):
self.name = name
self.instances = []
self.wires = []
self.inputs = {}
self.outputs = {}
self.components = {}
self.sn = 0
_, self.filename, self.lineno, _, _, _ = inspect.stack()[1]
|
MIT License
|
satish1901/methane-detection-from-hyperspectral-imagery
|
ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/io/bsqfile.py
|
BsqFile.read_band
|
python
|
def read_band(self, band, use_memmap=True):
from array import array
if self._memmap is not None and use_memmap is True:
data = np.array(self._memmap[band, :, :])
if self.scale_factor != 1:
data = data / float(self.scale_factor)
return data
vals = array(byte_typecode)
offset = self.offset + band * self.sample_size * self.nrows * self.ncols
f = self.fid
f.seek(offset, 0)
vals.fromfile(f, self.nrows * self.ncols * self.sample_size)
arr = np.fromstring(vals.tostring(), dtype=self.dtype)
arr = arr.reshape(self.nrows, self.ncols)
if self.scale_factor != 1:
return arr / float(self.scale_factor)
return arr
|
Reads a single band from the image.
Arguments:
`band` (int):
Index of band to read.
`use_memmap` (bool, default True):
Specifies whether the file's memmap interface should be used
to read the data. Setting this arg to True only has an effect
if a memmap is being used (i.e., if `img.using_memmap` is True).
Returns:
:class:`numpy.ndarray`
An `MxN` array of values for the specified band.
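A hedged usage sketch with the SPy-style entry point assumed by this module; the filename is illustrative:
import spectral
img = spectral.open_image('scene.hdr')             # a BsqFile for BSQ-interleaved data
band0 = img.read_band(0)                           # MxN array for the first band
band0_direct = img.read_band(0, use_memmap=False)  # force the seek/read code path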
|
https://github.com/satish1901/methane-detection-from-hyperspectral-imagery/blob/741dee02e76931f572cf3e06af8faabe871e8e4a/ensemble_detectors/src/Algorithm_1_matchfilter/spectral_lib/spectral/io/bsqfile.py#L73-L117
|
from __future__ import division, print_function, unicode_literals
import numpy as np
from .spyfile import SpyFile, MemmapFile
from spectral.utilities.python23 import typecode
byte_typecode = typecode('b')
class BsqFile(SpyFile, MemmapFile):
def __init__(self, params, metadata=None):
import spectral
self.interleave = spectral.BSQ
if metadata is None:
metadata = {}
SpyFile.__init__(self, params, metadata)
self._memmap = self._open_memmap('r')
def _open_memmap(self, mode):
import os
import sys
if (os.path.getsize(self.filename) < sys.maxsize):
try:
(R, C, B) = self.shape
return np.memmap(self.filename, dtype=self.dtype, mode=mode,
offset=self.offset, shape=(B, R, C))
except:
print('Unable to create memmap interface.')
return None
else:
return None
|
MIT License
|
asteroid-team/torch-audiomentations
|
torch_audiomentations/utils/file.py
|
find_audio_files
|
python
|
def find_audio_files(
root_path,
filename_endings=SUPPORTED_EXTENSIONS,
traverse_subdirectories=True,
follow_symlinks=True,
):
file_paths = []
for root, dirs, filenames in os.walk(root_path, followlinks=follow_symlinks):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.lower().endswith(filename_endings):
file_paths.append(Path(file_path))
if not traverse_subdirectories:
break
return file_paths
|
Return a list of paths to all audio files with the given extension(s) in a directory.
Also traverses subdirectories by default.
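A small usage sketch; the directory path is illustrative:
paths = find_audio_files("/data/recordings")   # recursive .wav search
top_only = find_audio_files("/data/recordings", traverse_subdirectories=False)
print(len(paths), paths[:3])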
|
https://github.com/asteroid-team/torch-audiomentations/blob/1152f1e7734fa17cfd8cd1a05bc5e596859e8813/torch_audiomentations/utils/file.py#L11-L34
|
import os
from pathlib import Path
import soundfile
from .dsp import resample_audio
SUPPORTED_EXTENSIONS = (".wav",)
|
MIT License
|
hubblestack/hubble
|
hubblestack/utils/stdrec.py
|
get_fqdn
|
python
|
def get_fqdn():
minion_id = __opts__['id']
fqdn = __grains__['fqdn']
fqdn = fqdn if fqdn else minion_id
bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
if fqdn in bad_fqdns:
new_fqdn = socket.gethostname()
if '.' not in new_fqdn or new_fqdn in bad_fqdns:
new_fqdn = get_fqdn_ip4()
fqdn = new_fqdn
return fqdn
|
Do lots of error checking and get as close to a usable fqdn as possible
|
https://github.com/hubblestack/hubble/blob/804eb29321f16ae36ce1e4a53f5c3e7bbcda1d0a/hubblestack/utils/stdrec.py#L31-L47
|
import socket
def std_info():
minion_id = __opts__['id']
local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn'])
ret = {
'minion_id': minion_id,
'dest_host': get_fqdn(),
'dest_ip': get_fqdn_ip4(),
'dest_fqdn': local_fqdn,
'system_uuid': __grains__.get('system_uuid')
}
ret.update(__grains__.get('cloud_details', {}))
return ret
|
Apache License 2.0
|
lightstep/lightstep-tracer-python
|
lightstep/crouton/ReportingService.py
|
Iface.Report
|
python
|
def Report(self, auth, request):
pass
|
Parameters:
- auth
- request
|
https://github.com/lightstep/lightstep-tracer-python/blob/d9ff74a2ab7eefa3e0d1f7d53c31cd32ea28b687/lightstep/crouton/ReportingService.py#L23-L30
|
from __future__ import absolute_import
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
|
MIT License
|
heroku/python-salesforce-client
|
salesforce/metadata/v30/__init__.py
|
SalesforceMetadataClient.custom_object
|
python
|
def custom_object(self, object_name, label, plural_label, name_field,
name_field_label, name_field_type='Text',
deployment_status='Deployed', sharing_model='ReadWrite'):
object_name = self._ensure_custom_name(object_name)
name_field = self._ensure_custom_name(name_field)
custom_object = self.client.factory.create('CustomObject')
custom_object.fullName = object_name
custom_object.label = label
custom_object.pluralLabel = plural_label
custom_object.deploymentStatus = deployment_status
custom_object.sharingModel = sharing_model
custom_object.nameField.fullName = name_field
custom_object.nameField.type = name_field_type
custom_object.nameField.label = name_field_label
return custom_object
|
Generate a salesforce custom object.
This method populates all of the required fields for custom objects and
returns a SOAP object which can be passed to CRUD methods.
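A hedged sketch; client construction and the metadata CRUD call are elided, and the field names are illustrative:
client = SalesforceMetadataClient(...)   # construction/authentication elided
obj = client.custom_object(
    object_name='Invoice',               # '__c' suffix is appended automatically
    label='Invoice', plural_label='Invoices',
    name_field='Invoice_Name', name_field_label='Invoice Name')
# obj can now be passed to the client's metadata create/update calls.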
|
https://github.com/heroku/python-salesforce-client/blob/fd14fcd9e51506d7651707ae02adc85b56bec421/salesforce/metadata/v30/__init__.py#L24-L43
|
from __future__ import absolute_import, unicode_literals
import logging
import os
from ...soap.base import SalesforceSoapClientBase
from ...soap.exceptions import SalesforceSoapException
logger = logging.getLogger(__name__)
class SalesforceMetadataClient(SalesforceSoapClientBase):
version = '30.0'
wsdl_path = os.path.join(os.path.dirname(__file__), 'metadata.wsdl')
def _ensure_custom_name(self, name):
if not name.endswith('__c'):
name = '{0}__c'.format(name)
return name
|
MIT License
|
stanvanrooy/instauto
|
instauto/helpers/post.py
|
update_caption
|
python
|
def update_caption(client: ApiClient, media_id: str, new_caption: str) -> bool:
caption = ps.UpdateCaption(
media_id=media_id,
caption_text=new_caption
)
resp = client.post_update_caption(caption)
logger.info(f"Updated caption of post {media_id} to {new_caption}")
return is_resp_ok(resp)
|
Update the caption of a post.
Args:
client: your `ApiClient`
media_id: the media_id of a post
new_caption: the new caption
Returns:
`True` if success else `False`
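A usage sketch; the client is assumed to be already authenticated and the media_id is a placeholder:
client = ApiClient(...)   # construction/login elided
ok = update_caption(client, media_id="1234567890_123", new_caption="Updated text")
print(ok)                 # True on a successful response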
|
https://github.com/stanvanrooy/instauto/blob/a4ec54997480f4ba28236774770ce2f9fd1156a1/instauto/helpers/post.py#L58-L75
|
from typing import List, Optional
from instauto.api.client import ApiClient
from instauto.api.actions import post as ps
from instauto.api.actions.structs.post import RetrieveCommenters, RetrieveLikers
from instauto.api.exceptions import NotFoundError
from instauto.helpers.common import is_resp_ok
from instauto.helpers.search import get_user_id_from_username
from instauto.helpers import models
import logging
logger = logging.getLogger(__name__)
def upload_image_to_feed(
client: ApiClient, image_path: str,
caption: Optional[str] = None, location: Optional[ps.Location] = None
) -> bool:
post = ps.PostFeed(
path=image_path,
caption=caption or '',
location=location,
)
resp = client.post_post(post, 80)
logger.info(f"Uploaded image to feed")
return is_resp_ok(resp)
def upload_image_to_story(client: ApiClient, image_path: str) -> bool:
post = ps.PostStory(
path=image_path
)
resp = client.post_post(post)
logger.info(f"Uploaded image to story")
return is_resp_ok(resp)
|
MIT License
|
onseigmbh/flask-dialogflow
|
flask_dialogflow/conversation.py
|
V2DialogflowConversation.to_webhook_response
|
python
|
def to_webhook_response(self) -> _df.WebhookResponse:
self._webhook_response.output_contexts = self._contexts.as_list()
for integration, integration_conv in self._integration_convs.items():
self._webhook_response.payload[integration] = integration_conv.to_webhook_response_payload()
return self._webhook_response
|
Render the :class:`.WebhookResponse` for this conversation.
This is the last step during conversation handling and is usually done
automatically by the framework. Modifying the conversation after the
response has been rendered may lead to unexpected results.
Returns:
A complete Dialogflow WebhookResponse that can be serialized to
JSON.
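A minimal sketch of the request-to-response round trip; the message text is illustrative:
conv = V2DialogflowConversation()    # empty default request, as in __init__ above
conv.ask("Hello from the webhook!")
response = conv.to_webhook_response()
# response.fulfillment_messages holds the text; response.output_contexts mirrors conv.contexts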
|
https://github.com/onseigmbh/flask-dialogflow/blob/ba3a62e5a128c66d59e7ec189dce82fabbf4eb43/flask_dialogflow/conversation.py#L376-L391
|
from collections import defaultdict
from typing import Optional, Mapping, Any, DefaultDict, List
from flask_dialogflow.context import ContextManager, SessionContext, Context
from flask_dialogflow.google_apis import import_dialogflow_api
from flask_dialogflow.integrations import (
AbstractIntegrationConversation, GenericIntegrationConversation
)
from flask_dialogflow.integrations.actions_on_google import (
V2ActionsOnGoogleDialogflowConversation
)
class V2DialogflowConversation:
_df = import_dialogflow_api('v2')
def __init__(
self,
webhook_request: Optional[_df.WebhookRequest] = None,
context_manager: Optional['ContextManager'] = None,
integration_convs: Optional[
Mapping[str, AbstractIntegrationConversation]
] = None,
) -> None:
if webhook_request is None:
odir = self._df.OriginalDetectIntentRequest()
webhook_request = self._df.WebhookRequest(
query_result=self._df.QueryResult(intent=self._df.Intent()),
original_detect_intent_request=odir,
)
self._webhook_request = webhook_request
self._contexts = context_manager or ContextManager(
contexts=[
Context('_session_context', parameters=SessionContext())
]
)
self._integration_convs: DefaultDict[
str, AbstractIntegrationConversation
] = defaultdict(GenericIntegrationConversation)
if integration_convs:
self._integration_convs.update(integration_convs)
self._session_ctx = self.contexts.get('_session_context').parameters
self._webhook_response = self._df.WebhookResponse()
if self.webhook_request.query_result.intent.is_fallback:
self._session_ctx.fallback_level += 1
else:
self._session_ctx.fallback_level = 0
@property
def webhook_request(self) -> _df.WebhookRequest:
return self._webhook_request
@property
def session(self) -> Optional[str]:
return self.webhook_request.session
@property
def response_id(self) -> Optional[str]:
return self.webhook_request.response_id
@property
def query_text(self) -> Optional[str]:
return self.webhook_request.query_result.query_text
@property
def language_code(self) -> Optional[str]:
return self.webhook_request.query_result.language_code
@property
def intent(self) -> str:
return self.webhook_request.query_result.intent.display_name
@property
def action(self) -> Optional[str]:
return self.webhook_request.query_result.action
@property
def contexts(self) -> 'ContextManager':
return self._contexts
@property
def parameters(self) -> Mapping[str, Any]:
return self.webhook_request.query_result.parameters
@property
def all_required_params_present(self) -> Optional[bool]:
return self.webhook_request.query_result.all_required_params_present
@property
def fallback_level(self) -> int:
return self._session_ctx.fallback_level
@property
def diagnostic_info(self) -> Mapping[str, Any]:
return self.webhook_request.query_result.diagnostic_info
@property
def intent_detection_confidence(self) -> Optional[float]:
return self.webhook_request.query_result.intent_detection_confidence
@property
def speech_recognition_confidence(self) -> Optional[float]:
return self.webhook_request.query_result.speech_recognition_confidence
@property
def sentiment(self) -> Optional[_df.Sentiment]:
res = self.webhook_request.query_result.sentiment_analysis_result
return res.query_text_sentiment if res else None
@property
def source(self) -> str:
return self.webhook_request.original_detect_intent_request.source
@property
def version(self) -> Optional[str]:
return self.webhook_request.original_detect_intent_request.version
@property
def payload(self) -> Mapping[str, Any]:
return self.webhook_request.original_detect_intent_request.payload
@property
def integrations(
self
) -> DefaultDict[str, 'AbstractIntegrationConversation']:
return self._integration_convs
def ask(self, *texts: str) -> None:
self._add_fulfillment_message(text=self._df.Text(list(texts)))
def show_quick_replies(
self, *quick_replies: str, title: Optional[str] = None
) -> None:
self._add_fulfillment_message(
quick_replies=self._df.QuickReplies(
title=title, quick_replies=list(quick_replies)
)
)
def show_card(self, card: _df.Card) -> None:
self._add_fulfillment_message(card=card)
def show_image(self, image: _df.Image) -> None:
self._add_fulfillment_message(image=image)
def _add_fulfillment_message(self, **name_and_message):
self._webhook_response.fulfillment_messages.append(
self._df.Message(**name_and_message)
)
@property
def google(self) -> V2ActionsOnGoogleDialogflowConversation:
return self._integration_convs['google']
@property
def facebook(self) -> GenericIntegrationConversation:
return self._integration_convs['facebook']
@property
def slack(self) -> GenericIntegrationConversation:
return self._integration_convs['slack']
@property
def telegram(self) -> GenericIntegrationConversation:
return self._integration_convs['telegram']
@property
def kik(self) -> GenericIntegrationConversation:
return self._integration_convs['kik']
@property
def skype(self) -> GenericIntegrationConversation:
return self._integration_convs['skype']
@property
def twilio(self) -> GenericIntegrationConversation:
return self._integration_convs['twilio']
@property
def twilio_ip(self) -> GenericIntegrationConversation:
return self._integration_convs['twilio-ip']
@property
def line(self) -> GenericIntegrationConversation:
return self._integration_convs['line']
@property
def spark(self) -> GenericIntegrationConversation:
return self._integration_convs['spark']
@property
def tropo(self) -> GenericIntegrationConversation:
return self._integration_convs['tropo']
@property
def viber(self) -> GenericIntegrationConversation:
return self._integration_convs['viber']
def __getattr__(self, item) -> AbstractIntegrationConversation:
return self._integration_convs[item]
|
Apache License 2.0
|
irskep/clubsandwich
|
clubsandwich/ui/view.py
|
View.draw
|
python
|
def draw(self, ctx):
if self.clear:
ctx.clear_area(self.bounds)
|
:param BearLibTerminalContext ctx:
Draw this view. *ctx* is a full copy of the BearLibTerminal API moved into
this view's frame of reference, so you can use (0, 0) as the upper left
corner.
This method will not be called if :py:attr:`View.is_hidden` is ``True``.
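A hedged sketch of a subclass overriding draw(); the drawing call on ctx is an assumption about the BearLibTerminalContext API:
class LabelView(View):
    def draw(self, ctx):
        super().draw(ctx)             # honors self.clear
        ctx.print(Point(0, 0), "Hi")  # assumed context method; coordinates are view-local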
|
https://github.com/irskep/clubsandwich/blob/bd4aa3cf76bd4a7c8ced3da651e5ccda03944bc3/clubsandwich/ui/view.py#L145-L156
|
import weakref
from collections import namedtuple
from numbers import Real
from clubsandwich.geom import Point, Rect, Size
from clubsandwich.blt.context import BearLibTerminalContext
from .layout_options import LayoutOptions
ZERO_RECT = Rect(Point(0, 0), Size(0, 0))
def _option_field_to_id(val):
if val == 'frame':
value_start = 'frame'
elif isinstance(val, Real):
value_start = 'fraction'
else:
value_start = 'derive'
class View:
def __init__(self, frame=None, subviews=None, scene=None, layout_options=None, clear=False):
if isinstance(layout_options, dict):
opts = LayoutOptions()._asdict()
opts.update(layout_options)
layout_options = LayoutOptions(**opts)
self.clear = clear
self._scene = scene
self._superview_weakref = lambda: None
self.needs_layout = True
self._frame = frame or ZERO_RECT
self._bounds = self.frame.with_origin(Point(0, 0))
self.subviews = []
self.add_subviews(subviews or [])
self.is_first_responder = False
self.is_hidden = False
self.layout_spec = frame
self.layout_options = layout_options or LayoutOptions()
@property
def scene(self):
if self._scene:
return self._scene
else:
return self.superview.scene
@property
def superview(self):
try:
return self._superview_weakref()
except AttributeError:
return None
@superview.setter
def superview(self, new_value):
if new_value:
self._superview_weakref = weakref.ref(new_value)
else:
self._superview_weakref = lambda: None
def set_needs_layout(self, val=True):
self.needs_layout = val
def add_subviews(self, subviews):
for v in subviews:
v.superview = self
self.subviews.extend(subviews)
def remove_subviews(self, subviews):
for v in subviews:
v.superview = None
self.subviews = [v for v in self.subviews if v not in subviews]
def add_subview(self, subview):
self.add_subviews([subview])
def remove_subview(self, subview):
self.remove_subviews([subview])
def perform_draw(self, ctx):
if self.is_hidden:
return
self.draw(ctx)
for view in self.subviews:
with ctx.translate(view.frame.origin):
view.perform_draw(ctx)
|
MIT License
|
ww-tech/primrose
|
primrose/templates/awesome_model.py
|
AwesomeModel.train_model
|
python
|
def train_model(self, data_object):
print("I am training my model.")
try:
training_data = data_object.get_upstream_data(self.instance_name)
except:
print("No upstream data exists")
return data_object
|
Code to train your model and return a data_object after adding any training or model info
|
https://github.com/ww-tech/primrose/blob/ab3733dea316e3bea3659493587f97955cf6d983/primrose/templates/awesome_model.py#L36-L52
|
from primrose.base.model import AbstractModel
class AwesomeModel(AbstractModel):
@staticmethod
def necessary_config(node_config):
return AbstractModel.necessary_config(node_config)
|
Apache License 2.0
|
tellapart/taba
|
src/taba/server/model/client_storage.py
|
ClientStorageManager.GetAllClients
|
python
|
def GetAllClients(self):
op = util.StrictOp('retrieving all Client Names',
self.index_manager.GetAllValues)
return op.response_value
|
Retrieve all known Client Names.
Returns:
List of Client Names.
|
https://github.com/tellapart/taba/blob/0254e76348d247ab957ff547df9662a69cab4c9c/src/taba/server/model/client_storage.py#L77-L85
|
import logging
from taba.server.storage import util
from taba.server.storage.double_index_storage import DoubleIndexStorageManager
KEY_PREFIX = 'C'
CACHE_TTL_SEC = 3600
LOG = logging.getLogger(__name__)
class ClientStorageManager(object):
def __init__(self, engine):
self.index_manager = DoubleIndexStorageManager(
engine=engine,
key_prefix=KEY_PREFIX,
cache_ttl=CACHE_TTL_SEC)
def GetCids(self, clients, create_if_new=False):
op = util.StrictOp('retrieving CIDs for Client Names',
self.index_manager.GetIdsForValues,
clients, create_if_new)
return op.response_value
def GetClients(self, cids):
op = util.StrictOp('retrieving Client Names for CIDs',
self.index_manager.GetValuesForIds,
cids)
return op.response_value
|
Apache License 2.0
|
rustychris/stompy
|
stompy/spatial/interpXYZ.py
|
Inputs.loadnc
|
python
|
def loadnc(self):
nc = Dataset(self.infile, 'r')
if self.convert2utm:
try:
xvar = 'lon'
yvar = 'lat'
self.xgrd = nc.variables[xvar][:]
except:
xvar = 'longitude'
yvar = 'latitude'
self.xgrd = nc.variables[xvar][:]
else:
xvar = 'x'
yvar = 'y'
try:
self.xgrd = nc.variables[xvar][:]
self.ygrd = nc.variables[yvar][:]
self.Zin = nc.variables['topo'][:]
except:
self.xgrd = nc.variables[xvar][:]
self.ygrd = nc.variables[yvar][:]
self.Zin = nc.variables['z'][:]
nc.close()
|
Load the DEM data from a netcdf file
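A usage sketch; loadnc() is normally invoked by __init__ when the input ends in '.nc', and the file is assumed to carry x/y coordinate variables plus a topo or z variable:
dem = Inputs('bathymetry.nc', convert2utm=False)
print(dem.XY.shape, dem.Zin.shape)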
|
https://github.com/rustychris/stompy/blob/ef04d8b3ee9c9af827c87c72c7b50d365e5e567d/stompy/spatial/interpXYZ.py#L364-L392
|
import gzip
from scipy import spatial
import numpy as np
from netCDF4 import Dataset
from scipy.interpolate import LinearNDInterpolator,interp1d
import time
import matplotlib.pyplot as plt
from .. import memoize
class interpXYZ(object):
""
method = 'nn'
maxdist=np.inf
NNear = 3
p = 1.0
varmodel = 'spherical'
nugget = 0.1
sill = 0.8
vrange = 250.0
fill_value=0.
clip=False
def __init__(self,XY,XYout,**kwargs):
self.__dict__.update(kwargs)
self.bbox = [XYout[:,0].min(),XYout[:,0].max(),XYout[:,1].min(),XYout[:,1].max()]
if self.clip:
print('Clipping points outside of range')
self.XY = self.clipPoints(XY)
else:
self.XY = XY
self.XYout = XYout
if self.method=='nn':
self._nearestNeighbour()
elif self.method=='idw':
self._invdistweight()
elif self.method=='kriging':
self._krig()
elif self.method=='linear':
self._linear()
else:
print('Error - Unknown interpolation type: %s.'%self.method)
def __call__(self,Zin):
if self.clip:
self.Zin = Zin[self.clipindex]
else:
self.Zin = Zin
if self.method in ['nn','idw','kriging']:
self.Z = self.Finterp(Zin)
elif self.method=='linear':
self.Finterp.values[:]=Zin[:,np.newaxis]
self.Z=self.Finterp(self.XYout)
else:
print('Error - Unknown interpolation type: %s.'%self.method)
return self.Z
def _nearestNeighbour(self):
self.Finterp = nn(self.XY,self.XYout,maxdist=self.maxdist)
def _invdistweight(self):
self.Finterp=idw(self.XY,self.XYout,maxdist=self.maxdist,NNear=self.NNear,p=self.p)
def _krig(self):
self.Finterp = kriging(self.XY,self.XYout,maxdist=self.maxdist,NNear=self.NNear)
def _linear(self):
self.Finterp = LinearNDInterpolator(self.XY,np.zeros((self.XY.shape[0]),),fill_value=self.fill_value)
def clipPoints(self,LL):
X = LL[:,0]
Y = LL[:,1]
self.clipindex = np.all([X>=self.bbox[0],X<=self.bbox[1],Y>=self.bbox[2],Y<=self.bbox[3]],axis=0)
return LL[self.clipindex,:]
def save(self,outfile='DEM.nc'):
if self.isnorth:
proj = "UTM %d (%s) in northern hemisphere."%(self.utmzone,self.CS)
else:
proj = "UTM %d (%s) in southern hemisphere."%(self.utmzone,self.CS)
intparamstr = 'Interpolation Type: %s, Number of neighbours: %d, Maximum search distance: %3.1f m'%(self.method,self.NNear,self.maxdist)
if self.method=='idw':
intparamstr += ', IDW power: %2.1f'%self.p
elif self.method=='kriging':
intparamstr += ', Variogram model: %s, sill: %3.1f, nugget: %3.1f, range: %3.1f'%(self.varmodel,self.sill,self.nugget,self.vrange)
globalatts = {'title':'DEM model', 'history':'Created on '+time.ctime(), 'Input dataset':self.infile, 'Projection':proj, 'Interpolation Parameters':intparamstr}
nc = Dataset(outfile, 'w', format='NETCDF4')
for gg in globalatts.keys():
nc.setncattr(gg,globalatts[gg])
dimnamex = 'nx'
dimlength = self.grd.nx
nc.createDimension(dimnamex,dimlength)
dimnamey = 'ny'
dimlength = self.grd.ny
nc.createDimension(dimnamey,dimlength)
tmpvarx=nc.createVariable('X','f8',(dimnamex,))
tmpvary=nc.createVariable('Y','f8',(dimnamey,))
tmpvarx[:] = self.grd.X[0,:]
tmpvary[:] = self.grd.Y[:,0]
tmpvarx.setncattr('long_name','Easting')
tmpvarx.setncattr('units','metres')
tmpvary.setncattr('long_name','Northing')
tmpvary.setncattr('units','metres')
tmpvarz=nc.createVariable('topo','f8',(dimnamey,dimnamex),zlib=True,least_significant_digit=1)
tmpvarz[:] = self.Z
tmpvarz.setncattr('long_name','Topographic elevation')
tmpvarz.setncattr('units','metres')
tmpvarz.setncattr('coordinates','X, Y')
tmpvarz.setncattr('positive','up')
tmpvarz.setncattr('datum',self.vdatum)
nc.close()
print('DEM save to %s.'%outfile)
def scatter(self,**kwargs):
fig= plt.figure(figsize=(9,8))
plt.scatter(np.ravel(self.grd.X),np.ravel(self.grd.Y),c=np.ravel(self.Z),s=10,**kwargs)
plt.colorbar()
return fig
class Interp4D(object):
zinterp_method = 'linear'
tinterp_method = 'linear'
def __init__(self,xin,yin,zin,tin,xout,yout,zout,tout,mask=None,**kwargs):
self.is4D=True
if zin == None:
self.is4D=False
self.nz=1
else:
self.zin = zin
self.zout = zout
self.nz = zin.shape[0]
self.szxy = xin.shape
if mask==None:
self.mask = np.zeros((self.nz,)+self.szxy,np.bool)
else:
self.mask=mask
self._Fxy = []
for kk in range(self.nz):
if self.is4D:
mask = self.mask[kk,...]
else:
mask = self.mask
xyin = np.vstack([xin[~mask].ravel(),yin[~mask].ravel()]).T
xyout = np.vstack([xout.ravel(),yout.ravel()]).T
self.nxy = xyout.shape[0]
self._Fxy.append(interpXYZ(xyin,xyout,**kwargs))
self.tin = othertime.SecondsSince(tin)
self.tout = othertime.SecondsSince(tout)
self.nt = tin.shape[0]
def __call__(self,data):
if self.is4D:
data_xy = np.zeros((self.nt,self.nz,self.nxy))
data=data.reshape((self.nt,self.nz,self.szxy[0]))
else:
data_xy = np.zeros((self.nt,self.nxy))
data=data.reshape((self.nt,self.szxy[0]))
for tt in range(self.nt):
if self.is4D:
for kk in range(self.nz):
mask = self.mask[kk,...]
tmp = self._Fxy[kk](data[tt,kk,~mask].ravel())
data_xy[tt,kk,:] = tmp
else:
data_xy[tt,:] = self._Fxy[0](data[tt,~self.mask].ravel())
if self.is4D:
_Fz = interp1d(self.zin,data_xy,axis=1,kind=self.zinterp_method, bounds_error=False,fill_value=0.)
data_xyz = _Fz(self.zout)
else:
data_xyz = data_xy
_Ft = interp1d(self.tin,data_xyz,axis=0,kind=self.tinterp_method, bounds_error=False,fill_value=0.)
return _Ft(self.tout)
class Inputs(object):
convert2utm=True
CS='NAD83'
utmzone=15
isnorth=True
vdatum = 'MSL'
shapefieldname='contour'
def __init__(self,infile,**kwargs):
self.infile = infile
self.__dict__.update(kwargs)
print('Reading data from: %s...'%self.infile)
if self.infile[-3:]=='.gz':
LL,self.Zin = read_xyz_gz(self.infile)
elif self.infile[-3:] in ['txt','dat']:
LL,self.Zin = read_xyz(self.infile)
self.Zin = np.ravel(self.Zin)
elif self.infile[-3:]=='shp':
LL,self.Zin = readShpBathy(self.infile,FIELDNAME=self.shapefieldname)
elif self.infile[-3:]=='.nc':
self.loadnc()
LL = self._returnXY(self.xgrd,self.ygrd)
self.Zin = np.ravel(self.Zin)
elif self.infile[-3:] in ['dem','asc']:
xgrd,ygrd,self.Zin = readraster(self.infile)
LL = self._returnXY(xgrd,ygrd)
self.Zin = np.ravel(self.Zin)
self.npt = len(LL)
if self.convert2utm:
print('Transforming the coordinates to UTM...')
self.XY=ll2utm(LL,self.utmzone,self.CS,self.isnorth)
else:
self.XY=LL
self._returnNonNan()
def _returnXY(self, x, y):
X,Y = np.meshgrid(x,y)
nx = np.prod(np.shape(X))
return np.hstack((np.reshape(np.ravel(X),(nx,1)),np.reshape(np.ravel(Y),(nx,1))))
def _returnNonNan(self):
ind = np.isnan(self.Zin)
ind = ind==False
self.Zin=self.Zin[ind]
self.XY = self.XY[ind,:]
|
MIT License
|
kiwicom/pytest-recording
|
src/pytest_recording/validation.py
|
validate_block_network_mark
|
python
|
def validate_block_network_mark(mark: Mark) -> None:
if mark.args or list(mark.kwargs) not in ([], ALLOWED_BLOCK_NETWORK_ARGUMENTS):
allowed_arguments = ", ".join("`{}`".format(arg) for arg in ALLOWED_BLOCK_NETWORK_ARGUMENTS)
raise UsageError(
"Invalid arguments to `block_network`. "
"It accepts only the following keyword arguments: {}. "
"Got args: {!r}; kwargs: {!r}".format(allowed_arguments, mark.args, mark.kwargs)
)
|
Validate the input arguments for the `block_network` pytest mark.
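A minimal usage sketch: applied through pytest's mark machinery, only the `allowed_hosts` keyword is accepted, and anything else is rejected by this validator at collection time (the test names below are illustrative).
import pytest

# Accepted: `allowed_hosts` is the only permitted keyword argument.
@pytest.mark.block_network(allowed_hosts=["127.0.0.1"])
def test_local_traffic_only():
    ...

# Rejected: positional arguments (or unknown keywords) cause
# validate_block_network_mark to raise UsageError.
@pytest.mark.block_network("127.0.0.1")
def test_invalid_mark():
    ...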
|
https://github.com/kiwicom/pytest-recording/blob/53f42b22f8ca5de00a534e95ea51b9fdcf4a84ef/src/pytest_recording/validation.py#L8-L16
|
from _pytest.mark import Mark
from .exceptions import UsageError
ALLOWED_BLOCK_NETWORK_ARGUMENTS = ["allowed_hosts"]
|
MIT License
|
gmr/consulate
|
consulate/utils.py
|
response_ok
|
python
|
def response_ok(response, raise_on_404=False):
if response.status_code == 200:
return True
elif response.status_code == 400:
raise exceptions.ClientError(_response_error(response))
elif response.status_code == 401:
raise exceptions.ACLDisabled(_response_error(response))
elif response.status_code == 403:
raise exceptions.Forbidden(_response_error(response))
elif response.status_code == 404 and raise_on_404:
raise exceptions.NotFound(_response_error(response))
elif response.status_code == 500:
raise exceptions.ServerError(_response_error(response))
return False
|
Evaluate the HTTP response and raise the appropriate exception if
required.
:param requests.response response: The HTTP response
:param bool raise_on_404: Raise an exception on 404 error
:rtype: bool
:raises: consulate.exceptions.ConsulateException
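A small sketch of the status-code mapping, using a hypothetical stand-in object instead of a real `requests` response; only the attributes this function actually reads are provided.
from consulate import exceptions
from consulate.utils import response_ok

class FakeResponse:
    # Stand-in carrying just the attributes response_ok touches.
    def __init__(self, status_code, body=b''):
        self.status_code = status_code
        self.body = body

assert response_ok(FakeResponse(200)) is True       # success
assert response_ok(FakeResponse(404)) is False      # 404 is silent by default
try:
    response_ok(FakeResponse(404), raise_on_404=True)
except exceptions.NotFound:
    pass                                            # 404 raises when requested
try:
    response_ok(FakeResponse(403, body=b'ACL denied'))
except exceptions.Forbidden:
    pass                                            # 403 always raises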
|
https://github.com/gmr/consulate/blob/3980bd9caf8fd410d4b043c7cea536eef1a15a2e/consulate/utils.py#L64-L86
|
import re
import sys
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
try:
from urllib import parse as _urlparse
except ImportError:
import urlparse as _urlparse
from consulate import exceptions
DURATION_PATTERN = re.compile(r'^(?:(?:-|)(?:\d+|\d+\.\d+)(?:Β΅s|ms|s|m|h))+$')
PYTHON3 = True if sys.version_info > (3, 0, 0) else False
def is_string(value):
checks = [isinstance(value, t) for t in [bytes, str]]
if not PYTHON3:
checks.append(isinstance(value, unicode))
return any(checks)
def maybe_encode(value):
try:
return value.encode('utf-8')
except AttributeError:
return value
def _response_error(response):
return (response.body.decode('utf-8')
if hasattr(response, 'body') and response.body
else str(response.status_code))
|
BSD 3-Clause New or Revised License
|
openstack/ironic
|
ironic/tests/unit/common/test_driver_factory.py
|
TestFakeHardware.supported_deploy_interfaces
|
python
|
def supported_deploy_interfaces(self):
return [fake.FakeDeploy]
|
List of supported deploy interfaces.
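For context, hardware types advertise their interfaces through properties of this shape. A minimal sketch of a custom hardware type follows; the class name is made up and only two of the required properties are shown.
from ironic.drivers import hardware_type
from ironic.drivers.modules import fake

class MinimalHardware(hardware_type.AbstractHardwareType):
    # Hypothetical hardware type exposing only the fake test interfaces.

    @property
    def supported_deploy_interfaces(self):
        # Ordered by priority; used when computing a default interface.
        return [fake.FakeDeploy]

    @property
    def supported_power_interfaces(self):
        return [fake.FakePower]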
|
https://github.com/openstack/ironic/blob/a4a6f26333be31b84a9b1a874dde506e61d407d3/ironic/tests/unit/common/test_driver_factory.py#L385-L387
|
from unittest import mock
from oslo_utils import uuidutils
from stevedore import named
from ironic.common import driver_factory
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers import base as drivers_base
from ironic.drivers import fake_hardware
from ironic.drivers import hardware_type
from ironic.drivers.modules import fake
from ironic.drivers.modules import noop
from ironic.tests import base
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class FakeEp(object):
name = 'fake-hardware'
class DriverLoadTestCase(db_base.DbTestCase):
def _fake_init_name_err(self, *args, **kwargs):
kwargs['on_load_failure_callback'](None, FakeEp, NameError('aaa'))
def _fake_init_driver_err(self, *args, **kwargs):
kwargs['on_load_failure_callback'](None, FakeEp,
exception.DriverLoadError(
driver='aaa', reason='bbb'))
def test_driver_load_error_if_driver_enabled(self):
self.config(enabled_hardware_types=['fake-hardware'])
with mock.patch.object(named.NamedExtensionManager,
'__init__', self._fake_init_driver_err):
self.assertRaises(
exception.DriverLoadError,
driver_factory.HardwareTypesFactory._init_extension_manager)
def test_wrap_in_driver_load_error_if_driver_enabled(self):
self.config(enabled_hardware_types=['fake-hardware'])
with mock.patch.object(named.NamedExtensionManager,
'__init__', self._fake_init_name_err):
self.assertRaises(
exception.DriverLoadError,
driver_factory.HardwareTypesFactory._init_extension_manager)
@mock.patch.object(named.NamedExtensionManager, 'names',
autospec=True)
def test_no_driver_load_error_if_driver_disabled(self, mock_em):
self.config(enabled_hardware_types=[])
with mock.patch.object(named.NamedExtensionManager,
'__init__', self._fake_init_driver_err):
driver_factory.HardwareTypesFactory._init_extension_manager()
self.assertEqual(1, mock_em.call_count)
@mock.patch.object(driver_factory.LOG, 'warning', autospec=True)
def test_driver_duplicated_entry(self, mock_log):
self.config(enabled_hardware_types=['fake-hardware',
'fake-hardware'])
driver_factory.HardwareTypesFactory._init_extension_manager()
self.assertEqual(
['fake-hardware'],
driver_factory.HardwareTypesFactory._extension_manager.names())
self.assertTrue(mock_log.called)
@mock.patch.object(driver_factory.LOG, 'warning', autospec=True)
def test_driver_empty_entry(self, mock_log):
self.config(enabled_hardware_types=['fake-hardware', ''])
driver_factory.HardwareTypesFactory._init_extension_manager()
self.assertEqual(
['fake-hardware'],
driver_factory.HardwareTypesFactory._extension_manager.names())
self.assertTrue(mock_log.called)
@mock.patch.object(driver_factory, '_warn_if_unsupported', autospec=True)
def test_driver_init_checks_unsupported(self, mock_warn):
self.config(enabled_hardware_types=['fake-hardware'])
driver_factory.HardwareTypesFactory._init_extension_manager()
self.assertEqual(
['fake-hardware'],
driver_factory.HardwareTypesFactory._extension_manager.names())
self.assertTrue(mock_warn.called)
class WarnUnsupportedDriversTestCase(base.TestCase):
@mock.patch.object(driver_factory.LOG, 'warning', autospec=True)
def _test__warn_if_unsupported(self, supported, mock_log):
ext = mock.Mock()
ext.obj = mock.Mock()
ext.obj.supported = supported
driver_factory._warn_if_unsupported(ext)
if supported:
self.assertFalse(mock_log.called)
else:
self.assertTrue(mock_log.called)
def test__warn_if_unsupported_with_supported(self):
self._test__warn_if_unsupported(True)
def test__warn_if_unsupported_with_unsupported(self):
self._test__warn_if_unsupported(False)
class NetworkInterfaceFactoryTestCase(db_base.DbTestCase):
@mock.patch.object(driver_factory, '_warn_if_unsupported', autospec=True)
def test_build_driver_for_task(self, mock_warn):
factory = driver_factory.NetworkInterfaceFactory
node = obj_utils.create_test_node(self.context,
network_interface='flat')
with task_manager.acquire(self.context, node.id) as task:
extension_mgr = factory._extension_manager
self.assertIn('flat', extension_mgr)
self.assertIn('neutron', extension_mgr)
self.assertIn('noop', extension_mgr)
self.assertEqual(extension_mgr['flat'].obj, task.driver.network)
self.assertEqual('ironic.hardware.interfaces.network',
factory._entrypoint_name)
self.assertEqual(['flat', 'neutron', 'noop'],
sorted(factory._enabled_driver_list))
self.assertTrue(mock_warn.called)
def test_build_driver_for_task_default_is_flat(self):
factory = driver_factory.NetworkInterfaceFactory
node = obj_utils.create_test_node(self.context)
with task_manager.acquire(self.context, node.id) as task:
extension_mgr = factory._extension_manager
self.assertIn('flat', extension_mgr)
self.assertIn('neutron', extension_mgr)
self.assertIn('noop', extension_mgr)
self.assertEqual(extension_mgr['flat'].obj, task.driver.network)
def test_build_driver_for_task_unknown_network_interface(self):
node = obj_utils.create_test_node(self.context,
network_interface='meow')
self.assertRaises(exception.InterfaceNotFoundInEntrypoint,
task_manager.acquire, self.context, node.id)
class StorageInterfaceFactoryTestCase(db_base.DbTestCase):
def test_build_interface_for_task(self):
factory = driver_factory.StorageInterfaceFactory
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
with task_manager.acquire(self.context, node.id) as task:
manager = factory._extension_manager
self.assertIn('noop', manager)
self.assertEqual('noop', task.node.storage_interface)
class NewDriverFactory(driver_factory.BaseDriverFactory):
_entrypoint_name = 'woof'
class NewFactoryTestCase(db_base.DbTestCase):
def test_new_driver_factory_unknown_entrypoint(self):
factory = NewDriverFactory()
self.assertEqual('woof', factory._entrypoint_name)
self.assertEqual([], factory._enabled_driver_list)
class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
def test_no_network_interface(self):
node = obj_utils.get_test_node(self.context)
self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
self.assertEqual('flat', node.network_interface)
def test_none_network_interface(self):
node = obj_utils.get_test_node(self.context, network_interface=None)
self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
self.assertEqual('flat', node.network_interface)
def test_no_network_interface_default_from_conf(self):
self.config(default_network_interface='noop')
node = obj_utils.get_test_node(self.context)
self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
self.assertEqual('noop', node.network_interface)
def test_create_node_valid_interfaces(self):
node = obj_utils.get_test_node(self.context,
network_interface='noop',
storage_interface='noop')
self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
self.assertEqual('noop', node.network_interface)
self.assertEqual('noop', node.storage_interface)
def test_create_node_invalid_network_interface(self):
node = obj_utils.get_test_node(self.context,
network_interface='banana')
self.assertRaises(exception.InterfaceNotFoundInEntrypoint,
driver_factory.check_and_update_node_interfaces,
node)
def test_create_node_valid_network_interface_instance_info_override(self):
instance_info = {'network_interface': 'noop',
'storage_interface': 'noop'}
node = obj_utils.get_test_node(self.context,
instance_info=instance_info)
self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
self.assertIsNone(node.network_interface)
self.assertIsNone(node.storage_interface)
self.assertEqual('noop', node.instance_info.get('network_interface'))
self.assertEqual('noop', node.instance_info.get('storage_interface'))
def test_create_node_invalid_network_interface_instance_info_override(
self):
instance_info = {'network_interface': 'banana'}
node = obj_utils.get_test_node(self.context,
instance_info=instance_info)
self.assertRaises(exception.InterfaceNotFoundInEntrypoint,
driver_factory.check_and_update_node_interfaces,
node)
def _get_valid_default_interface_name(self, iface):
i_name = 'fake'
if iface == 'network':
i_name = 'noop'
return i_name
def _set_config_interface_options_hardware_type(self):
for iface in drivers_base.ALL_INTERFACES:
i_name = self._get_valid_default_interface_name(iface)
config_kwarg = {'enabled_%s_interfaces' % iface: [i_name],
'default_%s_interface' % iface: i_name}
self.config(**config_kwarg)
def test_create_node_dynamic_driver_interfaces_set(self):
self._set_config_interface_options_hardware_type()
for iface in drivers_base.ALL_INTERFACES:
iface_name = '%s_interface' % iface
i_name = self._get_valid_default_interface_name(iface)
node_kwargs = {'uuid': uuidutils.generate_uuid(),
iface_name: i_name}
node = obj_utils.get_test_node(
self.context, driver='fake-hardware', **node_kwargs)
driver_factory.check_and_update_node_interfaces(node)
self.assertEqual(i_name, getattr(node, iface_name))
def test_node_update_dynamic_driver_set_interfaces(self):
self._set_config_interface_options_hardware_type()
for iface in drivers_base.ALL_INTERFACES:
iface_name = '%s_interface' % iface
node_kwargs = {'uuid': uuidutils.generate_uuid()}
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
**node_kwargs)
i_name = self._get_valid_default_interface_name(iface)
setattr(node, iface_name, i_name)
driver_factory.check_and_update_node_interfaces(node)
self.assertEqual(i_name, getattr(node, iface_name))
class DefaultInterfaceTestCase(db_base.DbTestCase):
def setUp(self):
super(DefaultInterfaceTestCase, self).setUp()
self.config(enabled_hardware_types=['manual-management'])
self.driver = driver_factory.get_hardware_type('manual-management')
def test_from_config(self):
self.config(default_deploy_interface='direct')
iface = driver_factory.default_interface(self.driver, 'deploy')
self.assertEqual('direct', iface)
def test_from_additional_defaults(self):
self.config(default_storage_interface=None)
iface = driver_factory.default_interface(self.driver, 'storage')
self.assertEqual('noop', iface)
def test_network_from_additional_defaults_hardware_type(self):
self.config(default_network_interface=None)
self.config(dhcp_provider='none', group='dhcp')
self.config(enabled_network_interfaces=['neutron'])
iface = driver_factory.default_interface(self.driver, 'network')
self.assertEqual('neutron', iface)
def test_calculated_with_one(self):
self.config(default_deploy_interface=None)
self.config(enabled_deploy_interfaces=['ansible'])
iface = driver_factory.default_interface(self.driver, 'deploy')
self.assertEqual('ansible', iface)
def test_calculated_with_two(self):
self.config(default_deploy_interface=None)
self.config(enabled_deploy_interfaces=['ansible', 'direct'])
iface = driver_factory.default_interface(self.driver, 'deploy')
self.assertEqual('direct', iface)
def test_calculated_with_unsupported(self):
self.config(default_deploy_interface=None)
self.config(enabled_deploy_interfaces=['fake', 'ansible'])
iface = driver_factory.default_interface(self.driver, 'deploy')
self.assertEqual('ansible', iface)
def test_calculated_no_answer(self):
self.config(default_power_interface=None)
self.config(enabled_power_interfaces=[])
self.assertRaisesRegex(
exception.NoValidDefaultForInterface,
"For hardware type 'ManualManagementHardware', no default "
"value found for power interface.",
driver_factory.default_interface, self.driver, 'power')
def test_calculated_no_answer_drivername(self):
self.config(default_power_interface=None)
self.config(enabled_power_interfaces=[])
self.assertRaisesRegex(
exception.NoValidDefaultForInterface,
"For hardware type 'foo', no default value found for power "
"interface.",
driver_factory.default_interface, self.driver, 'power',
driver_name='foo')
def test_calculated_no_answer_drivername_node(self):
self.config(default_power_interface=None)
self.config(enabled_power_interfaces=[])
self.assertRaisesRegex(
exception.NoValidDefaultForInterface,
"For node bar with hardware type 'foo', no default "
"value found for power interface.",
driver_factory.default_interface, self.driver, 'power',
driver_name='foo', node='bar')
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
def test_check_exception_IncompatibleInterface(self, mock_get_interface):
self.config(enabled_management_interfaces=['redfish'])
self.config(default_management_interface=['redfish'])
mock_get_interface.side_effect = exception.IncompatibleInterface(
interface_type='management',
hardware_type=self.driver)
self.assertRaises(exception.NoValidDefaultForInterface,
driver_factory.default_interface, self.driver,
'management')
class TestFakeHardware(hardware_type.AbstractHardwareType):
@property
def supported_bios_interfaces(self):
return [fake.FakeBIOS]
@property
def supported_boot_interfaces(self):
return [fake.FakeBoot]
@property
def supported_console_interfaces(self):
return [fake.FakeConsole]
@property
|
Apache License 2.0
|
chrhenning/hypercl
|
toy_example/gaussian_mixture_data.py
|
GaussianData.__init__
|
python
|
def __init__(self, mean=np.array([0, 0]), cov=0.05**2 * np.eye(2),
num_train=100, num_test=100, map_function=None, rseed=None):
super().__init__()
warn('Please use class ' +
'"data.special.gaussian_mixture_data.GaussianData" instead.',
DeprecationWarning)
if rseed is None:
rand = np.random
else:
rand = np.random.RandomState(rseed)
n_x = mean.size
assert(n_x == 2)
train_x = rand.multivariate_normal(mean, cov, size=num_train)
test_x = rand.multivariate_normal(mean, cov, size=num_test)
if map_function is None:
map_function = lambda x: multivariate_normal.pdf(x, mean, cov).reshape(-1, 1)
train_y = map_function(train_x)
test_y = map_function(test_x)
else:
train_y = map_function(train_x)
test_y = map_function(test_x)
self._data['classification'] = False
self._data['sequence'] = False
self._data['in_data'] = np.vstack([train_x, test_x])
self._data['in_shape'] = [n_x]
self._data['out_data'] = np.vstack([train_y, test_y])
self._data['out_shape'] = [1]
self._data['train_inds'] = np.arange(num_train)
self._data['test_inds'] = np.arange(num_train, num_train + num_test)
self._mean = mean
self._cov = cov
self._map = map_function
|
Generate a new dataset.
The input data x for train and test samples will be drawn iid from the
given Gaussian. By default, the map function is the probability
density of the given Gaussian: y = f(x) = p(x).
Args:
mean: The mean of the Gaussian.
cov: The covariance of the Gaussian.
num_train: Number of training samples.
num_test: Number of test samples.
map_function (optional): A function handle that receives input
samples and maps them to output samples. If not specified, the
density function will be used as map function.
rseed: If None, the current random state of numpy is used to
generate the data. Otherwise, a new random state with the given
seed is generated.
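A minimal construction sketch, assuming the module is importable under the path shown below; the sample counts and seed are arbitrary.
import numpy as np
from toy_example.gaussian_mixture_data import GaussianData

# 50 training and 20 test points from an isotropic 2D Gaussian; the target
# is the density of that Gaussian evaluated at each sample.
data = GaussianData(mean=np.array([1., -1.]), cov=0.1**2 * np.eye(2),
                    num_train=50, num_test=20, rseed=42)
print(data._data['in_data'].shape)    # (70, 2) - train and test inputs stacked
print(data._data['out_data'].shape)   # (70, 1) - matching density values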
|
https://github.com/chrhenning/hypercl/blob/4645ef0e5b64abe40674d287b65c23e109ecfca1/toy_example/gaussian_mixture_data.py#L120-L180
|
import numpy as np
from scipy.stats import multivariate_normal
import itertools
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from scipy.spatial import cKDTree
from warnings import warn
from data.dataset import Dataset
DEFAULT_MEANS = [np.array([i, j]) for i, j in
itertools.product(range(-4, 5, 2), range(-4, 5, 2))]
DEFAULT_VARIANCES = [0.05**2 * np.eye(len(mean)) for mean in DEFAULT_MEANS]
METZ_ANGLES = [i/8 * 2 * np.pi for i in range(8)]
METZ_MEANS = [np.array([2. * np.sin(a), 2. * np.cos(a)]) for a in METZ_ANGLES]
METZ_VARIANCES = [0.02**2 * np.eye(len(mean)) for mean in METZ_MEANS]
CHE_ANGLES = [(i+0.5)/6 * 2 * np.pi for i in range(6)]
CHE_MEANS = [np.array([5. * np.sin(a), 5. * np.cos(a)]) for a in CHE_ANGLES]
CHE_VARIANCES = [0.1**2 * np.eye(len(mean)) for mean in CHE_MEANS]
def get_gmm_tasks(means=DEFAULT_MEANS, covs=DEFAULT_VARIANCES, num_train=100,
num_test=100, map_functions=None, rseed=None):
warn('Please use function ' +
'"data.special.gaussian_mixture_data.get_gmm_tasks" instead.',
DeprecationWarning)
assert(len(means) == len(covs))
if map_functions is None:
map_functions = [None] * len(means)
else:
assert(len(map_functions) == len(means))
ret = []
for i in range(len(means)):
ret.append(GaussianData(mean=means[i], cov=covs[i], num_train=num_train,
num_test=num_test, map_function=map_functions[i], rseed=rseed))
return ret
class GaussianData(Dataset):
|
Apache License 2.0
|
anatolikalysch/vmattack
|
lib/Instruction.py
|
Instruction.get_op_str
|
python
|
def get_op_str(self, op):
if op < 1 or op > len(self.Instruction.operands):
return None
return str(self.Instruction.operands[op-1]).lower()
|
@param op Operand index; the first operand is op = 1
@return Returns the string representation of the operand
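A small lookup sketch, assuming a decodable two-operand instruction; the module targets Python 2 (note the `ord(x)` loop in `__init__`), so the opcode bytes are written as a plain byte-string literal and `SV.dissassm_type` is set to 32-bit first.
from lib import StartVal as SV
from lib.Instruction import Instruction

SV.dissassm_type = 32                       # decode in 32-bit mode
inst = Instruction(0x401000, '\x89\xd8')    # bytes for: mov eax, ebx
if inst.valid:
    print(inst.get_op_str(1))               # 'eax'
    print(inst.get_op_str(2))               # 'ebx'
    print(inst.get_op_str(3))               # None - operand index out of range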
|
https://github.com/anatolikalysch/vmattack/blob/67dcce6087163d85bbe7780e3f6e6e9e72e2212a/lib/Instruction.py#L457-L464
|
import distorm3
from lib import StartVal as SV
class Instruction(object):
def __init__(self, offset, code, type = distorm3.Decode32Bits, feature = 0):
self.valid = False
if SV.dissassm_type == 64:
type = distorm3.Decode64Bits
else:
type = distorm3.Decode32Bits
inst = distorm3.Decompose(offset, code, type, feature)
if len(inst) == 1:
self.Instruction = inst[0]
if self.Instruction.valid:
self.valid = True
self.opcode_len = len(code)
self.opcode_bytes = []
self.addr = offset
for x in code:
self.opcode_bytes.append(ord(x))
self._len = len(self.Instruction.operands) + 1
def __str__(self):
return str(self.Instruction).lower()
def __len__(self):
return self._len
def is_catch_instr(self):
if len(self.Instruction.operands) != 2:
return False
if (self.is_mov() and
self.Instruction.operands[1].type == distorm3.OPERAND_MEMORY and
self.Instruction.operands[0].type == distorm3.OPERAND_REGISTER):
reg_index = self.Instruction.operands[1].index
if reg_index != None:
reg_name = distorm3.Registers[reg_index]
if('ESI' in reg_name or 'RSI' in reg_name):
return True
else:
return False
else:
return False
else:
return False
def is_mov(self):
mnem = distorm3.Mnemonics[self.Instruction.opcode]
return ('MOV' in mnem) and (self.Instruction.instructionClass == 'ISC_INTEGER')
def is_byte_mov(self):
if len(self.Instruction.operands) != 2:
return False
return (self.Instruction.operands[0].size == 8 or
self.Instruction.operands[1].size == 8)
def is_word_mov(self):
if len(self.Instruction.operands) != 2:
return False
sizeOp1 = self.Instruction.operands[0].size
sizeOp2 = self.Instruction.operands[1].size
if (sizeOp1 == 16 and sizeOp2 >= 16):
return True
elif (sizeOp1 >= 16 and sizeOp2 == 16):
return True
else:
return False
def is_double_mov(self):
if len(self.Instruction.operands) != 2:
return False
sizeOp1 = self.Instruction.operands[0].size
sizeOp2 = self.Instruction.operands[1].size
if (sizeOp1 == 32 and sizeOp2 >= 32):
return True
elif (sizeOp1 >= 32 and sizeOp2 == 32):
return True
else:
return False
def is_quad_mov(self):
if len(self.Instruction.operands) != 2:
return False
sizeOp1 = self.Instruction.operands[0].size
sizeOp2 = self.Instruction.operands[1].size
if (sizeOp1 == 64 and sizeOp2 >= 64):
return True
elif (sizeOp1 >= 64 and sizeOp2 == 64):
return True
else:
return False
def get_mov_size(self):
if self.is_quad_mov():
return 8
elif self.is_double_mov():
return 4
elif self.is_word_mov():
return 2
elif self.is_byte_mov():
return 1
else:
return None
def get_size(self):
return self.Instruction.size
def is_mov_basep_stackp(self):
if len(self.Instruction.operands) != 2:
return False
Op0 = self.Instruction.operands[0]
Op1 = self.Instruction.operands[1]
if (Op0.type == distorm3.OPERAND_REGISTER and
Op1.type == distorm3.OPERAND_REGISTER and
(Op0.name == 'EBP' or Op0.name == 'RBP') and
(Op1.name == 'ESP' or Op1.name == 'RSP')):
return True
else:
return False
def is_write_stack(self):
if len(self.Instruction.operands) != 2:
return False
op0 = self.Instruction.operands[0]
if op0.index == None or op0.disp != 0:
return False
if (self.is_mov() and
op0.type == distorm3.OPERAND_MEMORY and
(distorm3.Registers[op0.index] == 'EBP' or
distorm3.Registers[op0.index] == 'RBP')):
return True
else:
return False
def is_read_stack(self):
if len(self.Instruction.operands) != 2:
return False
op1 = self.Instruction.operands[1]
if op1.index == None or op1.disp != 0:
return False
if (self.is_mov() and
op1.type == distorm3.OPERAND_MEMORY and
(distorm3.Registers[op1.index] == 'EBP' or
distorm3.Registers[op1.index] == 'RBP')):
return True
else:
return False
def is_isp_mov(self):
if len(self.Instruction.operands) != 2:
return False
op0 = self.Instruction.operands[0]
if op0.index == None:
return False
if (self.is_mov() and
op0.type == distorm3.OPERAND_REGISTER and
(distorm3.Registers[op0.index] == 'ESI' or
distorm3.Registers[op0.index] == 'RSI')):
return True
else:
return False
def op_is_reg(self, op):
if op < 1 or op > len(self.Instruction.operands):
return False
return self.Instruction.operands[op-1].type == distorm3.OPERAND_REGISTER
def op_is_imm(self, op):
if op < 1 or op > len(self.Instruction.operands):
return False
return self.Instruction.operands[op-1].type == distorm3.OPERAND_IMMEDIATE
def op_is_mem(self, op):
if op < 1 or op > len(self.Instruction.operands):
return False
return self.Instruction.operands[op-1].type == distorm3.OPERAND_MEMORY
def op_is_mem_abs(self, op):
if op < 1 or op > len(self.Instruction.operands):
return False
return self.Instruction.operands[op-1].type == distorm3.OPERAND_ABSOLUTE_ADDRESS
def is_vinst(self):
for op in self.Instruction.operands:
if op.type == distorm3.OPERAND_REGISTER:
if op.name == 'ESI' or op.name == 'RSI':
return True
elif op.type == distorm3.OPERAND_MEMORY:
if op.index != None:
if (distorm3.Registers[op.index] == 'ESI' or
distorm3.Registers[op.index] == 'RSI'):
return True
return False
def is_ret(self):
return self.Instruction.flowControl == 'FC_RET'
def is_call(self):
return (self.Instruction.mnemonic.startswith('CALL') and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_and(self):
return (self.Instruction.mnemonic.startswith('AND') and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_shr(self):
return (self.Instruction.mnemonic == 'SHR' and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_shl(self):
return (self.Instruction.mnemonic == 'SHL' and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_shld(self):
return (self.Instruction.mnemonic == 'SHLD' and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_shrd(self):
return (self.Instruction.mnemonic == 'SHRD' and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_cwde(self):
return self.Instruction.mnemonic == 'CWDE'
def is_cbw(self):
return self.Instruction.mnemonic == 'CBW'
def is_cdqe(self):
return self.Instruction.mnemonic == 'CDQE'
def is_imul(self):
return self.Instruction.mnemonic == 'IMUL'
def is_idiv(self):
return self.Instruction.mnemonic == 'IDIV'
def is_add(self):
return (self.Instruction.mnemonic.startswith('ADD') and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_not(self):
return (self.Instruction.mnemonic.startswith('NOT') and
self.Instruction.instructionClass == 'ISC_INTEGER')
def is_pop(self):
return (self.Instruction.mnemonic == 'POP' or
self.Instruction.mnemonic == 'POPF')
def is_push(self):
return (self.Instruction.mnemonic == 'PUSH' or
self.Instruction.mnemonic == 'PUSHF')
def is_uncnd_jmp(self):
return self.Instruction.flowControl == 'FC_UNC_BRANCH'
def is_sub_basepointer(self):
return (('SUB' in self.Instruction.mnemonic) and
(self.Instruction.instructionClass == 'ISC_INTEGER') and
(self.Instruction.operands[0].name == 'EBP' or
self.Instruction.operands[0].name == 'RBP'))
def is_add_basepointer(self):
return (('ADD' in self.Instruction.mnemonic) and
(self.Instruction.instructionClass == 'ISC_INTEGER') and
(self.Instruction.operands[0].name == 'EBP' or
self.Instruction.operands[0].name == 'RBP'))
|
MIT License
|
oboynitro/django-frontier
|
venv/Lib/site-packages/pip/_internal/req/req_uninstall.py
|
StashedUninstallPathSet._get_directory_stash
|
python
|
def _get_directory_stash(self, path):
try:
save_dir = AdjacentTempDirectory(path)
except OSError:
save_dir = TempDirectory(kind="uninstall")
self._save_dirs[os.path.normcase(path)] = save_dir
return save_dir.path
|
Stashes a directory.
Directories are stashed adjacent to their original location if
possible, or else moved/copied into the user's temp dir.
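The fallback is easy to see in isolation; a rough sketch mirroring the body of this helper (internal pip API, and the package path below is made up).
from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory

path = "/usr/lib/python3/site-packages/example_pkg"
try:
    # Preferred: a sibling directory (e.g. ".../~xample_pkg") on the same
    # filesystem, so stashing is a cheap rename.
    save_dir = AdjacentTempDirectory(path)
except OSError:
    # Fallback: a fresh directory under the user's temp dir.
    save_dir = TempDirectory(kind="uninstall")
print(save_dir.path)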
|
https://github.com/oboynitro/django-frontier/blob/89bec0199aadcc5e976a1cc42ad9284603f6439a/venv/Lib/site-packages/pip/_internal/req/req_uninstall.py#L227-L240
|
import csv
import functools
import logging
import os
import sys
import sysconfig
from importlib.util import cache_from_source
from pip._vendor import pkg_resources
from pip._internal.exceptions import UninstallationError
from pip._internal.locations import bin_py, bin_user
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
ask,
dist_in_usersite,
dist_is_local,
egg_link_path,
is_local,
normalize_path,
renames,
rmtree,
)
from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
)
from pip._vendor.pkg_resources import Distribution
logger = logging.getLogger(__name__)
def _script_names(dist, script_name, is_gui):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
exe_name = os.path.join(bin_dir, script_name)
paths_to_remove = [exe_name]
if WINDOWS:
paths_to_remove.append(exe_name + '.exe')
paths_to_remove.append(exe_name + '.exe.manifest')
if is_gui:
paths_to_remove.append(exe_name + '-script.pyw')
else:
paths_to_remove.append(exe_name + '-script.py')
return paths_to_remove
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
@_unique
def uninstallation_paths(dist):
r = csv.reader(dist.get_metadata_lines('RECORD'))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
path = os.path.join(dn, base + '.pyo')
yield path
def compact(paths):
sep = os.path.sep
short_paths = set()
for path in sorted(paths, key=len):
should_skip = any(
path.startswith(shortpath.rstrip("*")) and
path[len(shortpath.rstrip("*").rstrip(sep))] == sep
for shortpath in short_paths
)
if not should_skip:
short_paths.add(path)
return short_paths
def compress_for_rename(paths):
case_map = {os.path.normcase(p): p for p in paths}
remaining = set(case_map)
unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len)
wildcards = set()
def norm_join(*a):
return os.path.normcase(os.path.join(*a))
for root in unchecked:
if any(os.path.normcase(root).startswith(w)
for w in wildcards):
continue
all_files = set()
all_subdirs = set()
for dirname, subdirs, files in os.walk(root):
all_subdirs.update(norm_join(root, dirname, d)
for d in subdirs)
all_files.update(norm_join(root, dirname, f)
for f in files)
if not (all_files - remaining):
remaining.difference_update(all_files)
wildcards.add(root + os.sep)
return set(map(case_map.__getitem__, remaining)) | wildcards
def compress_for_output_listing(paths):
will_remove = set(paths)
will_skip = set()
folders = set()
files = set()
for path in will_remove:
if path.endswith(".pyc"):
continue
if path.endswith("__init__.py") or ".dist-info" in path:
folders.add(os.path.dirname(path))
files.add(path)
_normcased_files = set(map(os.path.normcase, files))
folders = compact(folders)
for folder in folders:
for dirpath, _, dirfiles in os.walk(folder):
for fname in dirfiles:
if fname.endswith(".pyc"):
continue
file_ = os.path.join(dirpath, fname)
if (os.path.isfile(file_) and
os.path.normcase(file_) not in _normcased_files):
will_skip.add(file_)
will_remove = files | {
os.path.join(folder, "*") for folder in folders
}
return will_remove, will_skip
class StashedUninstallPathSet:
def __init__(self):
self._save_dirs = {}
self._moves = []
|
MIT License
|
psss/tmt
|
tmt/steps/provision/testcloud.py
|
GuestTestcloud.start
|
python
|
def start(self):
if self.opt('dry'):
return
os.makedirs(TESTCLOUD_DATA, exist_ok=True)
os.makedirs(TESTCLOUD_IMAGES, exist_ok=True)
GuestTestcloud._create_template()
self.prepare_config()
if not re.match(r'^(?:https?|file)://.*', self.image_url):
self.image_url = self._guess_image_url(self.image_url)
self.debug(f"Guessed image url: '{self.image_url}'", level=3)
self.image = testcloud.image.Image(self.image_url)
self.verbose('qcow', self.image.name, 'green')
if not os.path.exists(self.image.local_path):
self.info('progress', 'downloading...', 'cyan')
try:
self.image.prepare()
except FileNotFoundError as error:
raise ProvisionError(
f"Image '{self.image.local_path}' not found.", original=error)
except (testcloud.exceptions.TestcloudPermissionsError,
PermissionError) as error:
raise ProvisionError(
f"Failed to prepare the image. Check the '{TESTCLOUD_IMAGES}' "
f"directory permissions.", original=error)
_, run_id = os.path.split(self.parent.plan.my_run.workdir)
self.instance_name = self._random_name(
prefix="tmt-{0}-".format(run_id[-3:]))
self.instance = testcloud.instance.Instance(
name=self.instance_name, image=self.image,
connection='qemu:///session')
self.verbose('name', self.instance_name, 'green')
try:
import guestfs
except ImportError:
match_legacy = re.search(
r'(rhel|centos).*-7', self.image_url.lower())
if match_legacy:
self.instance.pci_net = "e1000"
else:
self.instance.pci_net = "virtio-net-pci"
self.prepare_ssh_key()
self.info('progress', 'booting...', 'cyan')
self.instance.ram = self.memory
self.instance.disk_size = self.disk
try:
self.instance.prepare()
self.instance.spawn_vm()
self.instance.start(DEFAULT_BOOT_TIMEOUT)
except (testcloud.exceptions.TestcloudInstanceError,
libvirt.libvirtError) as error:
raise ProvisionError(
f'Failed to boot testcloud instance ({error}).')
self.guest = self.instance.get_ip()
self.port = self.instance.get_instance_port()
self.verbose('ip', self.guest, 'green')
self.verbose('port', self.port, 'green')
self.instance.create_ip_file(self.guest)
timeout = DEFAULT_CONNECT_TIMEOUT
wait = 1
while True:
try:
self.execute('whoami')
break
except tmt.utils.RunError:
if timeout < 0:
raise ProvisionError(
f'Failed to connect in {DEFAULT_CONNECT_TIMEOUT}s.')
self.debug(
f'Failed to connect to machine, retrying, '
f'{fmf.utils.listed(timeout, "second")} left.')
time.sleep(wait)
wait += 1
timeout -= wait
|
Start the provisioned guest
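The method ends with a connect loop that keeps retrying `self.execute('whoami')` with a growing delay until the guest answers or the time budget runs out. A standalone sketch of that back-off pattern; the `probe` callable stands in for the SSH check.
import time

DEFAULT_CONNECT_TIMEOUT = 60    # mirrors the module-level constant

def wait_until_ready(probe, timeout=DEFAULT_CONNECT_TIMEOUT):
    # Retry probe() with an increasing delay until it succeeds or time runs out.
    wait = 1
    remaining = timeout
    while True:
        try:
            return probe()
        except Exception:
            if remaining < 0:
                raise RuntimeError(f"Failed to connect in {timeout}s.")
            time.sleep(wait)
            wait += 1
            remaining -= wait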
|
https://github.com/psss/tmt/blob/1cd284946155eced0c6522fa42e327ab22683d0d/tmt/steps/provision/testcloud.py#L422-L519
|
import os
import re
import time
import click
import fmf
import requests
import tmt
from tmt.utils import WORKDIR_ROOT, ProvisionError, retry_session
def import_testcloud():
global testcloud
global libvirt
try:
import libvirt
import testcloud.image
import testcloud.instance
import testcloud.util
except ImportError:
raise ProvisionError(
"Install 'testcloud' to provision using this method.")
TESTCLOUD_DATA = os.path.join(WORKDIR_ROOT, 'testcloud')
TESTCLOUD_IMAGES = os.path.join(TESTCLOUD_DATA, 'images')
USER_DATA = """#cloud-config
password: %s
chpasswd:
expire: false
users:
- default
- name: {user_name}
ssh_authorized_keys:
- {public_key}
ssh_pwauth: true
disable_root: false
runcmd:
- sed -i -e '/^.*PermitRootLogin/s/^.*$/PermitRootLogin yes/'
/etc/ssh/sshd_config
- systemctl reload sshd
- [sh, -c, 'mkdir -p /etc/systemd/network/']
# echo multiple times, sh echo doesn't support newline
- [sh, -c, 'if [ ! -f /etc/systemd/network/20-tc-usernet.network ];
then echo "[Match]" >> /etc/systemd/network/20-tc-usernet.network &&
echo "Name=en*" >> /etc/systemd/network/20-tc-usernet.network &&
echo "[Network]" >> /etc/systemd/network/20-tc-usernet.network &&
echo "DHCP=yes" >> /etc/systemd/network/20-tc-usernet.network; fi']
- [sh, -c, 'if systemctl status systemd-networkd |
grep -q "enabled;\\svendor\\spreset:\\senabled"; then
systemctl restart systemd-networkd; fi']
# CentOS and RHEL 8 keeps waiting before restarting sshd causing delays
- [sh, -c, 'if cat /etc/os-release |
grep -q platform:el8; then systemctl restart sshd; fi']
"""
DOMAIN_TEMPLATE_NAME = 'domain-template.jinja'
DOMAIN_TEMPLATE_FILE = os.path.join(TESTCLOUD_DATA, DOMAIN_TEMPLATE_NAME)
DOMAIN_TEMPLATE = """<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
<name>{{ domain_name }}</name>
<uuid>{{ uuid }}</uuid>
<memory unit='KiB'>{{ memory }}</memory>
<currentMemory unit='KiB'>{{ memory }}</currentMemory>
<vcpu placement='static'>1</vcpu>
<os>
<type arch='x86_64' machine='pc'>hvm</type>
{{ uefi_loader }}
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<vmport state='off'/>
</features>
<cpu mode='host-passthrough'/>
<clock offset='utc'>
<timer name='rtc' tickpolicy='catchup'/>
<timer name='pit' tickpolicy='delay'/>
<timer name='hpet' present='no'/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<pm>
<suspend-to-mem enabled='no'/>
<suspend-to-disk enabled='no'/>
</pm>
<devices>
<emulator>{{ emulator_path }}</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='unsafe'/>
<source file="{{ disk }}"/>
<target dev='vda' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='raw' cache='unsafe'/>
<source file="{{ seed }}"/>
<target dev='vdb' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
</disk>
<interface type='user'>
<mac address="{{ mac_address }}"/>
<ip family='ipv4' address='172.17.2.0' prefix='24'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='keyboard' bus='ps2'/>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/urandom</backend>
</rng>
</devices>
{{ qemu_args }}
</domain>
"""
DEFAULT_BOOT_TIMEOUT = 60
DEFAULT_CONNECT_TIMEOUT = 60
class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):
_guest = None
_methods = [
tmt.steps.Method(name='virtual.testcloud', doc=__doc__, order=50),
]
@classmethod
def options(cls, how=None):
return [
click.option(
'-i', '--image', metavar='IMAGE',
help='Select image to be used. Provide a short name, '
'full path to a local file or a complete url.'),
click.option(
'-m', '--memory', metavar='MEMORY',
help='Set available memory in MB, 2048 MB by default.'),
click.option(
'-D', '--disk', metavar='MEMORY',
help='Specify disk size in GB, 10 GB by default.'),
click.option(
'-u', '--user', metavar='USER',
help='Username to use for all guest operations.'),
] + super().options(how)
def default(self, option, default=None):
defaults = {
'user': 'root',
'memory': 2048,
'disk': 10,
'image': 'fedora',
}
if option in defaults:
return defaults[option]
return default
def show(self):
super().show(['image', 'user', 'memory', 'disk'])
def wake(self, data=None):
super().wake(['image', 'memory', 'disk', 'user'])
for key in ['memory', 'disk']:
if isinstance(self.get(key), str):
self.data[key] = int(self.data[key])
if data:
guest = GuestTestcloud(data, name=self.name, parent=self.step)
guest.wake()
self._guest = guest
def go(self):
super().go()
data = dict()
for key in ['image', 'user', 'memory', 'disk']:
data[key] = self.get(key)
if key == 'memory':
self.info('memory', f"{self.get('memory')} MB", 'green')
elif key == 'disk':
self.info('disk', f"{self.get('disk')} GB", 'green')
else:
self.info(key, data[key], 'green')
self._guest = GuestTestcloud(data, name=self.name, parent=self.step)
self._guest.start()
def guest(self):
return self._guest
@classmethod
def clean_images(cls, clean, dry):
clean.info('testcloud', shift=1, color='green')
if not os.path.exists(TESTCLOUD_IMAGES):
clean.warn(
f"Directory '{TESTCLOUD_IMAGES}' does not exist.", shift=2)
return
for image in os.listdir(TESTCLOUD_IMAGES):
image = os.path.join(TESTCLOUD_IMAGES, image)
if dry:
clean.verbose(f"Would remove '{image}'.", shift=2)
else:
clean.verbose(f"Removing '{image}'.", shift=2)
os.remove(image)
class GuestTestcloud(tmt.Guest):
def _get_url(self, url, message):
timeout = DEFAULT_CONNECT_TIMEOUT
wait = 1
while True:
try:
response = retry_session().get(url)
if response.ok:
return response
except requests.RequestException:
pass
if timeout < 0:
raise ProvisionError(
f'Failed to {message} in {DEFAULT_CONNECT_TIMEOUT}s.')
self.debug(
f'Unable to {message} ({url}), retrying, '
f'{fmf.utils.listed(timeout, "second")} left.')
time.sleep(wait)
wait += 1
timeout -= wait
def _guess_image_url(self, name):
if os.path.isabs(name) and os.path.isfile(name):
return f'file://{name}'
name = name.lower().strip()
url = None
matched_fedora = re.match(r'^f(edora)?-?(\d+)$', name)
matched_centos = [re.match(r'^c(entos)?-?(\d+)$', name),
re.match(r'^c(entos-stream)?-?(\d+)$', name)]
matched_ubuntu = re.match(r'^u(buntu)?-?(\w+)$', name)
matched_debian = re.match(r'^d(ebian)?-?(\w+)$', name)
if name == 'fedora':
url = testcloud.util.get_fedora_image_url("latest")
elif name == 'centos':
url = testcloud.util.get_centos_image_url("latest")
elif name == 'centos-stream':
url = testcloud.util.get_centos_image_url(
"latest", stream=True)
elif name == 'ubuntu':
url = testcloud.util.get_ubuntu_image_url("latest")
elif name == 'debian':
url = testcloud.util.get_debian_image_url("latest")
elif matched_fedora:
url = testcloud.util.get_fedora_image_url(matched_fedora.group(2))
elif matched_centos[0]:
url = testcloud.util.get_centos_image_url(
matched_centos[0].group(2))
elif matched_centos[1]:
url = testcloud.util.get_centos_image_url(
matched_centos[1].group(2), stream=True)
elif matched_ubuntu:
url = testcloud.util.get_ubuntu_image_url(matched_ubuntu.group(2))
elif matched_debian:
url = testcloud.util.get_debian_image_url(matched_debian.group(2))
elif 'rawhide' in name:
url = testcloud.util.get_fedora_image_url("rawhide")
if not url:
raise ProvisionError(f"Could not map '{name}' to compose.")
return url
@staticmethod
def _create_template():
with open(DOMAIN_TEMPLATE_FILE, 'w') as template:
template.write(DOMAIN_TEMPLATE)
def load(self, data):
super().load(data)
self.image = None
self.image_url = data.get('image')
self.instance = None
self.instance_name = data.get('instance')
self.memory = data.get('memory')
self.disk = data.get('disk')
def save(self):
data = super().save()
data['instance'] = self.instance_name
data['image'] = self.image_url
return data
def wake(self):
self.debug(
f"Waking up testcloud instance '{self.instance_name}'.",
level=2, shift=0)
self.prepare_config()
self.image = testcloud.image.Image(self.image_url)
self.instance = testcloud.instance.Instance(
self.instance_name, image=self.image,
connection='qemu:///session')
def prepare_ssh_key(self):
self.key = os.path.join(self.workdir, 'id_rsa')
self.pubkey = os.path.join(self.workdir, 'id_rsa.pub')
self.debug('Generating an ssh key.')
self.run(["ssh-keygen", "-f", self.key, "-N", ""], shell=False)
with open(self.pubkey, 'r') as pubkey:
self.config.USER_DATA = USER_DATA.format(
user_name=self.user, public_key=pubkey.read())
def prepare_config(self):
import_testcloud()
self.config = testcloud.config.get_config()
self.config.DOWNLOAD_PROGRESS = self.opt('debug') > 2
self.config.DATA_DIR = TESTCLOUD_DATA
self.config.STORE_DIR = TESTCLOUD_IMAGES
|
MIT License
|
naparuba/opsbro
|
opsbro/misc/internalcherrypy/cherrypy/_cpcompat_subprocess.py
|
Popen._execute_child
|
python
|
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format(comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
creationflags |= _subprocess.CREATE_NEW_CONSOLE
try:
try:
hp, ht, pid, tid = _subprocess.CreateProcess(
executable, args,
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
raise WindowsError(*e.args)
finally:
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
|
Execute program (MS Windows version)
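A non-string `args` is first flattened with `list2cmdline` (defined later in this module; the standard library's `subprocess.list2cmdline` follows the same rules), so a quick look at its quoting behaviour:
from subprocess import list2cmdline

# Spaces force quoting, embedded quotes are backslash-escaped, and
# backslashes are only doubled when they precede a quote.
print(list2cmdline(['a b', 'c"d']))      # "a b" c\"d
print(list2cmdline(['x\\y', 'end\\']))   # x\y end\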
|
https://github.com/naparuba/opsbro/blob/98618a002cd47250d21e7b877a24448fc95fec80/opsbro/misc/internalcherrypy/cherrypy/_cpcompat_subprocess.py#L862-L940
|
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
import signal
import errno
try:
set
except NameError:
from sets import Set as set
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode)
if mswindows:
import threading
import msvcrt
import _subprocess
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import fcntl
import pickle
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call",
"check_output", "CalledProcessError"]
if mswindows:
from _subprocess import CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE, SW_HIDE, STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxint)
if res is not None:
try:
_active.remove(inst)
except ValueError:
pass
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except (OSError, IOError), e:
if e.errno == errno.EINTR:
continue
raise
def call(*popenargs, **kwargs):
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def list2cmdline(seq):
result = []
needquote = False
for arg in seq:
bs_buf = []
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
bs_buf.append(c)
elif c == '"':
result.append('\\' * len(bs_buf) * 2)
bs_buf = []
result.append('\\"')
else:
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
_cleanup()
self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds and (stdin is not None or stdout is not None or
stderr is not None):
raise ValueError("close_fds is not supported on Windows "
"platforms if you redirect "
"stdin/stdout/stderr")
else:
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if mswindows:
if p2cwrite is not None:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread is not None:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread is not None:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite is not None:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread is not None:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread is not None:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, _maxint=sys.maxint, _active=_active):
if not getattr(self, '_child_created', False):
return
self._internal_poll(_deadstate=_maxint)
if self.returncode is None and _active is not None:
_active.append(self)
def communicate(self, input=None):
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError, e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
return self._internal_poll()
if mswindows:
def _get_handles(self, stdin, stdout, stderr):
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = _subprocess.GetStdHandle(
_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(
_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _subprocess.GetStdHandle(
_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
return _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(),
handle,
_subprocess.GetCurrentProcess(),
0,
1,
_subprocess.DUPLICATE_SAME_ACCESS
)
def _find_w9xpopen(self):
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
|
MIT License
|
ha0y/xiaomi_miot_raw
|
custom_components/xiaomi_miot_raw/fan.py
|
MiotSubFan.speed
|
python
|
def speed(self):
if not NEW_FAN:
try:
self._speed = self.get_key_by_value(self._ctrl_params['speed'],self.device_state_attributes[self._did_prefix + 'speed'])
except KeyError:
self._speed = None
return self._speed
else:
return None
|
Return the current speed.
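The property is a reverse lookup: the device reports a raw value and the entity maps it back to a named speed from `_ctrl_params['speed']`. A standalone sketch of that lookup; the helper below only mirrors the intent of `get_key_by_value`, and the mapping is made up.
def get_key_by_value(params, value):
    # Return the key whose value matches, or None if nothing matches.
    for key, val in params.items():
        if val == value:
            return key
    return None

speed_params = {'low': 1, 'medium': 2, 'high': 3}   # hypothetical ctrl params
print(get_key_by_value(speed_params, 2))            # 'medium'
print(get_key_by_value(speed_params, 9))            # None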
|
https://github.com/ha0y/xiaomi_miot_raw/blob/6fe412b3fbb7fca11f0faf518c654c84228412cf/custom_components/xiaomi_miot_raw/fan.py#L282-L291
|
import asyncio
import logging
from functools import partial
from collections import OrderedDict
from datetime import timedelta
from homeassistant.const import __version__ as current_version
from distutils.version import StrictVersion
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_SPEED,
PLATFORM_SCHEMA,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
SUPPORT_DIRECTION,
FanEntity)
from homeassistant.const import *
from homeassistant.exceptions import PlatformNotReady
from homeassistant.util import color
from miio.exceptions import DeviceException
from .deps.miio_new import MiotDevice
from .basic_dev_class import (
GenericMiotDevice,
ToggleableMiotDevice,
MiotSubDevice,
MiotSubToggleableDevice
)
from . import async_generic_setup_platform
from .switch import BinarySelectorEntity
from .deps.const import (
DOMAIN,
CONF_UPDATE_INSTANT,
CONF_MAPPING,
CONF_CONTROL_PARAMS,
CONF_CLOUD,
CONF_MODEL,
ATTR_STATE_VALUE,
ATTR_MODEL,
ATTR_FIRMWARE_VERSION,
ATTR_HARDWARE_VERSION,
SCHEMA,
MAP,
DUMMY_IP,
DUMMY_TOKEN,
)
import copy
TYPE = 'fan'
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Generic MIoT " + TYPE
DATA_KEY = TYPE + '.' + DOMAIN
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
SCHEMA
)
SCAN_INTERVAL = timedelta(seconds=10)
NEW_FAN = True if StrictVersion(current_version.replace(".dev","a")) >= StrictVersion("2021.2.9") else False
SUPPORT_PRESET_MODE = 8
@asyncio.coroutine
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
hass.data[DOMAIN]['add_handler'].setdefault(TYPE, {})
if 'config_entry' in config:
id = config['config_entry'].entry_id
hass.data[DOMAIN]['add_handler'][TYPE].setdefault(id, async_add_devices)
await async_generic_setup_platform(
hass,
config,
async_add_devices,
discovery_info,
TYPE,
{'default': MiotFan},
{'default': MiotSubFan, 'a_l': MiotActionList}
)
async def async_setup_entry(hass, config_entry, async_add_entities):
config = copy.copy(hass.data[DOMAIN]['configs'].get(config_entry.entry_id, dict(config_entry.data)))
await async_setup_platform(hass, config, async_add_entities)
class MiotFan(ToggleableMiotDevice, FanEntity):
def __init__(self, device, config, device_info, hass, main_mi_type):
ToggleableMiotDevice.__init__(self, device, config, device_info, hass, main_mi_type)
self._speed = None
self._mode = None
self._oscillation = None
hass.async_add_job(self.create_sub_entities)
@property
def supported_features(self):
s = 0
if self._did_prefix + 'oscillate' in self._mapping:
s |= SUPPORT_OSCILLATE
if self._did_prefix + 'motor_control' in self._mapping:
s |= SUPPORT_DIRECTION
if self._did_prefix + 'speed' in self._mapping:
s |= (SUPPORT_SET_SPEED if not NEW_FAN else SUPPORT_SET_SPEED)
if self._did_prefix + 'mode' in self._mapping:
s |= (SUPPORT_SET_SPEED if not NEW_FAN else SUPPORT_PRESET_MODE)
return s
@property
def speed_list(self) -> list:
if NEW_FAN:
return None
else:
if 'speed' in self._ctrl_params:
return list(self._ctrl_params['speed'].keys())
elif 'mode' in self._ctrl_params:
return list(self._ctrl_params['mode'].keys())
@property
def _speed_list_without_preset_modes(self) -> list:
if 'stepless_speed' not in self._ctrl_params:
return list(self._ctrl_params['speed'].keys())
else:
return list(range(
self._ctrl_params['stepless_speed']['value_range'][0],
self._ctrl_params['stepless_speed']['value_range'][1] + 1,
self._ctrl_params['stepless_speed']['value_range'][2],
))
@property
def speed(self):
return (self._speed or self._mode) if not NEW_FAN else self._speed
@property
def preset_modes(self) -> list:
try:
return list(self._ctrl_params['mode'].keys())
except KeyError:
return []
@property
def preset_mode(self):
return self._mode
@property
def speed_count(self):
return len(self._speed_list_without_preset_modes)
@property
def oscillating(self):
return self._oscillation
async def async_oscillate(self, oscillating: bool) -> None:
result = await self.set_property_new(self._did_prefix + "oscillate", oscillating)
if result:
self._oscillation = True
self._skip_update = True
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
result = True
if not self.is_on:
result &= await self.set_property_new(self._did_prefix + "switch_status", self._ctrl_params['switch_status']['power_on'])
parameters = []
if 'from_stepless_speed' in kwargs:
parameters.append({**{'did': self._did_prefix + "stepless_speed", 'value': speed}, **(self._mapping[self._did_prefix + 'stepless_speed'])})
elif speed:
if 'speed' in self._ctrl_params:
parameters.append({**{'did': self._did_prefix + "speed", 'value': self._ctrl_params['speed'][speed]}, **(self._mapping[self._did_prefix + 'speed'])})
elif 'mode' in self._ctrl_params:
parameters.append({**{'did': self._did_prefix + "mode", 'value': self._ctrl_params['mode'][speed]}, **(self._mapping[self._did_prefix + 'mode'])})
if parameters:
result &= await self.set_property_new(multiparams = parameters)
if result:
self._state = True
if speed is not None:
self._speed = speed
self._skip_update = True
async def async_set_speed(self, speed: str) -> None:
if 'stepless_speed' not in self._ctrl_params or not NEW_FAN:
await self.async_turn_on(speed)
else:
await self.async_turn_on(speed, from_stepless_speed = True)
async def async_set_preset_mode(self, preset_mode: str) -> None:
result = await self.set_property_new(self._did_prefix + "mode", self._ctrl_params['mode'][preset_mode])
if result:
self._state = True
self._mode = preset_mode
self._skip_update = True
@property
def current_direction(self) -> str:
return None
async def async_set_direction(self, direction: str) -> None:
if direction == 'forward':
d = 'left'
elif direction == 'reverse':
d = 'right'
else:
d = direction
if d not in self._ctrl_params['motor_control']:
raise TypeError(f"Your fan does not support {direction}.")
await self.set_property_new(self._did_prefix + "motor_control", self._ctrl_params['motor_control'][d])
def _handle_platform_specific_attrs(self):
super()._handle_platform_specific_attrs()
try:
self._speed = self.get_key_by_value(self._ctrl_params['speed'],self._state_attrs.get(self._did_prefix + 'speed')) if 'stepless_speed' not in self._ctrl_params or not NEW_FAN else self._state_attrs.get(self._did_prefix + 'stepless_speed')
except KeyError:
self._speed = None
try:
self._mode = self.get_key_by_value(self._ctrl_params['mode'],self._state_attrs.get(self._did_prefix + 'mode'))
except KeyError:
self._mode = None
self._oscillation = self._state_attrs.get(self._did_prefix + 'oscillate')
class MiotSubFan(MiotSubToggleableDevice, FanEntity):
def __init__(self, parent_device, mapping, params, mitype):
super().__init__(parent_device, mapping, params, mitype)
self._speed = None
self._oscillation = None
@property
def supported_features(self):
s = 0
if 'oscillate' in self._mapping:
s |= SUPPORT_OSCILLATE
if 'speed' in self._mapping:
s |= (SUPPORT_SET_SPEED if not NEW_FAN else SUPPORT_PRESET_MODE)
return s
@property
def speed_list(self) -> list:
if NEW_FAN:
return None
else:
return list(self._ctrl_params['speed'].keys())
@property
def preset_modes(self) -> list:
return list(self._ctrl_params['speed'].keys())
@property
|
Apache License 2.0
|
debith/py3traits
|
src/pytraits/support/inspector.py
|
isclassinstance
|
python
|
def isclassinstance(object):
if not hasattr(object, "__class__"):
return False
if isbuiltin(object.__class__):
return False
return True
|
Convenience method to check if an object is an instance of a non-builtin class.
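A minimal usage sketch (Point is a hypothetical class; the import path follows the file above):
from pytraits.support.inspector import isclassinstance
class Point:
    pass
assert isclassinstance(Point()) is True     # instance of a user-defined class
assert isclassinstance(42) is False         # int is a builtin type
assert isclassinstance(object()) is False   # object itself is builtin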
|
https://github.com/debith/py3traits/blob/bc30a2c8ba1f4279477f5aa0a9a023e930855d03/src/pytraits/support/inspector.py#L104-L110
|
import inspect
from collections import OrderedDict as odict
from pytraits.support.magic import type_safe
__all__ = ["Inspector"]
def isproperty(object):
return isinstance(object, property)
def isbuiltin(object):
if inspect.isbuiltin(object):
return True
return getattr(object, '__module__', None) == 'builtins'
def isclass(object):
if not inspect.isclass(object):
return False
if isbuiltin(object):
return False
return type not in inspect.getmro(object)
def ismetaclass(object):
if not inspect.isclass(object):
return False
if isbuiltin(object):
return False
return type in inspect.getmro(object)
def _get_dict_function(object):
try:
return object.__self__.__dict__[object.__name__]
except (AttributeError, KeyError):
return None
def isclassmethod(object):
if isinstance(object, classmethod):
return True
original = _get_dict_function(object)
return isinstance(original, classmethod)
def isdatatype(object):
return isinstance(object, (str, int, bool, float, type(None)))
def isstaticmethod(object):
if isinstance(object, staticmethod):
return True
if not inspect.isfunction(object):
return False
if "." not in getattr(object, "__qualname__", ""):
return False
args = object.__code__.co_varnames
if len(args) == 0:
return True
return args[0] != 'self'
|
Apache License 2.0
|
simplysecurity/simplydomain
|
simplydomain/src/core_processes.py
|
CoreProcess.execute_processes
|
python
|
def execute_processes(self, config, queue_dict, modules):
while True:
q = queue_dict['task_queue'].get()
pbq = queue_dict['progress_bar_pickup']
if q is None:
self.logger.infomsg(
'execute_processes() dynamic module task_queue empty, EXITING process', 'CoreProcess')
break
dynamic_module = modules[q]
try:
dm = dynamic_module.DynamicModule(config)
self.logger.infomsg('execute_processes() starting module: '
+ str(dm.info['Name']), 'CoreProcess')
msg = "Executing module: %s %s" % ('{0: <22}'.format(
"("+dm.info['Module']+")"), "("+dm.info['Name']+")")
if not self.config['silent']:
pbq.put(['execute', msg])
dm.dynamic_main(queue_dict)
self.logger.infomsg('execute_processes() completed module: '
+ str(dm.info['Name']), 'CoreProcess')
msg = "Module completed: %s %s" % (
'{0: <22}'.format("(" + dm.info['Module'] + ")"), "(" + dm.info['Name'] + ")")
if not self.config['silent']:
pbq.put(['complete', msg])
except Exception as e:
self.logger.warningmsg('execute_processes hit fatal error: '
+ str(e), 'CoreProcess')
self.print_red(" [!] Module process failed: %s %s" % (
'{0: <22}'.format("(" + dm.info['Module'] + ")"), "(" + str(e) + ")"))
|
Executes each dynamic module pulled from the task queue.
:param config: runtime configuration dict
:param queue_dict: dict of queues used for task pickup, results and progress reporting
:param modules: dict of loaded dynamic modules keyed by name
:return:
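A rough, hypothetical sketch of the queue protocol this worker expects; the module name and queue keys are assumptions taken from the surrounding code:
import multiprocessing as mp
# execute_processes() pulls module names from 'task_queue' and stops at the None sentinel.
task_queue = mp.Queue()
task_queue.put('bing_search')   # assumed key into the loaded modules dict
task_queue.put(None)            # sentinel: tells the worker process to exit
queue_dict = {
    'task_queue': task_queue,
    'task_output_queue': mp.Queue(),
    'progress_bar_pickup': mp.Queue(),
}
# a CoreProcess instance would then run: self.execute_processes(config, queue_dict, modules)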
|
https://github.com/simplysecurity/simplydomain/blob/101dd55b213009b449a96a1fa8b143d85dcdba88/simplydomain/src/core_processes.py#L272-L308
|
import multiprocessing as mp
from itertools import product
import string
import threading
import time
from . import core_printer
from . import core_progress
from . import core_serialization
from . import module_recursion
class CoreProcess(core_printer.CorePrinters, core_progress.CoreProgress, module_recursion.ModuleRecursion):
def __init__(self):
core_printer.CorePrinters.__init__(self)
core_progress.CoreProgress.__init__(self)
module_recursion.ModuleRecursion.__init__(self)
self.procs = []
self.threads = []
self.processors = mp.cpu_count()
self.module_count = 0
self.mp = mp
self.mpq = self.mp.Queue()
self.task_queue = mp.Queue()
self.task_output_queue = mp.Queue()
self.task_msg_queue = mp.Queue()
self.progress_bar_pickup = mp.Queue()
self.serialize_json_output = core_serialization.SerializeJSON(
self.config)
def _configure_mp(self):
self.logger.infomsg(
'setting MP to SPAWN for cross platform support', 'CoreRuntime')
self.mp.set_start_method('spawn')
def _configure_processes(self, mod_count):
self.logger.infomsg('current # of modules loaded: ' +
str(mod_count), 'CoreProcess')
self.logger.infomsg('current # of processors of SYSTEM: ' +
str(self.processors), 'CoreProcess')
if self.processors > mod_count:
self.logger.infomsg('setting MP count of: ' +
str(mod_count), 'CoreProcess')
self.processors = mod_count
for p in range(self.processors + 1):
self.task_queue.put(None)
def _task_output_queue_consumer(self):
while True:
item = self.task_output_queue.get()
if item:
self.logger.debugmsg('_task_output_queue_consumer recv a subdomain: '
+ str(item.subdomain), 'CoreProcess')
msg = self.green_text("Subdomain: %s Valid: (%s)" %
('{0: <30}'.format('('+str(item.subdomain)+')'), str(item.valid)))
if not self.config['silent']:
self.progress_print(msg)
self.serialize_json_output.add_subdomain(item)
self.add_subdomain(item)
if item is None:
self.logger.infomsg(
'_task_output_queue_consumer is (NONE) exiting thread', 'CoreProcess')
break
if self.task_output_queue.empty():
pass
def populate_task_queue(self, modules):
if self.config['args'].module:
for mod in modules:
if self.config['args'].module in mod:
self.task_queue.put(mod)
else:
for mod in modules:
self.logger.infomsg('adding module to task queue: ' +
str(mod), 'CoreProcess')
self.task_queue.put(mod)
self.module_count = len(modules)
self._configure_processes(len(modules))
def clear_task_queue(self):
self.logger.infomsg('tasked to clear_task_queue()', 'CoreProcess')
while not self.task_queue.empty():
obj = self.task_queue.get()
del obj
self.logger.infomsg(
'clear_task_queue() completed empty', 'CoreProcess')
self.task_queue.close()
def _pbar_thread(self):
if not self.config['silent']:
start_count = len(self.procs)
self.start_progress_bar(self.module_count)
while self.check_active():
try:
dm = self.progress_bar_pickup.get()
except Exception as e:
print(e)
if dm is None:
self.close_progress_bar()
break
if dm:
if dm[0] == 'complete':
self.progress_print(self.blue_text(dm[1]))
self.inc_progress_bar(1)
if dm[0] == 'execute':
self.progress_print(self.blue_text(dm[1]))
if self.progress_bar_pickup.empty():
time.sleep(0.1)
def _start_thread_function(self, pointer):
self.threads.insert(0, threading.Thread(
target=self._pbar_thread))
t = self.threads[0]
t.start()
def _start_threads(self):
self.threads.append(threading.Thread(
target=self._task_output_queue_consumer))
for t in self.threads:
t.start()
def stop_threads(self):
self.logger.infomsg(
'tasked to stop_threads() putting (NONE)', 'CoreProcess')
self.task_output_queue.put(None)
for t in self.threads:
self.logger.infomsg(
'Attempting to shut down thread in stop_threads()', 'CoreProcess')
t.join()
self.print_red("[!] All consumer threads have been joined")
self.logger.infomsg(
'All consumer threads joined in stop_threads()', 'CoreProcess')
def join_threads(self):
self.logger.infomsg(
'tasked to join_threads() putting (NONE)', 'CoreProcess')
self.task_output_queue.put(None)
while True:
if self.task_output_queue.empty():
self.logger.infomsg(
'self.task_output_queue is empty! breaking loop', 'CoreProcess')
break
else:
self.logger.infomsg(
'self.task_output_queue not empty sleeping for 1 second', 'CoreProcess')
time.sleep(1)
for t in self.threads:
t.join()
def start_processes(self):
self._start_threads()
for _ in range(self.processors):
self.logger.infomsg(
'start_processes() is kicking off empty procs with queue objects', 'CoreProcess')
self.start_process(self.config, self.task_queue,
self.task_output_queue, self.progress_bar_pickup)
for p in self.procs:
p.daemon = True
p.start()
self.logger.infomsg(
'start_process() started proc with daemon mode', 'CoreProcess')
def start_process(self, config, task_queue, task_output_queue, progress_bar_pickup):
queue_dict = {
'task_queue': task_queue,
'task_output_queue': task_output_queue,
'progress_bar_pickup': progress_bar_pickup
}
self.logger.infomsg('start_process() built queue_dict and appending to self.mp.procs, current proc count: '
+ str(len(self.procs)), 'CoreProcess')
self.procs.append(
self.mp.Process(target=self.execute_processes,
args=(config, queue_dict, self.modules)))
|
BSD 3-Clause New or Revised License
|
awslabs/aws-ops-automator
|
source/code/handlers/__init__.py
|
get_class_for_handler
|
python
|
def get_class_for_handler(handler_name):
if handler_name not in __handlers:
m = get_module_for_handler(handler_name)
cls = _get_handler_class(m)
if cls is None or cls[0] != handler_name:
raise ImportError(ERR_UNEXPECTED_HANDLER_CLASS_IN_MODULE.format(handler_name, m, cls[0] if cls else "None"))
__handlers[handler_name] = cls
return __handlers[handler_name][1]
|
Returns the class for the handler
:param handler_name: Name of the handler
:return: class that implements the handler
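A hedged usage sketch; "ScheduleHandler" is a hypothetical handler name and the event/context values are placeholders:
handler_cls = get_class_for_handler("ScheduleHandler")   # cached in __handlers after the first lookup
handler = handler_cls({"source": "aws.events"}, None)     # equivalent to create_handler("ScheduleHandler", event, context)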
|
https://github.com/awslabs/aws-ops-automator/blob/362abd0717b48ecca7f20d8985ae7d76f045daf3/source/code/handlers/__init__.py#L330-L343
|
import base64
import copy
import importlib
import inspect
import json
import os
import sys
import time
from datetime import datetime, timedelta
from os import getenv, listdir
from os.path import abspath, dirname, isdir, isfile, join
import boto_retry
import pytz
import services
from configuration import ENV_CONFIG_BUCKET as CONFIGURATION_BUCKET
from helpers import pascal_to_snake_case, safe_json
from outputs import raise_exception
from scheduling.cron_expression import CronExpression
ARN_ROLE_TEMPLATE = "arn:aws:iam::{}:role/{}"
INF_NEW_CRON = "New cron expression for rule is {}"
INF_NEXT_EVENT_IN_PAST = "As next trigger time for rule is before {} it will be set to trigger every minute"
INF_NEXT_EVENT = "Next event is at {}, new time for rule to trigger is {}"
COMPLETION_METHOD = "is_completed"
ACTION_CONCURRENCY_KEY_METHOD = "action_concurrency_key"
ACTION_VALIDATE_PARAMETERS_METHOD = "action_validate_parameters"
DEFAULT_ACCOUNT_ROLENAME = "{}ActionsRole"
ENV_LAMBDA_NAME = "LAMBDA_NAME"
ENV_OPS_AUTOMATOR_RULE = "OPS_AUTOMATOR_RULE"
ENV_COMPLETION_RULE = "COMPLETION_RULE"
ENV_ACTION_TRACKING_TABLE = "ACTION_TRACKING_TABLE"
ENV_LAST_RUN_TABLE = "LAST_SCHEDULER_RUN_TABLE"
ENV_AUTOMATOR_TAG_NAME = "TASKLIST_TAG_NAME"
ENV_CONCURRENCY_TABLE = "CONCURRENCY_TABLE"
ENV_WAITING_COMPLETION_TABLE = "WAITING_COMPLETION_TABLE"
ENV_STACK_NAME = "STACK_NAME"
ENV_STACK_ID = "STACK_ID"
ENV_OPS_AUTOMATOR_ACCOUNT = "OPS_AUTOMATOR_ACCOUNT"
ENV_LAMBDA_TIMEOUT = "LAMBDA_TIMEOUT"
ENV_ISSUES_TOPIC_ARN = "ISSUES_TOPIC_ARN"
ENV_EVENTS_TOPIC_ARN = "EVENTS_TOPIC_ARN"
ENV_RESOURCE_BUCKET = "RESOURCE_BUCKET"
ENV_RESOURCE_TO_S3_SIZE = "RESOURCE_TO_S3_SIZE"
ENV_CLOUDWATCH_METRICS = "CLOUDWATCH_METRICS"
ENV_USE_ECS = "USE_ECS"
ENV_IS_ECS_JOB = "IS_ECS_JOB"
ENV_CONFIG_BUCKET = CONFIGURATION_BUCKET
ENV_ECS_CLUSTER = "ECS_CLUSTER"
ENV_ECS_TASK = "ECS_OPS_AUTOMATOR_TASK"
ENV_TASK_CLEANUP_ENABLED = "TASK_CLEANUP_ENABLED"
ENV_TASK_RETENTION_HOURS = "TASK_RETENTION_HOURS"
ENV_KEEP_FAILED_TASKS = "KEEP_FAILED_TASKS"
ENV_SERVICE_LIMIT_CONCURRENT_EBS_SNAPSHOT_COPY = "SERVICE_LIMIT_CONCURRENT_EBS_SNAPSHOT_COPY"
ENV_SERVICE_LIMIT_CONCURRENT_RDS_SNAPSHOT_COPY = "SERVICE_LIMIT_CONCURRENT_RDS_SNAPSHOT_COPY"
ENV_SERVICE_LIMIT_CONCURRENT_IMAGE_COPY = "SERVICE_LIMIT_CONCURRENT_IMAGE_COPY"
ENV_OPS_AUTOMATOR_ROLE_ARN = "OPS_AUTOMATOR_ROLE_ARN"
DEFAULT_SCHEDULER_TAG = "OpsAutomatorTaskList"
FORWARDED_EVENT = "ops-automator:{}"
EC2_EVENT_SOURCE = "aws.ec2"
RDS_EVENT_SOURCE = "aws.rds"
S3_EVENT_SOURCE = "aws:s3"
TAG_EVENT_SOURCE = "aws.tag"
TAG_CHANGE_EVENT_SOURCE_DETAIL_TYPE = "Tag Change on Resource"
TAG_CHANGE_EVENT = "TagChangeOnResource"
ECS_DEFAULT_MEMORY_RESERVATION = 128
TASK_TR_ACCOUNT = "Account"
TASK_TR_ACTION = "Action"
TASK_TR_ASSUMED_ROLE = "AssumedRole"
TASK_TR_COMPLETION_SIZE = "CompletionSize"
TASK_TR_CONCURRENCY_ID = "ConcurrencyId"
TASK_TR_CONCURRENCY_KEY = "ConcurrencyKey"
TASK_TR_CREATED = "Created"
TASK_TR_CREATED_TS = "CreatedTs"
TASK_TR_DEBUG = "Debug"
TASK_TR_DRYRUN = "Dryrun"
TASK_TR_DT = "TaskDatetime"
TASK_TR_ERROR = "Error"
TASK_TR_EVENTS = "Events"
TASK_TR_EXECUTE_SIZE = "ExecuteSize"
TASK_TR_EXECUTION_LOGSTREAM = "LogStream"
TASK_TR_EXECUTION_TIME = "ExecutionTime"
TASK_TR_HAS_COMPLETION = "HasCompletion"
TASK_TR_ID = "Id"
TASK_TR_INTERNAL = "Internal"
TASK_TR_INTERVAL = "Interval"
TASK_TR_GROUP = "TaskGroup"
TASK_TR_LAST_WAIT_COMPLETION = "LastCompletionCheck"
TASK_TR_METRICS = "TaskMetrics"
TASK_TR_NAME = "TaskName"
TASK_TR_NOTIFICATIONS = "TaskNotifications"
TASK_TR_PARAMETERS = "Parameters"
TASK_TR_RESOURCES = "Resources"
TASK_TR_RESULT = "ActionResult"
TASK_TR_RUN_LOCAL = "RunLocal"
TASK_TR_S3_RESOURCES = "S3Resources"
TASK_TR_REGION = "Region"
TASK_TR_RESOURCE_TYPE = "ResourceType"
TASK_TR_SELECT_SIZE = "SelectSize"
TASK_TR_SERVICE = "Service"
TASK_TR_SOURCE = "Source"
TASK_TR_START_EXECUTION_TIME = "StartExecutionTime"
TASK_TR_START_RESULT = "ActionStartResult"
TASK_TR_STARTED_TS = "StartedTs"
TASK_TR_STATUS = "Status"
TASK_TR_TAGFILTER = "TagFilter"
TASK_TR_TIMEOUT = "TaskTimeout"
TASK_TR_TIMEZONE = "Timezone"
TASK_TR_TTL = "TTL"
TASK_TR_UPDATED = "Updated"
TASK_TR_UPDATED_TS = "UpdatedTs"
STATUS_PENDING = "pending"
STATUS_STARTED = "started"
STATUS_WAIT_FOR_COMPLETION = "wait-to-complete"
STATUS_COMPLETED = "completed"
STATUS_TIMED_OUT = "timed-out"
STATUS_FAILED = "failed"
STATUS_WAITING = "wait-for-exec"
TASK_ACTION = "action"
TASK_COMPLETION_ECS_MEMORY = "completion_ecs_memory"
TASK_COMPLETION_SIZE = "completion_size"
TASK_ACCOUNTS = "accounts"
TASK_ROLE = "cross_account_role"
TASK_DEBUG = "debug"
TASK_DESCRIPTION = "description"
TASK_DRYRUN = "dryrun"
TASK_ENABLED = "enabled"
TASK_EVENT_SCOPES = "event_scopes"
TASK_EVENT_SOURCE_TAG_FILTER = "EventSourceTagFilter"
TASK_EVENTS = "events"
TASK_EXECUTE_ECS_MEMORY = "execute_ecs_memory"
TASK_EXECUTE_SIZE = "execute_size"
TASK_ID = "id"
TASK_INTERNAL = "internal"
TASK_INTERVAL = "interval"
TASK_METRICS = "task_metrics"
TASK_NAME = "name"
TASK_NOTIFICATIONS = "notifications"
TASK_PARAMETERS = "parameters"
TASK_REGIONS = "regions"
TASK_SELECT_ECS_MEMORY = "select_ecs_memory"
TASK_SELECT_SIZE = "select_size"
TASK_SERVICE = "service"
TASK_RESOURCE_TYPE = "resource_type"
TASK_TAG_FILTER = "tag_filter"
TASK_THIS_ACCOUNT = "this_account"
TASK_TIMEOUT = "timeout"
TASK_TIMEZONE = "timezone"
HANDLER_EVENT_ACTION = "action"
HANDLER_ACTION_EXECUTE = "execute-action"
HANDLER_ACTION_TEST_COMPLETION = "test-completion-action"
HANDLER_ACTION_SELECT_RESOURCES = "select-resources"
HANDLER_SELECT_ARGUMENTS = "select-args"
HANDLER_SELECT_RESOURCES = "resources"
HANDLER_EVENT_SCHEDULER_EXECUTE_TASK = "scheduler-execute-task"
HANDLER_EVENT_TASK_NAME = "task-name"
HANDLER_EVENT_TASK_DT = "task-datetime"
HANDLER_EVENT_TASK = "task"
HANDLER_EVENT_ACCOUNT = "account"
HANDLER_EVENT_REGIONS = "regions"
HANDLER_EVENT_SOURCE = "source"
HANDLER_EVENT_DYNAMO_SOURCE = "eventSource"
HANDLER_EVENT_SUB_TASK = "sub-task"
HANDLER_EVENT_RESOURCE_NAME = "resource-name"
HANDLER_EVENT_CUSTOM_SELECT = "custom-select"
HANDLER_EVENT_TASK_GROUP = "task-group"
EVENT_SCOPE_RESOURCE = "resource"
EVENT_SCOPE_REGION = "region"
UNKNOWN_SOURCE = "unknown"
ERR_NO_MODULE_FOR_HANDLER = "Can not load module {} for handler {}, available handlers are {}"
ERR_UNEXPECTED_HANDLER_CLASS_IN_MODULE = "Unable to load class {0}Handler for handler {0} from module {1}, " "handler class found in module was {2}"
ERR_EVENT_RULE_NOT_FOUND = "Can not get CloudWatch rule {} in stack {}"
ERR_FAILED_TO_START_ECS_TASK = "Failed to start ECS job for {}, failures are {}"
ERR_CREATING_SESSION = "Error creating session {}"
DESC_NO_EXECUTIONS_FOR_EXPR = "No task scheduled within the next 24 hours"
DESC_EXPRESSION_SET = "Schedule expression set to {}"
HANDLERS = "handlers"
HANDLERS_MODULE_NAME = HANDLERS + ".{}"
HANDLERS_PATH = "./" + HANDLERS
HANDLER = "Handler"
HANDLER_CLASS = "{}" + HANDLER
__handlers = {}
__actions = None
_kms_client = None
def _get_handler_class(handler_module):
for cls in inspect.getmembers(handler_module, inspect.isclass):
if cls[1].__module__ != handler_module.__name__ or not cls[1].__name__.endswith(HANDLER):
continue
return cls
return None
def _get_module(module_name):
handler_module = sys.modules.get(module_name)
if handler_module is None:
handler_module = importlib.import_module(module_name)
return handler_module
def get_module_for_handler(handler_name):
module_name = HANDLERS_MODULE_NAME.format(pascal_to_snake_case(handler_name))
try:
return _get_module(module_name)
except Exception:
raise ImportError(ERR_NO_MODULE_FOR_HANDLER.format(module_name, handler_name, ", ".join(all_handlers())))
def all_handlers():
global __actions
if __actions is None:
__actions = []
current = abspath(os.getcwd())
while True:
if isdir(os.path.join(current, "handlers")):
break
parent = dirname(current)
if parent == current:
raise Exception("Could not find handlers directory")
else:
current = parent
for f in listdir(os.path.join(current, "handlers")):
if isfile(join(current, "handlers", f)) and f.endswith("_{}.py".format(HANDLER.lower())):
module_name = HANDLERS_MODULE_NAME.format(f[0:-len(".py")])
m = _get_module(module_name)
cls = _get_handler_class(m)
if cls is not None:
handler_name = cls[0]
__actions.append(handler_name)
return __actions
def create_handler(handler_name, event, context):
return get_class_for_handler(handler_name)(event, context)
|
Apache License 2.0
|
locationlabs/confab
|
confab/jinja_filters.py
|
remove_jinja_filter
|
python
|
def remove_jinja_filter(filter):
return jinja_filters.remove_filter(filter)
|
Remove a custom jinja filter.
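A small usage sketch with a hypothetical filter, using the add/remove pair and the JinjaFilters context manager defined below:
def shout(value):
    return str(value).upper()
add_jinja_filter(shout)
assert remove_jinja_filter(shout) is True    # True when the filter was registered
assert remove_jinja_filter(shout) is False   # already removed
with JinjaFilters(shout):                    # or scope the filter to a block
    pass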
|
https://github.com/locationlabs/confab/blob/a39c3d7aae11b2f373b8911b4f3caa75548a00c6/confab/jinja_filters.py#L109-L113
|
def select(value, key):
return value.get(key, value) if isinstance(value, dict) else value
def rotate(list_, pivot):
try:
pos = list_.index(pivot)
except ValueError:
return list_
else:
return list_[pos:] + list_[:pos]
def map_format(sequence, format):
return [format.format(item) for item in sequence]
def built_in_filters():
return [
select,
rotate,
map_format,
]
class JinjaFiltersRegistry(object):
def __init__(self):
self._filters = set(built_in_filters())
def add_filter(self, filter):
self._filters.add(filter)
def remove_filter(self, filter):
try:
self._filters.remove(filter)
except KeyError:
return False
return True
@property
def filters(self):
return {filter.__name__: filter for filter in self._filters}
def register(self, environment):
for name, filter in self.filters.iteritems():
environment.filters[name] = filter
class JinjaFilters(object):
def __init__(self, *filters):
self._filters = filters
def __enter__(self):
for filter in self._filters:
add_jinja_filter(filter)
def __exit__(self, type, value, traceback):
for filter in self._filters:
remove_jinja_filter(filter)
def add_jinja_filter(filter):
jinja_filters.add_filter(filter)
|
Apache License 2.0
|
numba/numba
|
numba/np/unsafe/ndarray.py
|
to_fixed_tuple
|
python
|
def to_fixed_tuple(typingctx, array, length):
if not isinstance(length, types.IntegerLiteral):
raise RequireLiteralValue('*length* argument must be a constant')
if array.ndim != 1:
raise TypingError("Not supported on array.ndim={}".format(array.ndim))
tuple_size = int(length.literal_value)
tuple_type = types.UniTuple(dtype=array.dtype, count=tuple_size)
sig = tuple_type(array, length)
def codegen(context, builder, signature, args):
def impl(array, length, empty_tuple):
out = empty_tuple
for i in range(length):
out = tuple_setitem(out, i, array[i])
return out
inner_argtypes = [signature.args[0], types.intp, tuple_type]
inner_sig = typing.signature(tuple_type, *inner_argtypes)
ll_idx_type = context.get_value_type(types.intp)
empty_tuple = context.get_constant_undef(tuple_type)
inner_args = [args[0], ll_idx_type(tuple_size), empty_tuple]
res = context.compile_internal(builder, impl, inner_sig, inner_args)
return res
return sig, codegen
|
Convert *array* into a tuple of *length*
Returns ``UniTuple(array.dtype, length)``
**Warning**
- No bounds checking.
If *length* is greater than *array.size*, the behavior is undefined.
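A usage sketch inside a jitted function; the length argument must be a compile-time constant:
import numpy as np
from numba import njit
from numba.np.unsafe.ndarray import to_fixed_tuple
@njit
def first_three(arr):
    # the literal 3 makes the return type UniTuple(arr.dtype, 3)
    return to_fixed_tuple(arr, 3)
print(first_three(np.arange(5.0)))   # (0.0, 1.0, 2.0)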
|
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/np/unsafe/ndarray.py#L42-L79
|
from numba.core import types, typing
from numba.core.cgutils import unpack_tuple
from numba.core.extending import intrinsic
from numba.core.imputils import impl_ret_new_ref
from numba.core.errors import RequireLiteralValue, TypingError
from numba.cpython.unsafe.tuple import tuple_setitem
@intrinsic
def empty_inferred(typingctx, shape):
from numba.np.arrayobj import _empty_nd_impl
def codegen(context, builder, signature, args):
arrty = signature.return_type
assert arrty.is_precise()
shapes = unpack_tuple(builder, args[0])
res = _empty_nd_impl(context, builder, arrty, shapes)
return impl_ret_new_ref(context, builder, arrty, res._getvalue())
nd = len(shape)
array_ty = types.Array(ndim=nd, layout='C', dtype=types.undefined)
sig = array_ty(shape)
return sig, codegen
@intrinsic
|
BSD 2-Clause Simplified License
|
peerchemist/cryptotik
|
cryptotik/bitstamp.py
|
Bitstamp.get_liquidation_address_info
|
python
|
def get_liquidation_address_info(self, address=None):
return self.private_api('v2/liquidation_address/info/',
data={'address': address})
|
Shows transactions (BTC to liquidation_currency) for a liquidation address.
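A hedged usage sketch; the credentials and address below are placeholders:
btc = Bitstamp(apikey="KEY", secret="SECRET", customer_id="12345")
info = btc.get_liquidation_address_info(address="1BoatSLRHtKNngkdXEeobR76b53LETtpyT")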
|
https://github.com/peerchemist/cryptotik/blob/24ffd74c43ff1fc171081e135cb2b66b775af3f3/cryptotik/bitstamp.py#L218-L222
|
import requests
from decimal import Decimal
import time
from cryptotik.common import (headers, ExchangeWrapper,
NormalizedExchangeWrapper)
from cryptotik.exceptions import (InvalidBaseCurrencyError,
InvalidDelimiterError,
APIError)
import hmac
import hashlib
from datetime import datetime
class Bitstamp(ExchangeWrapper):
def __init__(self, apikey=None, secret=None, customer_id=None,
timeout=None, proxy=None):
if apikey:
self._apikey = apikey
self._secret = secret
self._customer_id = customer_id
if proxy:
assert proxy.startswith('https'), {'Error': 'Only https proxies supported.'}
self.proxy = {'https': proxy}
if not timeout:
self.timeout = (8, 15)
else:
self.timeout = timeout
self.api_session = requests.Session()
public_commands = ("ticker", "transactions", "order_book")
private_commands = ("balance", "user_transactions", "open_orders", "order_status",
"cancel_order", "cancel_all_orders", "buy",
"sell")
name = 'bitstamp'
url = 'https://www.bitstamp.net/'
api_url = url + 'api/'
delimiter = ""
case = "lower"
headers = headers
_markets = 'btcusd, btceur, eurusd, xrpusd, xrpeur, xrpbtc, ltcusd, ltceur, ltcbtc, ethusd, etheur, ethbtc'.split(', ')
_markets_normalized = 'btc-usd, btc-eur, eur-usd, xrp-usd, xrp-eur, xrp-btc, ltc-usd, ltc-eur, ltc-btc, eth-usd, eth-eur, eth-btc'.split(', ')
maker_fee, taker_fee = 0.002, 0.002
quote_order = 0
base_currencies = ['usd', 'eur', 'btc']
def get_nonce(self):
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
self._nonce = max(int(time.time() * 1000000), nonce)
return self._nonce
def get_base_currencies(self):
raise NotImplementedError
@classmethod
def format_pair(cls, pair):
pair = pair.replace("-", cls.delimiter)
if not pair.islower():
return pair.lower()
else:
return pair
def _verify_response(self, response):
if 'v2' in response.url:
try:
if response.json()['error']:
raise APIError(response.json()['reason'])
except (KeyError, TypeError):
pass
def _generate_signature(self, message):
return hmac.new(self._secret.encode('utf-8'),
msg=message.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
def api(self, command):
try:
response = self.api_session.get(self.api_url + 'v2/' + command, headers=self.headers,
timeout=self.timeout, proxies=self.proxy)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise APIError(e)
self._verify_response(response)
return response.json()
def private_api(self, command, data={}):
if not self._customer_id or not self._apikey or not self._secret:
raise ValueError("A Key, Secret and customer_id required!")
nonce = self.get_nonce()
data['key'] = self._apikey
message = str(nonce) + self._customer_id + self._apikey
data['signature'] = self._generate_signature(message)
data['nonce'] = nonce
try:
response = self.api_session.post(url=self.api_url + command,
data=data,
headers=self.headers,
timeout=self.timeout,
proxies=self.proxy)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
self._verify_response(response)
return response.json()
def get_markets(self):
return self._markets
def get_summaries(self):
raise NotImplementedError
def get_market_ticker(self, pair):
pair = self.format_pair(pair)
return self.api("ticker/" + pair)
def get_market_orders(self, pair):
pair = self.format_pair(pair)
return self.api("order_book/" + pair)
def get_market_sell_orders(self, pair):
return self.get_market_orders(pair)['asks']
def get_market_buy_orders(self, pair):
return self.get_market_orders(pair)['bids']
def get_market_trade_history(self, pair, depth=100, since="hour"):
pair = self.format_pair(pair)
orders = self.api("transactions/" + pair + "/?time={0}".format(since))
return [i for i in orders][:depth]
def get_market_volume(self, pair):
pair = self.format_pair(pair)
return self.get_market_ticker(pair)['volume']
def get_balances(self, coin=None):
if not coin:
return self.private_api("v2/balance/")
else:
return {k: v for k, v in self.private_api("v2/balance/").items() if k.startswith(coin.lower())}
def get_deposit_address(self, coin=None):
if coin == 'btc':
command = 'bitcoin_deposit_address/'
elif coin == 'ltc':
command = 'v2/ltc_address/'
elif coin == 'eth':
command = 'v2/eth_address/'
elif coin == 'xrp':
command = 'ripple_address/'
elif coin == 'bch':
command = 'v2/bch_address/'
else:
raise APIError('Unsupported coin: {}'.format(coin))
return self.private_api(command)
def get_liquidation_address(self, fiat):
return self.private_api('v2/liquidation_address/new/',
data={'liquidation_currency': fiat.lower()})
|
BSD 3-Clause New or Revised License
|
guandjoy/redfish
|
src/django_server/notes/managers.py
|
NoteManager.createNoOwnerNotesFromFile
|
python
|
def createNoOwnerNotesFromFile(self):
get_qs = lambda: self.get_queryset().filter(owner=None)
if len(get_qs()) == 0:
try:
with open('./fixtures/initial_notes.json', 'r') as initial_notes_file:
json_obj = initial_notes_file.read()
initial_notes = []
for note in serializers.deserialize("json", json_obj):
initial_notes.append(note.object)
initial_notes = self.bulk_create(initial_notes)
print("initial notes", initial_notes)
return initial_notes
except FileNotFoundError:
print("There is no initial_notes.json file. Create it with `generateInitialNotesFile` manager")
return False
else:
print("There is already some initial notes without owner")
return False
|
Retrieve initial notes from the 'initial_notes.json' file
located in the ./fixtures directory
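A hedged usage sketch; it assumes this manager is attached to a Note model as its default manager:
Note.objects.generateInitialNotesFile(records_amount=10)   # writes ./fixtures/initial_notes.json
Note.objects.createNoOwnerNotesFromFile()                   # bulk-creates the ownerless notes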
|
https://github.com/guandjoy/redfish/blob/ebcf7f25c6ef05af229aebcee0e5ff0728e34c9e/src/django_server/notes/managers.py#L88-L109
|
from django.db import models, transaction
from django.db.models import F, Max, Q
from django.contrib.auth.models import User
from django.core import serializers
from random import randint
import requests
import json
class NoteManager(models.Manager):
def reorder_by(self, field):
users = User.objects.all()
for user in users:
qs = self.get_queryset().filter(owner=user).exclude(Q(title=None)| Q(title=""), Q(content=None) | Q(content="")).order_by(field)
for index, note in enumerate(qs.filter(pinned=True)):
note.order = index
note.save()
for index, note in enumerate(qs.filter(pinned=False)):
note.order = index
note.save()
def remove_order_gaps(self):
users = User.objects.all()
for user in users:
qs = self.get_queryset().filter(owner=user).order_by('order')
for index, note in enumerate(qs):
print(f'Old order: {note.order}. New order: {index + 1}')
note.order = index + 1
note.save()
def fill_gaps(self, objects):
qs = self.get_queryset()
with transaction.atomic():
for index, obj in enumerate(objects):
try:
notes_to_update_order = qs.filter(order__gt=obj.order, order__lt=objects[index + 1].order ,owner=obj.owner, pinned=obj.pinned)
except:
notes_to_update_order = qs.filter(order__gt=obj.order, owner=obj.owner, pinned=obj.pinned)
finally:
notes_to_update_order.update(order=F('order') - (index + 1))
def generateInitialNotesFile(self, records_amount):
FILE_NAME = "./fixtures/initial_notes.json"
sentences = lambda: randint(1, 12)
url = lambda: f'https://baconipsum.com/api/?type=meat-and-filler&sentences={sentences()}&format=text'
notes = []
i = 0
while i < records_amount:
r = requests.get(url())
if r.status_code == 200:
note = {
"model": "notes.note",
"fields": {
"title": "",
"content": r.text,
"color": "WHITE",
"pinned": False,
"order": i
}
}
notes.append(note)
print(f'{json.dumps(note, indent=4)}')
i += 1
else:
print(f'{i} failed')
with open(FILE_NAME, 'w') as file:
file.write(json.dumps(notes, indent=4))
|
MIT License
|
extreme-classification/decaf
|
DECAF/libs/collate_fn.py
|
construct_collate_fn.collate_fn_full_lbf
|
python
|
def collate_fn_full_lbf(self, batch):
batch_data = {}
self.collate_docs(batch, batch_data)
batch_data['Y'] = torch.from_numpy(np.vstack(
list(map(lambda x: x[1], batch))
)).type(torch.FloatTensor)
return batch_data
|
One-vs-all (OvA) collation for the labels
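A hedged sketch of wiring the collator into a DataLoader; train_dataset (yielding (features, label_row) pairs) and the sparse label-feature matrix are assumptions:
from torch.utils.data import DataLoader
collate = construct_collate_fn(feature_type='dense',
                               sparse_label_fts=label_features,   # scipy sparse matrix, assumed
                               use_shortlist=False)                # full OvA -> collate_fn_full_lbf
loader = DataLoader(train_dataset, batch_size=255, collate_fn=collate)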
|
https://github.com/extreme-classification/decaf/blob/1522e7ac1f56469a0b8a19181755a6f752acad45/DECAF/libs/collate_fn.py#L108-L117
|
import torch
import numpy as np
import scipy.sparse as sp
def _block_sparse_matrix(label_words):
data = torch.FloatTensor(label_words.data)
idx = torch.LongTensor(np.vstack(label_words.nonzero()))
shape = torch.Size(label_words.shape)
return idx, data, shape
def _create_sparse_mat(cols, data, shape):
rows = list(map(lambda x: [x[0]]*len(x[1]), enumerate(cols)))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
data = np.concatenate(data)
return sp.coo_matrix((data, (rows, cols)), shape=shape).tocsc()
def _return_padded_from_list(Var, padding_val,
dtype=torch.FloatTensor):
return torch.nn.utils.rnn.pad_sequence(
list(map(lambda x: torch.from_numpy(
np.asarray(x)).type(dtype), Var)),
batch_first=True, padding_value=padding_val)
def _return_padded_batch(Var, idx, key, padding_val,
dtype=torch.FloatTensor):
return torch.nn.utils.rnn.pad_sequence(
list(map(lambda x: torch.from_numpy(
x[idx][key]).type(dtype), Var)),
batch_first=True, padding_value=padding_val)
class construct_collate_fn:
def __init__(self, feature_type, use_shortlist=False,
num_partitions=1, sparse_label_fts=None,
freeze=False, padding_idx=0, num_labels=-1):
self.num_partitions = num_partitions
self.padding_idx = padding_idx
self.sparse_label_fts = sparse_label_fts
if self.sparse_label_fts is not None:
self.sparse_label_fts = self.sparse_label_fts
self.freeze = freeze
self.num_labels = num_labels
self.use_shortlist = use_shortlist
self._get_docs(feature_type)
self._batcher(sparse_label_fts, use_shortlist)
def __call__(self, batch):
return self.batcher(batch)
def _batcher(self, sparse_label_fts, use_shortlist):
if sparse_label_fts is not None:
if use_shortlist:
self.sparse_label_fts = self.sparse_label_fts.tocsr()
self.batcher = self.collate_fn_shorty_lbf
else:
self.batcher = self.collate_fn_full_lbf
else:
if use_shortlist:
self.batcher = self.collate_fn_shorty
else:
self.batcher = self.collate_fn_full
def _get_docs(self, feature_type):
if feature_type == 'dense':
self.collate_docs = self.collate_fn_docs_dense
elif feature_type == 'sparse':
self.collate_docs = self.collate_fn_docs_sparse
else:
print("Kuch bhi")
def collate_fn_docs_dense(self, batch, batch_data):
batch_data['X'] = torch.stack(list(
map(lambda x: torch.from_numpy(x[0]), batch)
), 0).type(torch.FloatTensor)
batch_data['batch_size'] = len(batch)
batch_data['idx'] = np.arange(
batch_data['batch_size']
).reshape(-1, 1)
batch_data['is_sparse'] = False
def collate_fn_docs_sparse(self, batch, batch_data):
batch_data['X'] = _return_padded_batch(batch, 0, 0, self.padding_idx,
dtype=torch.LongTensor)
batch_data['X_w'] = _return_padded_batch(batch, 0, 1, 0,
dtype=torch.FloatTensor)
batch_data['batch_size'] = len(batch)
batch_data['idx'] = np.arange(batch_data['batch_size']).reshape(-1, 1)
batch_data['is_sparse'] = True
|
MIT License
|
erkyrath/tworld
|
lib/tweblib/bhandlers.py
|
BuildBaseHandler.import_property
|
python
|
def import_property(self, prop):
valtype = prop['type']
if valtype == 'value':
val = prop.get('value', None)
if not val:
raise Exception('Value entry may not be blank')
return ast.literal_eval(val)
if valtype == 'datetime':
val = prop.get('value', None)
if not val:
return twcommon.misc.now().replace(microsecond=0)
val = twcommon.misc.gen_datetime_parse(val)
return val
if valtype == 'text':
res = { 'type':valtype }
if 'text' in prop:
res['text'] = prop['text']
twcommon.interp.parse(res['text'])
return res
if valtype == 'gentext':
res = { 'type':valtype }
if 'text' in prop:
res['text'] = prop['text']
twcommon.gentext.parse(res['text'])
return res
if valtype == 'code' or valtype == 'codearg':
res = { 'type':'code' }
if 'text' in prop:
res['text'] = prop['text']
ast.parse(res['text'], filename='property')
if 'args' in prop:
res['args'] = prop['args']
ast.parse('lambda %s : None' % (res['args'],), filename='arguments')
return res
if valtype == 'event':
res = { 'type':valtype }
if 'text' in prop:
res['text'] = prop['text']
twcommon.interp.parse(res['text'])
if 'otext' in prop:
res['otext'] = prop['otext']
twcommon.interp.parse(res['otext'])
return res
if valtype == 'panic':
res = { 'type':valtype }
if 'text' in prop:
res['text'] = prop['text']
twcommon.interp.parse(res['text'])
if 'otext' in prop:
res['otext'] = prop['otext']
twcommon.interp.parse(res['otext'])
return res
if valtype == 'move':
res = { 'type':valtype }
if 'loc' in prop:
loc = sluggify(prop['loc'])
res['loc'] = loc
if 'text' in prop:
res['text'] = prop['text']
twcommon.interp.parse(res['text'])
if 'oleave' in prop:
res['oleave'] = prop['oleave']
twcommon.interp.parse(res['oleave'])
if 'oarrive' in prop:
res['oarrive'] = prop['oarrive']
twcommon.interp.parse(res['oarrive'])
return res
if valtype == 'editstr':
res = { 'type':valtype }
if 'key' in prop:
key = sluggify(prop['key'])
res['key'] = key
if 'editaccess' in prop:
try:
editaccess = twcommon.access.level_named(prop['editaccess'])
except:
namels = twcommon.access.level_name_list()
raise Exception('Access level must be in %s' % (namels,))
res['editaccess'] = editaccess
if 'label' in prop:
res['label'] = prop['label']
twcommon.interp.parse(res['label'])
if 'text' in prop:
res['text'] = prop['text']
twcommon.interp.parse(res['text'])
if 'otext' in prop:
res['otext'] = prop['otext']
twcommon.interp.parse(res['otext'])
return res
if valtype == 'portlist':
res = { 'type':valtype }
if 'plistkey' in prop:
plistkey = sluggify(prop['plistkey'])
res['plistkey'] = plistkey
if 'editaccess' in prop:
try:
editaccess = twcommon.access.level_named(prop['editaccess'])
except:
namels = twcommon.access.level_name_list()
raise Exception('Access level must be in %s' % (namels,))
res['editaccess'] = editaccess
if 'readaccess' in prop:
try:
readaccess = twcommon.access.level_named(prop['readaccess'])
except:
namels = twcommon.access.level_name_list()
raise Exception('Access level must be in %s' % (namels,))
res['readaccess'] = readaccess
if 'text' in prop:
res['text'] = prop['text']
twcommon.interp.parse(res['text'])
if 'focus' in prop:
try:
if twcommon.misc.gen_bool_parse(prop['focus']):
res['focus'] = True
else:
res.pop('focus', None)
except:
raise Exception('Focus flag must be true or false')
return res
raise Exception('Unknown property type: %s' % (valtype,))
|
Given a type-keyed dict from the client, convert it into database
form. Raises an exception if a problem occurs.
This is written strictly; it never allows in typed structures that
we don't recognize.
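A hedged illustration of the expected payload shapes; handler is assumed to be an instance of a BuildBaseHandler subclass:
handler.import_property({'type': 'value', 'value': '[1, 2, 3]'})      # -> [1, 2, 3] via ast.literal_eval
handler.import_property({'type': 'text', 'text': 'You see a door.'})  # -> {'type': 'text', 'text': '...'}
handler.import_property({'type': 'bogus'})                            # raises Exception('Unknown property type: bogus')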
|
https://github.com/erkyrath/tworld/blob/9f5237771196b03753d027277ffc296e25fd7425/lib/tweblib/bhandlers.py#L265-L395
|
import datetime
import random
import json
import ast
import re
import collections
from bson.objectid import ObjectId
import tornado.web
import tornado.gen
import tornado.escape
import motor
import tweblib.handlers
import twcommon.misc
import twcommon.interp
import twcommon.gentext
from twcommon.misc import sluggify
class JSONEncoderExtra(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ObjectId):
return str(obj)
if isinstance(obj, datetime.datetime):
return {'type':'datetime', 'value':twcommon.misc.gen_datetime_format(obj)}
return super().default(obj)
re_valididentifier = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
class NoBuildHandler(tweblib.handlers.MyRequestHandler):
@tornado.gen.coroutine
def prepare(self):
yield self.find_current_session()
if self.twsessionstatus != 'auth':
raise tornado.web.HTTPError(403, 'You are not signed in.')
res = yield motor.Op(self.application.mongodb.players.find_one,
{ '_id':self.twsession['uid'] })
if not res:
raise tornado.web.HTTPError(403, 'You do not exist.')
self.twisadmin = res.get('admin', False)
self.twisbuild = (self.twisadmin or res.get('build', False))
self.twisaskbuild = res.get('askbuild', False)
self.twisguest = res.get('guest', False)
@tornado.gen.coroutine
def get(self):
formerror = None
self.render('nobuild.html',
formerror=formerror,
isbuild=self.twisbuild,
askbuild=self.twisaskbuild,
isguest=self.twisguest)
@tornado.gen.coroutine
def post(self):
formerror = None
if not self.twisaskbuild:
formerror = 'You do not have permission to ask for permission.'
elif (not self.get_argument('agree', False)):
formerror = 'You must agree to the terms.'
else:
yield motor.Op(self.application.mongodb.players.update,
{ '_id':self.twsession['uid'] },
{ '$set':{'build':True} })
self.twisbuild = True
self.application.twlog.info('Player requested build permission: %s', self.twsession['email'])
self.render('nobuild.html',
formerror=formerror,
isbuild=self.twisbuild,
askbuild=self.twisaskbuild,
isguest=self.twisguest)
class BuildBaseHandler(tweblib.handlers.MyRequestHandler):
@tornado.gen.coroutine
def prepare(self):
yield self.find_current_session()
if self.twsessionstatus != 'auth':
raise tornado.web.HTTPError(403, 'You are not signed in.')
res = yield motor.Op(self.application.mongodb.players.find_one,
{ '_id':self.twsession['uid'] })
if not res:
raise tornado.web.HTTPError(403, 'You do not exist.')
self.twisadmin = res.get('admin', False)
self.twisbuild = (self.twisadmin or res.get('build', False))
if not self.twisbuild:
self.redirect('/nobuild')
return
def extend_template_namespace(self, map):
map = super().extend_template_namespace(map)
map['xsrf_token'] = tornado.escape.xhtml_escape(self.xsrf_token)
return map
@tornado.gen.coroutine
def find_build_world(self, wid):
world = yield motor.Op(self.application.mongodb.worlds.find_one,
{ '_id':wid })
if not world:
raise Exception('No such world')
if not self.twisbuild:
raise tornado.web.HTTPError(403, 'You do not have build access.')
if world['creator'] != self.twsession['uid'] and not self.twisadmin:
raise tornado.web.HTTPError(403, 'You did not create this world.')
locations = []
cursor = self.application.mongodb.locations.find({'wid':wid})
while (yield cursor.fetch_next):
loc = cursor.next_object()
locations.append(loc)
locations.sort(key=lambda loc:loc['_id'])
return (world, locations)
@tornado.gen.coroutine
def check_world_arguments(self, wid, locid, plistid=None, playerok=False):
world = yield motor.Op(self.application.mongodb.worlds.find_one,
{ '_id':wid })
if not world:
raise Exception('No such world')
if not self.twisbuild:
raise tornado.web.HTTPError(403, 'You do not have build access.')
if world['creator'] != self.twsession['uid'] and not self.twisadmin:
raise tornado.web.HTTPError(403, 'You did not create this world.')
if locid is None:
loc = None
elif locid == '$player':
if not playerok:
raise Exception('Player property not permitted')
loc = locid
else:
loc = yield motor.Op(self.application.mongodb.locations.find_one,
{ '_id':locid })
if not loc:
raise Exception('No such location')
if loc['wid'] != wid:
raise Exception('Location is not in this world')
if plistid is not None:
plist = yield motor.Op(self.application.mongodb.portlists.find_one,
{ '_id':plistid })
if not plist:
raise Exception('Portlist not found')
if plist['type'] != 'world':
raise Exception('Portlist is not world-level')
if plist['wid'] != wid:
raise Exception('Portlist not in this world')
return (world, loc)
@tornado.gen.coroutine
def invent_key(self, prefix, dbname, query, querykey='key'):
counter = 0
while True:
key = '%s_%d' % (prefix, counter,)
query[querykey] = key
obj = yield motor.Op(self.application.mongodb[dbname].find_one,
query)
if not obj:
return key
counter = counter+1
if counter >= 5:
counter = counter + random.randrange(50)
if counter >= 1000:
raise Exception('No free key available!')
def export_prop_array(self, ls):
res = []
for prop in ls:
val = prop['val']
if type(val) is dict:
valtype = val.get('type', None)
if not valtype:
val = { 'type':'value', 'value':repr(val) }
elif valtype == 'editstr':
if 'editaccess' in val:
try:
val['editaccess'] = twcommon.access.name_for_level(val['editaccess']).lower()
except:
del val['editaccess']
elif valtype == 'portlist':
if 'editaccess' in val:
try:
val['editaccess'] = twcommon.access.name_for_level(val['editaccess']).lower()
except:
del val['editaccess']
if 'readaccess' in val:
try:
val['readaccess'] = twcommon.access.name_for_level(val['readaccess']).lower()
except:
del val['readaccess']
elif valtype == 'code':
if 'args' in val:
val['type'] = 'codearg'
pass
else:
pass
elif isinstance(val, datetime.datetime):
val = { 'type':'datetime', 'value':twcommon.misc.gen_datetime_format(val) }
else:
val = { 'type':'value', 'value':repr(val) }
newprop = {'key':prop['key'], 'val':val, 'id':str(prop['_id'])}
res.append(newprop)
return res
|
MIT License
|
commvault/cvpysdk
|
cvpysdk/plan.py
|
Plans.get_eligible_plans
|
python
|
def get_eligible_plans(self, entities):
query = ''
for i in entities:
query += '{0}={1}&'.format(i, entities[i])
request_url = self._services['ELIGIBLE_PLANS'] % query[0:-1]
flag, response = self._cvpysdk_object.make_request('GET', request_url)
del query
if flag:
plans = {}
if response.json() and 'plans' in response.json():
response_value = response.json()['plans']
for temp in response_value:
temp_name = temp['plan']['planName'].lower()
temp_id = str(temp['plan']['planId']).lower()
plans[temp_name] = temp_id
return plans
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
|
Returns dict of plans that are eligible for the specified entities
Args:
entities (dict) - dictionary containing entities as keys and
their respective IDs as values
{
'clientId': id,
'appId': id,
'backupsetId': id
}
Returns:
dict - dict of eligible plans
Raises:
SDKException:
if there is an error in the response
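A hedged usage sketch; commcell is assumed to be an authenticated Commcell object and the entity IDs are placeholders:
plans = commcell.plans.get_eligible_plans({
    'clientId': 2,
    'appId': 106,
    'backupsetId': 12,
})
print(plans)   # e.g. {'server plan': '5'}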
|
https://github.com/commvault/cvpysdk/blob/66df30e6e31d619812b7756cb4f7e130b220a08f/cvpysdk/plan.py#L620-L660
|
from __future__ import unicode_literals
from past.builtins import basestring
from .exception import SDKException
class Plans(object):
def __init__(self, commcell_object):
self._commcell_object = commcell_object
self._cvpysdk_object = commcell_object._cvpysdk_object
self._services = commcell_object._services
self._update_response_ = commcell_object._update_response_
self._PLANS = self._services['PLANS']
self._plans = None
self.refresh()
def __str__(self):
representation_string = "{:^5}\t{:^50}\n\n".format('S. No.', 'Plan')
for index, plan in enumerate(self._plans):
sub_str = '{:^5}\t{:30}\n'.format(index + 1, plan)
representation_string += sub_str
return representation_string.strip()
def __repr__(self):
return "Plans class instance for Commcell: '{0}'".format(
self._commcell_object.commserv_name
)
def __len__(self):
return len(self.all_plans)
def __getitem__(self, value):
value = str(value)
if value in self.all_plans:
return self.all_plans[value]
else:
try:
return list(filter(lambda x: x[1]['id'] == value, self.all_plans.items()))[0][0]
except IndexError:
raise IndexError('No plan exists with the given Name / Id')
def _get_plans(self):
flag, response = self._cvpysdk_object.make_request('GET', self._PLANS)
if flag:
plans = {}
if response.json() and 'plans' in response.json():
response_value = response.json()['plans']
for temp in response_value:
temp_name = temp['plan']['planName'].lower()
temp_id = str(temp['plan']['planId']).lower()
plans[temp_name] = temp_id
return plans
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
def _get_plan_template(self, plan_sub_type, plan_type="MSP"):
if not (isinstance(plan_sub_type, basestring) and
isinstance(plan_type, basestring)):
raise SDKException('Plan', '101')
else:
template_url = self._services['GET_PLAN_TEMPLATE'] % (plan_type, plan_sub_type)
flag, response = self._cvpysdk_object.make_request('GET', template_url)
if flag:
if response.json() and 'plan' in response.json():
return response.json()
else:
raise SDKException('Plan', '102', 'Failed to get Plan template')
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
@property
def all_plans(self):
return self._plans
def has_plan(self, plan_name):
if not isinstance(plan_name, basestring):
raise SDKException('Plan', '101')
return self._plans and plan_name.lower() in self._plans
def get(self, plan_name):
if not isinstance(plan_name, basestring):
raise SDKException('Plan', '101')
else:
plan_name = plan_name.lower()
if self.has_plan(plan_name):
return Plan(
self._commcell_object,
plan_name,
self._plans[plan_name]
)
raise SDKException(
'Plan', '102', 'No plan exists with name: {0}'.format(
plan_name)
)
def delete(self, plan_name):
if not isinstance(plan_name, basestring):
raise SDKException('Plan', '101')
else:
plan_name = plan_name.lower()
if self.has_plan(plan_name):
plan_id = self._plans[plan_name]
delete_plan = self._services['DELETE_PLAN'] % (plan_id)
flag, response = self._cvpysdk_object.make_request('DELETE', delete_plan)
error_code = 0
if flag:
if 'error' in response.json():
if isinstance(response.json()['error'], list):
error_code = response.json()['error'][0]['status']['errorCode']
else:
error_code = response.json()['errorCode']
if error_code != 0:
o_str = 'Failed to delete plan'
if isinstance(response.json()['error'], list):
error_message = response.json()['error'][0]['status']['errorMessage']
else:
error_message = response.json()['errorMessage']
o_str += '\nError: "{0}"'.format(error_message)
raise SDKException('Plan', '102', o_str)
else:
self.refresh()
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
else:
raise SDKException(
'Plan',
'102',
'No plan exists with name: {0}'.format(plan_name)
)
def add(self,
plan_name,
plan_sub_type,
storage_pool_name=None,
sla_in_minutes=1440,
override_entities=None):
if not (isinstance(plan_name, basestring) and
isinstance(plan_sub_type, basestring)):
raise SDKException('Plan', '101')
else:
if self.has_plan(plan_name):
raise SDKException(
'Plan', '102', 'Plan "{0}" already exists'.format(plan_name)
)
if not plan_sub_type == 'ExchangeUser':
storage_pool_obj = self._commcell_object.storage_pools.get(
storage_pool_name)
is_dedupe = True
if 'dedupDBDetailsList' not in storage_pool_obj._storage_pool_properties['storagePoolDetails']:
is_dedupe = False
request_json = self._get_plan_template(plan_sub_type, "MSP")
request_json['plan']['summary']['rpoInMinutes'] = sla_in_minutes
request_json['plan']['summary']['description'] = "Created from CvPySDK."
request_json['plan']['summary']['plan']['planName'] = plan_name
request_json['plan']['schedule']['subTasks'][1]['options']['commonOpts'][
'automaticSchedulePattern'].update({
'minBackupInterval': 0,
'maxBackupIntervalMinutes': 0,
'minSyncInterval': 0,
'minSyncIntervalMinutes': 0
})
request_json['plan']['schedule']['subTasks'][1]['options']['commonOpts'][
'automaticSchedulePattern']['ignoreOpWindowPastMaxInterval'] = True
del request_json['plan']['schedule']['task']['taskName']
if not plan_sub_type == 'ExchangeUser':
request_json['plan']['storage']['copy'][0]['useGlobalPolicy'] = {
"storagePolicyId": int(storage_pool_obj.storage_pool_id)
}
if is_dedupe:
request_json['plan']['storage']['copy'][0]['dedupeFlags'][
'useGlobalDedupStore'] = 1
else:
del request_json['plan']['storage']['copy'][0]['storagePolicyFlags']
del request_json['plan']['storage']['copy'][0]['dedupeFlags'][
'enableDeduplication']
del request_json['plan']['storage']['copy'][0]['dedupeFlags'][
'enableClientSideDedup']
del request_json['plan']['storage']['copy'][0]['DDBPartitionInfo']
request_json['plan']['storage']['copy'][0]['extendedFlags'] = {
'useGlobalStoragePolicy': 1
}
if plan_sub_type == "Server" and 'database' in request_json['plan']:
request_json['plan']['database']['storageLog']['copy'][0]['dedupeFlags'][
'useGlobalDedupStore'] = 1
request_json['plan']['database']['storageLog']['copy'][0].pop(
'DDBPartitionInfo', None
)
request_json['plan']['database']['storageLog']['copy'][0]['dedupeFlags'][
'useGlobalPolicy'] = {
"storagePolicyId": int(storage_pool_obj.storage_pool_id)
}
request_json['plan']['storage']['copy'][1]['extendedFlags'] = {
'useGlobalStoragePolicy': 1
}
request_json['plan']['storage']['copy'][1]['useGlobalPolicy'] = {
"storagePolicyId": int(storage_pool_obj.storage_pool_id)
}
for subtask in request_json['plan']['schedule']['subTasks']:
if 'flags' in subtask['subTask'] and subtask['subTask']['flags'] == 65536:
import copy
full_schedule = copy.deepcopy(subtask)
del copy
full_schedule['subTask'].update({
'subTaskName': 'Full backup schedule',
'flags': 4194304
})
full_schedule['pattern'].update({
'freq_type': 4,
'freq_interval': 1,
'name': 'Full backup schedule',
'active_end_time': 0
})
full_schedule['options']['backupOpts']['backupLevel'] = 'FULL'
request_json['plan']['schedule']['subTasks'].append(full_schedule)
break
if isinstance(override_entities, dict):
request_json['plan']['summary']['restrictions'] = 0
request_json['plan']['inheritance'] = {
'isSealed': False
}
if 'enforcedEntities' in override_entities:
request_json['plan']['inheritance']['enforcedEntities'] = override_entities[
'enforcedEntities']
if 'privateEntities' in override_entities:
request_json['plan']['inheritance']['privateEntities'] = override_entities[
'privateEntities']
else:
request_json['plan']['summary']['restrictions'] = 1
request_json['plan']['inheritance'] = {
'isSealed': True
}
headers = self._commcell_object._headers.copy()
headers['LookupNames'] = 'False'
flag, response = self._cvpysdk_object.make_request(
'POST', self._PLANS, request_json, headers=headers
)
if flag:
if response.json():
response_value = response.json()
error_message = None
error_code = None
if 'errors' in response_value:
error_code = response_value['errors'][0]['status']['errorCode']
error_message = response_value['errors'][0]['status']['errorMessage']
if error_code > 1:
o_str = 'Failed to create new Plan\nError: "{0}"'.format(
error_message
)
raise SDKException('Plan', '102', o_str)
if 'plan' in response_value:
plan_name = response_value['plan']['summary']['plan']['planName']
self.refresh()
self._commcell_object.storage_policies.refresh()
return self.get(plan_name)
else:
o_str = ('Failed to create new plan due to error code: "{0}"\n'
'Please check the documentation for '
'more details on the error').format(error_code)
raise SDKException('Plan', '102', o_str)
else:
raise SDKException('Response', 102)
else:
response_string = self._update_response_(response.text)
raise SDKException('Response', '101', response_string)
|
Apache License 2.0
|
pollination/queenbee
|
queenbee/recipe/dag.py
|
DAG.sort_list
|
python
|
def sort_list(cls, v):
v.sort(key=lambda x: x.name)
return v
|
Sort the list of tasks by name
|
https://github.com/pollination/queenbee/blob/679f527cc28c3df496d8156ba49bc8a1613fadc7/queenbee/recipe/dag.py#L169-L172
|
from queenbee.io.outputs.task import TaskPathReturn, TaskReturn
from typing import List, Set, Union
from pydantic import Field, validator, root_validator, constr
from .task import DAGTask
from ..io.common import IOBase, find_dup_items
from ..io.inputs.dag import DAGInputs
from ..io.outputs.dag import DAGOutputs
from ..io.reference import FileReference, FolderReference, TaskReference, TaskFileReference, TaskFolderReference, TaskPathReference
class DAG(IOBase):
type: constr(regex='^DAG$') = 'DAG'
name: str = Field(
...,
description='A unique name for this dag.'
)
inputs: List[DAGInputs] = Field(
None,
description='Inputs for the DAG.'
)
fail_fast: bool = Field(
True,
description='Stop scheduling new steps, as soon as it detects that one of the'
' DAG nodes is failed. Default is True.'
)
tasks: List[DAGTask] = Field(
...,
description='Tasks are a list of DAG steps'
)
outputs: List[DAGOutputs] = Field(
None,
description='Outputs of the DAG that can be used by other DAGs.'
)
@staticmethod
def find_task_return(
tasks: List[DAGTask],
reference: Union[
TaskReference, TaskFileReference, TaskFolderReference, TaskPathReference]
) -> Union[TaskReturn, TaskPathReturn]:
filtered_tasks = [x for x in tasks if x.name == reference.name]
if not filtered_tasks:
raise ValueError(f'Task with name "{reference.name}" not found.')
task = filtered_tasks[0]
if isinstance(reference, TaskReference):
if task.loop is not None:
raise ValueError(
'Cannot refer to outputs from a looped task.'
'You must perform your own aggregation and then refer to '
'a hard coded folder path.'
)
out = [ret for ret in task.returns if ret.name == reference.variable]
if not out:
raise ValueError(
f'Failed to find referenced variable name "{reference.variable}" in '
f'"{task.name}" task.'
)
return out[0]
@validator('tasks')
def check_unique_names(cls, v):
names = [task.name for task in v]
duplicates = find_dup_items(names)
if len(duplicates) != 0:
raise ValueError(f'Duplicate names: {duplicates}')
return v
@validator('tasks')
def check_dependencies(cls, v):
task_names = [task.name for task in v]
exceptions = []
err_msg = 'DAG Task "{name}" has unresolved dependency: "{dep}"\n'
for task in v:
if task.needs is None:
continue
for dep in task.needs:
if dep not in task_names:
exceptions.append(err_msg.format(name=task.name, dep=dep))
if exceptions:
raise ValueError(''.join(exceptions))
return v
@validator('tasks')
def check_references(cls, v, values):
dag_inputs = values.get('inputs', [])
dag_input_names = set(d.name for d in dag_inputs)
exceptions = []
for task in v:
if task.arguments is None:
continue
for arg in task.argument_by_ref_source('dag'):
if arg.from_.variable not in dag_input_names:
exceptions.append(
f'Invalid input reference variable: "{arg.from_.variable}" '
f'in task "{task.name}"'
)
for arg in task.argument_by_ref_source('task'):
try:
cls.find_task_return(tasks=v, reference=arg.from_)
except ValueError as error:
exceptions.append(f'tasks.{task.name}.{arg.name}: %s' % error)
if exceptions:
raise ValueError('\n '.join(exceptions))
return v
@validator('tasks', each_item=True)
def check_template_name(cls, v, values):
name = values.get('name')
plugin = v.template.split('/')[0]
assert plugin != name, 'Task cannot refer to its own DAG as a template.'
return v
@validator('tasks')
|
MIT License
|
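The sort_list validator above keeps a DAG's task list in deterministic order. A minimal sketch of the same idea is shown below, assuming pydantic v1-style validators as used in the context above; Task and MiniDAG are simplified stand-ins, not the queenbee models.

from typing import List
from pydantic import BaseModel, validator

class Task(BaseModel):
    name: str

class MiniDAG(BaseModel):
    tasks: List[Task]

    @validator('tasks')
    def sort_list(cls, v):
        # sort in place so two DAGs with the same tasks serialize identically
        v.sort(key=lambda x: x.name)
        return v

dag = MiniDAG(tasks=[Task(name='b'), Task(name='a')])
print([t.name for t in dag.tasks])  # ['a', 'b']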
xchem/fragalysis
|
frag/alysis/models.py
|
Owner.__init__
|
python
|
def __init__(self, object_list, title):
self.object_list = object_list
self.title = title
|
An Owner will own multiple objects. E.g. PDB 4CUP owns waters.
:param object_list: the list of objects it owns
:param title: the title of the object
|
https://github.com/xchem/fragalysis/blob/216837507819572f5d0d3295ebde94b3b14019d1/frag/alysis/models.py#L102-L109
|
import uuid
class StructHolder(object):
def __init__(
self, struct_id, resid_pdb=None, ligand=None, water_pdb=None, apo_pdb=None
):
self.struct_id = struct_id
self.water_pdb = water_pdb
self.resid_pdb = resid_pdb
self.ligand = ligand
class ClusterStuff(object):
def __init__(self, parser, lamb, cluster):
self.parser = parser
self.lamb = lamb
self.cluster = cluster
self.owner_list = []
self.out_clusters = {}
def run(self, input_data):
self.owner_list = self.parser(input_data)
data_set = self.owner_list_conv()
for this_type in data_set:
self.out_clusters[this_type] = []
self.add_clust_to_obj(
self.cluster(data_set[this_type]["data"], this_type),
data_set[this_type]["objects"],
this_type,
)
def add_clust_to_obj(self, cluster_obj, object_list, type):
for i, cluster in enumerate(cluster_obj.clusters):
new_cluster = Cluster(cluster_obj.clusters[cluster], type, i)
self.out_clusters[type].append(new_cluster)
for i, val in enumerate(cluster_obj.dataClusterId):
this_clust = [x for x in self.out_clusters[type] if x.cluster == val][0]
this_obj = object_list[i]
this_obj.cluster = this_clust
this_clust.object_list.append(this_obj)
def write_output(self):
for type in self.out_clusters:
self.out_clusters[type]
def owner_list_conv(self):
out_d = {}
for owner in self.owner_list:
for obj in owner.object_list:
if obj.object_desc in out_d:
out_d[obj.object_desc]["data"].append(obj.value_array)
out_d[obj.object_desc]["objects"].append(obj)
else:
# use the same keys that run() and add_clust_to_obj() read back ("data"/"objects")
out_d[obj.object_desc] = {
"data": [obj.value_array],
"objects": [obj],
}
return out_d
class Object(object):
def __init__(self, value_array, object_desc):
self.value_array = value_array
self.object_desc = object_desc
self.uuid = str(uuid.uuid4())
self.cluster = -1
class Owner(object):
|
Apache License 2.0
|
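The Owner/Object classes above are grouped per object description before clustering. The short sketch below re-states that grouping with dict.setdefault rather than the original if/else; the sample Owner for PDB 4CUP and its value arrays are made up for illustration.

import uuid

class Object(object):
    def __init__(self, value_array, object_desc):
        self.value_array = value_array
        self.object_desc = object_desc
        self.uuid = str(uuid.uuid4())

class Owner(object):
    def __init__(self, object_list, title):
        self.object_list = object_list
        self.title = title

# group every owned object by its description, as owner_list_conv does
owners = [Owner([Object([1.0, 2.0], 'water'), Object([0.5], 'ligand')], '4CUP')]
grouped = {}
for owner in owners:
    for obj in owner.object_list:
        grouped.setdefault(obj.object_desc, {'data': [], 'objects': []})
        grouped[obj.object_desc]['data'].append(obj.value_array)
        grouped[obj.object_desc]['objects'].append(obj)
print({k: len(v['objects']) for k, v in grouped.items()})  # {'water': 1, 'ligand': 1}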
nii-cloud/dodai-compute
|
nova/api/openstack/common.py
|
limited
|
python
|
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
msg = _('offset param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _('offset param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
|
Return a slice of items according to requested offset and limit.
@param items: A sliceable entity
@param request: `wsgi.Request` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
@kwarg max_limit: The maximum number of items to return from 'items'
|
https://github.com/nii-cloud/dodai-compute/blob/d9bea632913c0ddc6f59c6120f60daea369d09cc/nova/api/openstack/common.py#L127-L162
|
import functools
import re
import urlparse
from xml.dom import minidom
import webob
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova.api.openstack import wsgi
from nova.compute import vm_states
from nova.compute import task_states
LOG = logging.getLogger('nova.api.openstack.common')
FLAGS = flags.FLAGS
XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
_STATE_MAP = {
vm_states.ACTIVE: {
'default': 'ACTIVE',
task_states.REBOOTING: 'REBOOT',
task_states.UPDATING_PASSWORD: 'PASSWORD',
task_states.RESIZE_VERIFY: 'VERIFY_RESIZE',
},
vm_states.BUILDING: {
'default': 'BUILD',
},
vm_states.REBUILDING: {
'default': 'REBUILD',
},
vm_states.STOPPED: {
'default': 'STOPPED',
},
vm_states.MIGRATING: {
'default': 'MIGRATING',
},
vm_states.RESIZING: {
'default': 'RESIZE',
},
vm_states.PAUSED: {
'default': 'PAUSED',
},
vm_states.SUSPENDED: {
'default': 'SUSPENDED',
},
vm_states.RESCUED: {
'default': 'RESCUE',
},
vm_states.ERROR: {
'default': 'ERROR',
},
vm_states.DELETED: {
'default': 'DELETED',
},
}
def status_from_state(vm_state, task_state='default'):
task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE'))
status = task_map.get(task_state, task_map['default'])
LOG.debug("Generated %(status)s from vm_state=%(vm_state)s "
"task_state=%(task_state)s." % locals())
return status
def vm_state_from_status(status):
for state, task_map in _STATE_MAP.iteritems():
status_string = task_map.get("default")
if status.lower() == status_string.lower():
return state
def get_pagination_params(request):
params = {}
for param in ['marker', 'limit']:
if not param in request.GET:
continue
try:
params[param] = int(request.GET[param])
except ValueError:
msg = _('%s param must be an integer') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
if params[param] < 0:
msg = _('%s param must be positive') % param
raise webob.exc.HTTPBadRequest(explanation=msg)
return params
|
Apache License 2.0
|
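The limited() docstring above describes the offset/limit slicing rules. Below is a minimal usage sketch of those rules under stated assumptions: FakeRequest and the simplified limited() are not nova code, and the webob error handling and FLAGS-based max_limit are omitted.

class FakeRequest(object):
    """Stand-in for wsgi.Request: only the GET dict is needed here."""
    def __init__(self, **params):
        self.GET = {k: str(v) for k, v in params.items()}

def limited(items, request, max_limit=1000):
    # simplified re-statement of the slicing rules documented above
    offset = int(request.GET.get('offset', 0))
    limit = int(request.GET.get('limit', max_limit))
    limit = min(max_limit, limit or max_limit)
    return items[offset:offset + limit]

items = list(range(10))
print(limited(items, FakeRequest(offset=4, limit=3)))  # [4, 5, 6]
print(limited(items, FakeRequest(limit=0)))            # limit of 0 falls back to max_limit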