repository_name: stringlengths (7 to 107)
function_path: stringlengths (4 to 190)
function_identifier: stringlengths (1 to 236)
language: stringclasses (1 value)
function: stringlengths (9 to 647k)
docstring: stringlengths (5 to 488k)
function_url: stringlengths (71 to 285)
context: stringlengths (0 to 2.51M)
license: stringclasses (5 values)
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/batch_v2alpha1_api.py
BatchV2alpha1Api.patch_namespaced_cron_job
python
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs):
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
patch_namespaced_cron_job

partially update the specified CronJob

This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
>>> result = thread.get()

:param async_req bool: execute request asynchronously
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to "force" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number is provided, it will be the total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return: V2alpha1CronJob
         If the method is called asynchronously, returns the request thread.
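A usage sketch that mirrors the call pattern given in the docstring; the CronJob name, namespace, and patch body are hypothetical, and the BatchV2alpha1Api is assumed to sit on top of an already-configured ApiClient.

from kubernetes_asyncio import client

api = client.BatchV2alpha1Api()  # assumes client configuration has been loaded elsewhere
patch_body = {"spec": {"schedule": "*/10 * * * *"}}  # hypothetical JSON merge patch
thread = api.patch_namespaced_cron_job("my-cronjob", "default", patch_body, async_req=True)
cron_job = thread.get()  # V2alpha1CronJob, per the docstring's return type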
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/api/batch_v2alpha1_api.py#L918-L947
from __future__ import absolute_import import re import six from kubernetes_asyncio.client.api_client import ApiClient from kubernetes_asyncio.client.exceptions import ( ApiTypeError, ApiValueError ) class BatchV2alpha1Api(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_cron_job(self, namespace, body, **kwargs): kwargs['_return_http_data_only'] = True return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs) def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'body', 'pretty', 'dry_run', 'field_manager' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`") if self.api_client.client_side_validation and ('body' not in local_var_params or local_var_params['body'] is None): raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: query_params.append(('fieldManager', local_var_params['field_manager'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V2alpha1CronJob', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_cron_job(self, namespace, **kwargs): kwargs['_return_http_data_only'] = True return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs) def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'pretty', '_continue', 'dry_run', 'field_selector', 'grace_period_seconds', 'label_selector', 'limit', 'orphan_dependents', 'propagation_policy', 'resource_version', 'timeout_seconds', 'body' ] all_params.extend( [ 'async_req', 
'_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if '_continue' in local_var_params and local_var_params['_continue'] is not None: query_params.append(('continue', local_var_params['_continue'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: query_params.append(('fieldSelector', local_var_params['field_selector'])) if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: query_params.append(('labelSelector', local_var_params['label_selector'])) if 'limit' in local_var_params and local_var_params['limit'] is not None: query_params.append(('limit', local_var_params['limit'])) if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: query_params.append(('resourceVersion', local_var_params['resource_version'])) if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_namespaced_cron_job(self, name, namespace, **kwargs): kwargs['_return_http_data_only'] = True return self.delete_namespaced_cron_job_with_http_info(name, 
namespace, **kwargs) def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs): local_var_params = locals() all_params = [ 'name', 'namespace', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy', 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('name' not in local_var_params or local_var_params['name'] is None): raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`") if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`") collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): kwargs['_return_http_data_only'] = True return self.get_api_resources_with_http_info(**kwargs) def get_api_resources_with_http_info(self, **kwargs): local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) local_var_params[key] = val del 
local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def list_cron_job_for_all_namespaces(self, **kwargs): kwargs['_return_http_data_only'] = True return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs) def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs): local_var_params = locals() all_params = [ 'allow_watch_bookmarks', '_continue', 'field_selector', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method list_cron_job_for_all_namespaces" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) if '_continue' in local_var_params and local_var_params['_continue'] is not None: query_params.append(('continue', local_var_params['_continue'])) if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: query_params.append(('fieldSelector', local_var_params['field_selector'])) if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: query_params.append(('labelSelector', local_var_params['label_selector'])) if 'limit' in local_var_params and local_var_params['limit'] is not None: query_params.append(('limit', local_var_params['limit'])) if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: query_params.append(('resourceVersion', local_var_params['resource_version'])) if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) if 'watch' in local_var_params and local_var_params['watch'] is not None: query_params.append(('watch', local_var_params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/cronjobs', 'GET', path_params, query_params, 
header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V2alpha1CronJobList', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_cron_job(self, namespace, **kwargs): kwargs['_return_http_data_only'] = True return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs) def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'pretty', 'allow_watch_bookmarks', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) if '_continue' in local_var_params and local_var_params['_continue'] is not None: query_params.append(('continue', local_var_params['_continue'])) if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: query_params.append(('fieldSelector', local_var_params['field_selector'])) if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: query_params.append(('labelSelector', local_var_params['label_selector'])) if 'limit' in local_var_params and local_var_params['limit'] is not None: query_params.append(('limit', local_var_params['limit'])) if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: query_params.append(('resourceVersion', local_var_params['resource_version'])) if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) if 'watch' in local_var_params and local_var_params['watch'] is not None: query_params.append(('watch', local_var_params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, 
files=local_var_files, response_type='V2alpha1CronJobList', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
Apache License 2.0
awslabs/aws-crt-python
awscrt/io.py
TlsContextOptions.create_client_with_mtls_from_path
python
def create_client_with_mtls_from_path(cert_filepath, pk_filepath):
    assert isinstance(cert_filepath, str)
    assert isinstance(pk_filepath, str)

    cert_buffer = _read_binary_file(cert_filepath)
    key_buffer = _read_binary_file(pk_filepath)

    return TlsContextOptions.create_client_with_mtls(cert_buffer, key_buffer)
Create options configured for use with mutual TLS in client mode.

Both files are treated as PKCS #7 PEM armored. They are loaded from disk and stored in buffers internally.

Args:
    cert_filepath (str): Path to certificate file.
    pk_filepath (str): Path to private key file.

Returns:
    TlsContextOptions:
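A brief usage sketch; the certificate and key file paths are placeholders.

from awscrt import io

tls_options = io.TlsContextOptions.create_client_with_mtls_from_path(
    "/path/to/client-cert.pem",   # placeholder certificate path
    "/path/to/private-key.pem")   # placeholder private key path
# verify_peer defaults to True for client-mode options (set in TlsContextOptions.__init__)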
https://github.com/awslabs/aws-crt-python/blob/aa4511c1cea931c30051b2fe43be76f86b6a2ba2/awscrt/io.py#L235-L256
import _awscrt from awscrt import NativeResource from enum import IntEnum import threading class LogLevel(IntEnum): NoLogs = 0 Fatal = 1 Error = 2 Warn = 3 Info = 4 Debug = 5 Trace = 6 def init_logging(log_level, file_name): assert log_level is not None assert file_name is not None _awscrt.init_logging(log_level, file_name) class EventLoopGroup(NativeResource): __slots__ = ('shutdown_event') def __init__(self, num_threads=None, cpu_group=None): super().__init__() if num_threads is None: num_threads = 0 if cpu_group is None: is_pinned = False cpu_group = 0 else: is_pinned = True shutdown_event = threading.Event() def on_shutdown(): shutdown_event.set() self.shutdown_event = shutdown_event self._binding = _awscrt.event_loop_group_new(num_threads, is_pinned, cpu_group, on_shutdown) class HostResolverBase(NativeResource): __slots__ = () class DefaultHostResolver(HostResolverBase): __slots__ = () def __init__(self, event_loop_group, max_hosts=16): assert isinstance(event_loop_group, EventLoopGroup) super().__init__() self._binding = _awscrt.host_resolver_new_default(max_hosts, event_loop_group) class ClientBootstrap(NativeResource): __slots__ = ('shutdown_event') def __init__(self, event_loop_group, host_resolver): assert isinstance(event_loop_group, EventLoopGroup) assert isinstance(host_resolver, HostResolverBase) super().__init__() shutdown_event = threading.Event() def on_shutdown(): shutdown_event.set() self.shutdown_event = shutdown_event self._binding = _awscrt.client_bootstrap_new(event_loop_group, host_resolver, on_shutdown) def _read_binary_file(filepath): with open(filepath, mode='rb') as fh: contents = fh.read() return contents class SocketDomain(IntEnum): IPv4 = 0 IPv6 = 1 Local = 2 class SocketType(IntEnum): Stream = 0 DGram = 1 class SocketOptions: __slots__ = ( 'domain', 'type', 'connect_timeout_ms', 'keep_alive', 'keep_alive_timeout_secs', 'keep_alive_interval_secs', 'keep_alive_max_probes' ) def __init__(self): for slot in self.__slots__: setattr(self, slot, None) self.domain = SocketDomain.IPv6 self.type = SocketType.Stream self.connect_timeout_ms = 5000 self.keep_alive = False self.keep_alive_interval_secs = 0 self.keep_alive_timeout_secs = 0 self.keep_alive_max_probes = 0 class TlsVersion(IntEnum): SSLv3 = 0 TLSv1 = 1 TLSv1_1 = 2 TLSv1_2 = 3 TLSv1_3 = 4 DEFAULT = 128 class TlsContextOptions: __slots__ = ( 'min_tls_ver', 'ca_dirpath', 'ca_buffer', 'alpn_list', 'certificate_buffer', 'private_key_buffer', 'pkcs12_filepath', 'pkcs12_password', 'verify_peer') def __init__(self): for slot in self.__slots__: setattr(self, slot, None) self.min_tls_ver = TlsVersion.DEFAULT self.verify_peer = True @staticmethod
Apache License 2.0
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/ad_analytics_min_query_request.py
AdAnalyticsMinQueryRequest.__eq__
python
def __eq__(self, other):
    if not isinstance(other, AdAnalyticsMinQueryRequest):
        return False
    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/ad_analytics_min_query_request.py#L30-L35
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.ad_analytics_attribute import AdAnalyticsAttribute
from bitmovin_api_sdk.models.ad_analytics_query_request import AdAnalyticsQueryRequest
from bitmovin_api_sdk.models.analytics_interval import AnalyticsInterval
import pprint


class AdAnalyticsMinQueryRequest(AdAnalyticsQueryRequest):

    def to_dict(self):
        result = {}

        if hasattr(super(AdAnalyticsMinQueryRequest, self), "to_dict"):
            result = super(AdAnalyticsMinQueryRequest, self).to_dict()

        return result

    def to_str(self):
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()
MIT License
babybuddy/babybuddy
reports/utils.py
rangeselector_date
python
def rangeselector_date():
    return {
        'bgcolor': 'rgb(35, 149, 86)',
        'activecolor': 'rgb(25, 108, 62)',
        'buttons': [
            {'count': 7, 'label': '1w', 'step': 'day', 'stepmode': 'backward'},
            {'count': 14, 'label': '2w', 'step': 'day', 'stepmode': 'backward'},
            {'count': 1, 'label': '1m', 'step': 'month', 'stepmode': 'backward'},
            {'count': 3, 'label': '3m', 'step': 'month', 'stepmode': 'backward'},
            {'step': 'all'}
        ]
    }
Graph date range selector settings for 1w, 2w, 1m, 3m, and all.

:returns: a dict of settings for the selectors.
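A hedged sketch of wiring the returned settings into a Plotly layout; the `reports.utils` import and the go.Layout construction are assumptions based on standard plotly usage, not code shown in this record.

import plotly.graph_objs as go
from reports import utils  # assumed import path for reports/utils.py

# Attach the selector buttons to the x axis of a figure layout
layout = go.Layout(xaxis={'rangeselector': utils.rangeselector_date()})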
https://github.com/babybuddy/babybuddy/blob/a361f96e4db1d77ce3e787af15e17eb04698116e/reports/utils.py#L39-L76
def default_graph_layout_options():
    return {
        'paper_bgcolor': 'rgb(52, 58, 64)',
        'plot_bgcolor': 'rgb(52, 58, 64)',
        'font': {
            'color': 'rgba(255, 255, 255, 1)',
            'family': '-apple-system, BlinkMacSystemFont, "Segoe UI", '
                      'Roboto, "Helvetica Neue", Arial, sans-serif, '
                      '"Apple Color Emoji", "Segoe UI Emoji", '
                      '"Segoe UI Symbol"',
            'size': 14,
        },
        'margin': {'b': 80, 't': 80},
        'xaxis': {
            'titlefont': {'color': 'rgba(255, 255, 255, 0.5)'},
            'gridcolor': 'rgba(0, 0, 0, 0.25)',
            'zerolinecolor': 'rgba(0, 0, 0, 0.5)'
        },
        'yaxis': {
            'titlefont': {'color': 'rgba(255, 255, 255, 0.5)'},
            'gridcolor': 'rgba(0, 0, 0, 0.25)',
            'zerolinecolor': 'rgba(0, 0, 0, 0.5)'
        }
    }
BSD 2-Clause Simplified License
beloglazov/openstack-neat
neat/locals/vm_selection/algorithms.py
minimum_migration_time_max_cpu_factory
python
def minimum_migration_time_max_cpu_factory(time_step, migration_time, params):
    return lambda vms_cpu, vms_ram, state=None: (
        [minimum_migration_time_max_cpu(params['last_n'], vms_cpu, vms_ram)],
        {})
Creates the minimum migration time / max CPU usage VM selection algorithm.

:param time_step: The length of the simulation time step in seconds.
 :type time_step: int,>=0

:param migration_time: The VM migration time in seconds.
 :type migration_time: float,>=0

:param params: A dictionary containing the algorithm's parameters.
 :type params: dict(str: *)

:return: A function implementing the minimum migration time / max CPU VM selection.
 :rtype: function
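A small sketch of calling the factory, following the signature and the return shape shown above; the parameter values and the VM data are invented.

algorithm = minimum_migration_time_max_cpu_factory(
    time_step=300, migration_time=20.0, params={'last_n': 2})

vms_cpu = {'vm1': [22, 80, 95], 'vm2': [5, 10, 8]}   # hypothetical per-VM CPU histories
vms_ram = {'vm1': 2048, 'vm2': 1024}                 # hypothetical per-VM RAM usage
selected_vms, state = algorithm(vms_cpu, vms_ram)    # ([<vm chosen for migration>], {})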
https://github.com/beloglazov/openstack-neat/blob/a5a853ae2affb0cdc582e3ab641737f5ebd3d0a7/neat/locals/vm_selection/algorithms.py#L89-L107
from contracts import contract
from neat.contracts_primitive import *
from neat.contracts_extra import *

from random import choice
import operator

import logging
log = logging.getLogger(__name__)


@contract
def random_factory(time_step, migration_time, params):
    return lambda vms_cpu, vms_ram, state=None: ([random(vms_cpu)], {})


@contract
def minimum_utilization_factory(time_step, migration_time, params):
    return lambda vms_cpu, vms_ram, state=None: (
        [minimum_utilization(vms_cpu)], {})


@contract
def minimum_migration_time_factory(time_step, migration_time, params):
    return lambda vms_cpu, vms_ram, state=None: (
        [minimum_migration_time(vms_ram)], {})


@contract
Apache License 2.0
s1m0n38/soccerapi
soccerapi/api/base.py
ApiBase.url_to_competition
python
def url_to_competition(self, url: str) -> str:
    pass
Convert a url to the corresponding competition. First check its validity using a regex, then extract the competition from it.
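ApiBase leaves this method abstract, so a concrete bookmaker class supplies the parsing; the sketch below is hypothetical (invented URL scheme and regex), only illustrating the validate-then-extract flow the docstring describes.

import re

class ApiExampleBookmaker(ApiBase):
    def url_to_competition(self, url: str) -> str:
        # Hypothetical URL scheme: https://www.example-bookmaker.com/odds/<country>-<league>
        match = re.match(r'https?://www\.example-bookmaker\.com/odds/([\w-]+)$', url)
        if match is None:
            raise ValueError(f'Invalid url: {url}')
        return match.group(1)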
https://github.com/s1m0n38/soccerapi/blob/dd73ac0ed4611b8235226a01986e72a47414a91d/soccerapi/api/base.py#L14-L18
import abc
from typing import Dict, List, Tuple


class ApiBase(abc.ABC):

    @abc.abstractmethod
    def requests(self, competition: str, **kwargs) -> Tuple:
        pass

    @abc.abstractmethod
MIT License
repsac/io_three
addons/io_three/exporter/utilities.py
id_from_name
python
def id_from_name(name):
    return str(uuid.uuid3(uuid.NAMESPACE_DNS, name)).upper()
Generate a UUID using a name as the namespace

:type name: str
:rtype: str
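Because uuid3 hashes the name under a fixed namespace, the mapping is deterministic; a quick illustration with a made-up object name:

uuid_a = id_from_name("MyMeshName")
uuid_b = id_from_name("MyMeshName")
assert uuid_a == uuid_b           # same name always yields the same UUID
assert uuid_a == uuid_a.upper()   # already upper-cased, unlike the random utilities.id()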
https://github.com/repsac/io_three/blob/56b2f35b7d56aab6df131c7a3aecb43cda915ca3/addons/io_three/exporter/utilities.py#L49-L56
import uuid
import hashlib

from .. import constants

ROUND = constants.DEFAULT_PRECISION


def bit_mask(flags):
    bit = 0
    true = lambda x, y: (x | (1 << y))
    false = lambda x, y: (x & (~(1 << y)))

    for mask, position in constants.MASK.items():
        func = true if flags.get(mask) else false
        bit = func(bit, position)

    return bit


def hash(value):
    hash_ = hashlib.md5()
    hash_.update(repr(value).encode('utf8'))
    return hash_.hexdigest()


def id():
    return str(uuid.uuid4()).upper()
MIT License
ethereum/trinity
trinity/_utils/datastructures.py
OrderedTaskPreparation.num_ready
python
def num_ready(self) -> int:
    return self._ready_tasks.qsize()
How many tasks are waiting to be picked up by :meth:`ready_tasks`?
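A hedged sketch built around the OrderedTaskPreparation constructor shown in the context below; the integer tasks with `task - 1` as the dependency are an invented toy example, not usage taken from trinity itself.

otp = OrderedTaskPreparation(
    OrderedTaskPreparation.NoPrerequisites,      # tasks become ready as soon as their dependency is known
    id_extractor=lambda task: task,
    dependency_extractor=lambda task: task - 1,
)
otp.set_finished_dependency(0)                   # seed the dependency chain
otp.register_tasks((1, 2, 3))
assert otp.num_ready() == 3                      # queued, waiting to be picked up by ready_tasks()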
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/trinity/_utils/datastructures.py#L737-L739
from abc import ABC, abstractmethod import asyncio from enum import Enum from functools import ( total_ordering, ) from itertools import ( count, ) from typing import ( Any, Callable, Collection, Dict, Generic, Iterable, Set, Tuple, Type, TypeVar, ) from eth_utils import ( ValidationError, to_tuple, ) from eth_utils.toolz import ( identity, mapcat, ) from eth.typing import ( StaticMethod, ) from trinity._utils.queues import ( queue_get_batch, queue_get_nowait, ) from trinity._utils.tree_root import ( RootTracker, ) TPrerequisite = TypeVar('TPrerequisite', bound=Enum) TTask = TypeVar('TTask') TTaskID = TypeVar('TTaskID') @total_ordering class SortableTask(Generic[TTask]): _order_fn: StaticMethod[Callable[[TTask], Any]] = None @classmethod def orderable_by_func(cls, order_fn: Callable[[TTask], Any]) -> 'Type[SortableTask[TTask]]': return type('PredefinedSortableTask', (cls, ), dict(_order_fn=staticmethod(order_fn))) def __init__(self, task: TTask) -> None: if self._order_fn is None: raise ValidationError("Must create this class with orderable_by_func before init") self._task = task _comparable_val = self._order_fn(task) try: self_equal = _comparable_val == _comparable_val self_lt = _comparable_val < _comparable_val self_gt = _comparable_val > _comparable_val if not self_equal or self_lt or self_gt: raise ValidationError( "The orderable function provided a comparable value that does not compare" f"validly to itself: equal to self? {self_equal}, less than self? {self_lt}, " f"greater than self? {self_gt}" ) except TypeError as exc: raise ValidationError( f"The provided order_fn {self._order_fn!r} did not return a sortable " f"value from {task!r}" ) from exc self._comparable_val = _comparable_val @property def original(self) -> TTask: return self._task def __eq__(self, other: Any) -> bool: if not isinstance(other, SortableTask): return False else: return self._comparable_val == other._comparable_val def __lt__(self, other: Any) -> bool: if not isinstance(other, SortableTask): return False else: return self._comparable_val < other._comparable_val class TaskQueue(Generic[TTask]): _task_wrapper: Type[SortableTask[TTask]] _in_progress: Dict[int, Tuple[TTask, ...]] _open_queue: 'asyncio.PriorityQueue[SortableTask[TTask]]' _tasks: Set[TTask] def __init__( self, maxsize: int = 0, order_fn: Callable[[TTask], Any] = identity, *, loop: asyncio.AbstractEventLoop = None) -> None: self._maxsize = maxsize self._full_lock = asyncio.Lock(loop=loop) self._open_queue = asyncio.PriorityQueue(maxsize, loop=loop) self._task_wrapper = SortableTask.orderable_by_func(order_fn) self._id_generator = count() self._tasks = set() self._in_progress = {} async def add(self, tasks: Tuple[TTask, ...]) -> None: if not isinstance(tasks, tuple): raise ValidationError(f"must pass a tuple of tasks to add(), but got {tasks!r}") already_pending = self._tasks.intersection(tasks) if already_pending: raise ValidationError( f"Duplicate tasks detected: {already_pending!r} are already present in the queue" ) remaining = tuple(sorted(map(self._task_wrapper, tasks))) while remaining: num_tasks = len(self._tasks) if self._maxsize <= 0: open_slots = len(remaining) elif num_tasks < self._maxsize: open_slots = self._maxsize - num_tasks else: await self._full_lock.acquire() continue queueing, remaining = remaining[:open_slots], remaining[open_slots:] for task in queueing: await self._open_queue.put(task) original_queued = tuple(task.original for task in queueing) self._tasks.update(original_queued) if self._full_lock.locked() and len(self._tasks) 
< self._maxsize: self._full_lock.release() def get_nowait(self, max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]: if self._open_queue.empty(): raise asyncio.QueueFull("No tasks are available to get") else: ranked_tasks = queue_get_nowait(self._open_queue, max_results) pending_tasks = tuple(task.original for task in ranked_tasks) next_id = next(self._id_generator) self._in_progress[next_id] = pending_tasks return (next_id, pending_tasks) async def get(self, max_results: int = None) -> Tuple[int, Tuple[TTask, ...]]: ranked_tasks = await queue_get_batch(self._open_queue, max_results) pending_tasks = tuple(task.original for task in ranked_tasks) next_id = next(self._id_generator) self._in_progress[next_id] = pending_tasks return (next_id, pending_tasks) async def complete(self, batch_id: int, completed: Collection[TTask]) -> None: if batch_id not in self._in_progress: raise ValidationError(f"batch id {batch_id} not recognized, with tasks {completed!r}") attempted = self._in_progress.pop(batch_id) unrecognized_tasks = set(completed).difference(attempted) if unrecognized_tasks: self._in_progress[batch_id] = attempted raise ValidationError( f"cannot complete tasks {unrecognized_tasks!r} in this batch, only {attempted!r}" ) incomplete = set(attempted).difference(completed) for task in incomplete: wrapped_task = self._task_wrapper(task) try: self._open_queue.put_nowait(wrapped_task) except asyncio.QueueFull: await self._open_queue.put(wrapped_task) else: await asyncio.sleep(0) self._tasks.difference_update(completed) if self._full_lock.locked() and len(self._tasks) < self._maxsize: self._full_lock.release() def num_in_progress(self) -> int: return len(self._tasks) - self._open_queue.qsize() def num_pending(self) -> int: return self._open_queue.qsize() def __len__(self) -> int: return len(self._tasks) def __contains__(self, task: TTask) -> bool: return task in self._tasks class BaseTaskPrerequisites(Generic[TTask, TPrerequisite]): _prereqs: Iterable[TPrerequisite] _completed_prereqs: Set[TPrerequisite] _task: TTask @classmethod def from_enum(cls, prereqs: Type[TPrerequisite]) -> 'Type[BaseTaskPrerequisites[Any, Any]]': return type('CompletionFor' + prereqs.__name__, (cls, ), dict(_prereqs=prereqs)) def __init__(self, task: TTask) -> None: self._task = task self._completed_prereqs = set() @property def task(self) -> TTask: return self._task @property def is_complete(self) -> bool: return len(self.remaining_prereqs) == 0 def set_complete(self) -> None: for prereq in self.remaining_prereqs: self.finish(prereq) @property def remaining_prereqs(self) -> Set[TPrerequisite]: return set(self._prereqs).difference(self._completed_prereqs) def finish(self, prereq: TPrerequisite) -> None: if prereq not in self._prereqs: raise ValidationError( "Prerequisite %r is not recognized by task %r" % (prereq, self._task) ) elif prereq in self._completed_prereqs: raise ValidationError( "Prerequisite %r is already complete in task %r" % (prereq, self._task) ) else: self._completed_prereqs.add(prereq) def __repr__(self) -> str: return ( f'<{type(self).__name__}({self._task!r}, done={self._completed_prereqs!r}, ' f'remaining={self.remaining_prereqs!r})>' ) class DuplicateTasks(Exception, Generic[TTask]): def __init__(self, msg: str, duplicates: Collection[TTask]) -> None: super().__init__(msg) self.duplicates = duplicates class MissingDependency(Exception): pass class NoPrerequisites(Enum): pass class BaseOrderedTaskPreparation(ABC, Generic[TTask, TTaskID]): @abstractmethod def set_finished_dependency(self, 
finished_task: TTask) -> None: ... @abstractmethod def register_tasks( self, tasks: Collection[TTask], ignore_duplicates: bool = False) -> Tuple[TTask, ...]: ... @abstractmethod async def wait_add_tasks( self, tasks: Collection[TTask], ignore_duplicates: bool = False) -> Tuple[TTask, ...]: ... @abstractmethod async def ready_tasks(self, max_results: int = None) -> Tuple[TTask, ...]: ... @abstractmethod def has_ready_tasks(self) -> bool: ... class OrderedTaskPreparation( BaseOrderedTaskPreparation[TTask, TTaskID], Generic[TTask, TTaskID, TPrerequisite]): _id_of: StaticMethod[Callable[[TTask], TTaskID]] _dependency_of: StaticMethod[Callable[[TTask], TTaskID]] _default_max_depth = 10 _prereq_tracker: Type[BaseTaskPrerequisites[TTask, TPrerequisite]] _roots: RootTracker[TTaskID] NoPrerequisites = NoPrerequisites def __init__( self, prerequisites: Type[TPrerequisite], id_extractor: Callable[[TTask], TTaskID], dependency_extractor: Callable[[TTask], TTaskID], accept_dangling_tasks: bool = False, max_depth: int = None, max_tasks: int = None) -> None: self._prereq_tracker = BaseTaskPrerequisites.from_enum(prerequisites) self._id_of = id_extractor self._dependency_of = dependency_extractor self._accept_dangling_tasks = accept_dangling_tasks if max_depth is None: self._max_depth = self._default_max_depth elif max_depth < 0: raise ValidationError(f"The maximum depth must be at least 0, not {max_depth}") else: self._max_depth = max_depth self._max_tasks = max_tasks self._ready_count_dropped = asyncio.Event() self._tasks: Dict[TTaskID, BaseTaskPrerequisites[TTask, TPrerequisite]] = {} self._unready: Set[TTaskID] = set() self._ready_tasks: 'asyncio.Queue[TTask]' = asyncio.Queue() self._declared_finished: Set[TTaskID] = set() self._roots = RootTracker() self._last_yielded_tasks: Tuple[TTask, ...] = tuple() def set_finished_dependency(self, finished_task: TTask) -> None: completed = self._prereq_tracker(finished_task) completed.set_complete() task_id = self._id_of(finished_task) if task_id in self._tasks: raise DuplicateTasks( f"Can't set a new finished dependency {finished_task} id:{task_id}, " "it's already registered", (finished_task, ), ) self._tasks[task_id] = completed self._declared_finished.add(task_id) dependency_id = self._dependency_of(finished_task) self._roots.add(task_id, dependency_id) @to_tuple def register_tasks( self, tasks: Collection[TTask], ignore_duplicates: bool = False) -> Iterable[TTask]: identified_tasks = dict((self._id_of(task), task) for task in tasks) if len(identified_tasks) < len(tasks) and not ignore_duplicates: raise ValidationError( f"May not register same task twice in the same call. Tried to register: {tasks}" ) duplicates = tuple( task for task_id, task in identified_tasks.items() if task_id in self._tasks ) if duplicates and not ignore_duplicates: raise DuplicateTasks( f"Cannot re-register tasks: {duplicates!r} for completion", duplicates, ) task_meta_info = tuple( (self._prereq_tracker(task), task_id, self._dependency_of(task)) for task_id, task in identified_tasks.items() if task_id not in self._tasks ) for prereq_tracker, task_id, dependency_id in task_meta_info: if not self._accept_dangling_tasks and dependency_id not in self._tasks: raise MissingDependency( f"Cannot prepare task {prereq_tracker!r} with id {task_id} and " f"dependency {dependency_id} before preparing its dependency " f"among tasks {task_meta_info!r}, from the original registration: " f"{tasks!r}." 
) else: self._tasks[task_id] = prereq_tracker self._unready.add(task_id) self._roots.add(task_id, dependency_id) if prereq_tracker.is_complete and self._is_ready(prereq_tracker.task): self._mark_complete(task_id) yield prereq_tracker.task async def wait_add_tasks( self, tasks: Collection[TTask], ignore_duplicates: bool = False) -> Tuple[TTask, ...]: if self._max_tasks is not None: while self.num_tasks() >= self._max_tasks: self._ready_count_dropped.clear() await self._ready_count_dropped.wait() return self.register_tasks(tasks, ignore_duplicates=ignore_duplicates) def finish_prereq(self, prereq: TPrerequisite, tasks: Collection[TTask]) -> None: if len(self._tasks) == 0: raise ValidationError("Cannot finish a task until set_last_completion() is called") for task in tasks: task_id = self._id_of(task) if task_id not in self._tasks: raise ValidationError(f"Cannot finish task {task_id!r} before preparing it") elif task_id not in self._unready: raise ValidationError( f"Cannot finish prereq {prereq} of task {task} id:{task_id!r} that is complete" ) task_completion = self._tasks[task_id] task_completion.finish(prereq) if task_completion.is_complete and self._is_ready(task): self._mark_complete(task_id) async def ready_tasks(self, max_results: int = None) -> Tuple[TTask, ...]: for completed_task in self._last_yielded_tasks: task_id = self._id_of(completed_task) self._prune_finished(task_id) self._prune_finished(task_id) self._last_yielded_tasks = await queue_get_batch(self._ready_tasks, max_results) self._ready_count_dropped.set() return self._last_yielded_tasks def has_ready_tasks(self) -> bool: return not self._ready_tasks.empty() def _is_ready(self, task: TTask) -> bool: dependency = self._dependency_of(task) if dependency in self._declared_finished: return True elif dependency in self._tasks and dependency not in self._unready: return True else: return False def _mark_complete(self, task_id: TTaskID) -> None: qualified_tasks = tuple([task_id]) while qualified_tasks: qualified_tasks = tuple(mapcat( self._mark_one_task_complete, qualified_tasks, )) @to_tuple def _mark_one_task_complete(self, task_id: TTaskID) -> Iterable[TTaskID]: task_completion = self._tasks[task_id] self._ready_tasks.put_nowait(task_completion.task) self._unready.remove(task_id) for depending_task_id in self._roots.get_children(task_id): if self._tasks[depending_task_id].is_complete and depending_task_id in self._unready: yield depending_task_id def _prune_finished(self, task_id: TTaskID) -> None: if task_id not in self._tasks: return root_task_id, depth = self._roots.get_root(task_id) num_to_prune = depth - self._max_depth if num_to_prune <= 0: return else: self._prune(root_task_id) def _prune(self, prune_task_id: TTaskID) -> None: self._roots.prune(prune_task_id) del self._tasks[prune_task_id] if prune_task_id in self._declared_finished: self._declared_finished.remove(prune_task_id)
MIT License
devgc/eventmonkey
libem/WindowsEventManager.py
WindowsEventHandler.__init__
python
def __init__(self, filename, options, progressBar):
    self.filename = filename
    self.options = options
    self.ext = os.path.splitext(self.filename)[1]
    self.eventfile_type = None
    self.progressBar = progressBar
Initialize the Event Handler for processing an event file
https://github.com/devgc/eventmonkey/blob/f1095f6354e101d358f9fe09dc197e1a946d7ddc/libem/WindowsEventManager.py#L347-L353
import sys import logging import os import multiprocessing import json import re import hashlib import datetime import base64 import yaml import pyevtx import pyevt import xmltodict import ProgressManager import DbHandler import elastichandler import Config from libem import Utilities WINEVENT_LOGGER = logging.getLogger('WinEvent') WINEVENT_MAPPING_FILE = Utilities.GetResource( 'etc', 'etc', 'evtx.mapping.json' ) DESCRIPTION_FOLDER = Utilities.GetResource( 'etc/descriptions', 'etc.descriptions', '' ) EVENT_ID_DESCRIPTIONS = {} WINEVENT_COLUMN_ORDER = [ 'we_hash_id', 'we_index', 'we_source', 'we_jrec', 'we_tags', 'we_description', 'eventfile_type', 'computer_name', 'event_category', 'event_identifier', 'event_identifier_qualifiers', 'event_level', 'identifier', 'offset', 'source_name', 'strings', 'user_security_identifier', 'creation_time', 'written_time', 'xml_string', 'data', 'recovered' ] WINEVENT_FIELD_MAPPING = { 'we_hash_id':'CHAR(32)', 'we_index':'BIGINT UNSIGNED', 'we_source':'TEXT', 'we_jrec':'JSON', 'we_tags':'TEXT', 'we_description':'TEXT', 'eventfile_type':'CHAR(4)', 'computer_name':'TEXT', 'event_category':'BIGINT UNSIGNED', 'event_identifier':'BIGINT UNSIGNED', 'event_identifier_qualifiers':'BIGINT UNSIGNED', 'event_level':'INT UNSIGNED', 'identifier':'BIGINT UNSIGNED', 'offset':'BIGINT UNSIGNED', 'source_name':'TEXT', 'strings':'TEXT', 'user_security_identifier':'TEXT', 'creation_time':'DATETIME', 'written_time':'DATETIME', 'xml_string':'BLOB', 'data':'BLOB', 'recovered':'INT' } def DescriptionLoader(EVENT_ID_DESCRIPTIONS): if not os.path.isdir(DESCRIPTION_FOLDER): raise Exception('Description folder is not a directory: {}'.format(DESCRIPTION_FOLDER)) for filename in os.listdir(DESCRIPTION_FOLDER): fullname = os.path.join( DESCRIPTION_FOLDER, filename ) if filename.endswith('.yml'): channel,file_extension = os.path.splitext(filename) with open(fullname,'rb') as fh: descriptions = yaml.load(fh) EVENT_ID_DESCRIPTIONS[channel] = descriptions fh.close() DescriptionLoader(EVENT_ID_DESCRIPTIONS) class EvtXtractFile(object): def __init__(self): self.fullname = '' self.filehandle = None def open(self,fullname): self.fullname = fullname self.filehandle = open(self.fullname,'rb') self.file = json.load(self.filehandle) self.records = self.file["valid_records"] pass class WindowsEventManager(): def __init__(self,options): self.options = options self.total_records = 0 self._GetEventFileList() self._InitOutpath() self._InitDb() self._InitEsIndex() def _InitDb(self): if not getattr(self.options,'db_name',None): self.options.db_name = os.path.join( self.options.output_path, self.options.evidencename+'.db' ) dbConfig = DbHandler.DbConfig( db_type = 'sqlite', db = self.options.db_name ) dbHandler = dbConfig.GetDbHandle() dbHandler.DropTable('winevent') dbHandler.CreateTableFromMapping( 'winevent', WINEVENT_FIELD_MAPPING, 'PRIMARY KEY (we_hash_id)', WINEVENT_COLUMN_ORDER ) def _InitOutpath(self): try: os.makedirs(self.options.output_path) except OSError as e: pass except Exception as e: WINEVENT_LOGGER.error('{}'.format(str(e))) def _InitEsIndex(self): if self.options.eshost is not None: self.options.index_name = GetIndexName( self.options.evidencename ) es_options = elastichandler.GetEsOptions( self.options ) esConfig = elastichandler.EsConfig( es_options ) esHandler = esConfig.GetEsHandler() result = esHandler.CheckForIndex( self.options.index_name ) if result == False: esHandler.InitializeIndex( index=self.options.index_name ) result = esHandler.CheckForMapping( 'winevent', 
index=self.options.index_name ) if result == False: index_mapping = None with open(WINEVENT_MAPPING_FILE,'rb') as evtmap: index_mapping = json.load(evtmap) esHandler.InitializeMapping( 'winevent', index_mapping, index=self.options.index_name ) def _GetEventFileList(self): self.filelist = [] if not os.path.isdir(self.options.events_path): raise(Exception("Events directory does not exist: {}".format(self.options.events_path))) for dirName, subdirList, fileList in os.walk(self.options.events_path): for filename in fileList: fullname = os.path.join( dirName, filename ) if (filename.lower().endswith('.evt') or filename.lower().endswith('.evtx')): if IsSupportedEventFile(fullname): self.filelist.append(fullname) elif filename.lower().endswith('.json'): if IsSupportedEvtXtractFile(fullname): self.filelist.append(fullname) self.filelist.sort() progressBar = ProgressManager.ProgressBarClass( Config.Config.UI_TYPE, count = len(self.filelist), description = u'Enumerating Event Files'.format(dirName) ) _fcnt = 0 for filename in self.filelist: if filename.lower().endswith('evtx'): wefile = pyevtx.file() wefile.open(filename) self.total_records += wefile.get_number_of_records() self.total_records += wefile.get_number_of_recovered_records() wefile.close() elif filename.lower().endswith('evt'): wefile = pyevt.file() wefile.open(filename) self.total_records += wefile.get_number_of_records() self.total_records += wefile.get_number_of_recovered_records() wefile.close() elif filename.lower().endswith('json'): with open(filename) as wefile: jstruct = json.load(wefile) self.total_records += len(jstruct['valid_records']) wefile.close() progressBar.Increment(1) _fcnt += 1 progressBar.Finish() def ProcessEvents(self): print u'Total Records = {}'.format(self.total_records) progressManager = ProgressManager.ProgressManager() progressManager.start() progressBar = progressManager.ProgressBar( Config.Config.UI_TYPE, count = self.total_records, description = u'Processing Event Files' ) if self.options.threads_to_use > 1: if len(self.filelist) < self.options.threads_to_use: self.options.threads_to_use = len(self.filelist) processes = [] c = 0 for filename in self.filelist: while len(processes) >= self.options.threads_to_use: index_list = [] for i in range(len(processes)): result = processes[i].is_alive() if result == False: processes[i].terminate() index_list.append(i) for i in sorted(index_list, key=int, reverse=True): del processes[i] weHandler = WindowsEventHandler( filename, self.options, progressBar ) worker = multiprocessing.Process( target=WindowsEventHandler.ProcessRecords, args=(weHandler,) ) worker.start() processes.append( worker ) while len(processes) > 0: for i in range(len(processes)): try: if not processes[i].is_alive(): processes[i].terminate() del processes[i] except: pass else: for filename in self.filelist: name = os.path.basename(filename) eventHandler = WindowsEventHandler( filename, self.options, progressBar ) eventHandler.ProcessRecords() progressBar.Finish() class WindowsEventHandler():
Apache License 2.0
asappresearch/rationale-alignment
similarity/compute/trainer.py
AlignmentTrainer.step
python
def step(self) -> bool:
    self.train_step()
    self.eval_step()
    self._step += 1

    done = self._step >= self.epochs

    if done:
        model_name = "model.pt"
        self.model.load_state_dict(self._best_model)
        with open(os.path.join(self.log_dir, "metrics.json"), "w") as f:
            json.dump(self._log, f, indent=4, sort_keys=True)
    else:
        model_name = f"model_{self._step - 1}.pt"

    save_model(self.model, os.path.join(self.log_dir, model_name))

    return done
Train until the next checkpoint, and evaluate.

Returns
-------
bool
    Whether the computable has completed.
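The boolean return value drives the outer loop; a minimal sketch, assuming `trainer` is an already-constructed AlignmentTrainer:

done = False
while not done:
    done = trainer.step()   # one epoch of train_step() + eval_step(), then a checkpoint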
https://github.com/asappresearch/rationale-alignment/blob/8d2bf06ba4c121863833094d5d4896bf34a9a73e/similarity/compute/trainer.py#L246-L272
from copy import deepcopy import json import math import os from typing import Any, Dict, List, Optional, Tuple from tensorboardX import SummaryWriter import torch import torch.nn as nn from torch.optim import Adam from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau from similarity.data import Sampler from similarity.metric import Metric from utils.utils import prod, save_model, NoamLR class AlignmentTrainer: def __init__( self, args, train_sampler: Sampler, dev_sampler: Sampler, test_sampler: Sampler, model: nn.Module, loss_fn: Metric, metric_fn: Metric, optimizer: Adam, scheduler: Optional[_LRScheduler], epochs: int, lower_is_better: bool = False, dev_loss_fn: Metric = None, extra_training_metrics: Optional[List[Metric]] = None, extra_validation_metrics: Optional[List[Metric]] = None, log_dir: Optional[str] = None, log_frequency: int = 20, gradient_accumulation_steps: int = 1, sparsity_thresholds: Optional[List[float]] = None, saved_step: Optional[int] = 0, ): self.train_sampler = train_sampler self.dev_sampler = dev_sampler self.test_sampler = test_sampler self.model = model self.loss_fn = loss_fn self.metric_fn = metric_fn self.optimizer = optimizer self.scheduler = scheduler self.lower_is_better = lower_is_better self.dev_loss_fn = dev_loss_fn or self.loss_fn self.extra_training_metrics = extra_training_metrics or [] self.extra_validation_metrics = extra_validation_metrics or [] self.validation_metrics = [self.metric_fn] + self.extra_validation_metrics self.log_dir = log_dir self.log_frequency = log_frequency self.sparsity_thresholds = sparsity_thresholds or [] self.gradient_accumulation_steps = gradient_accumulation_steps self.epochs = epochs self._step = 0 self._best_metric = None self._best_model = None self.summary_writer = SummaryWriter(log_dir=self.log_dir) self._log: Dict[str, Dict[int, float]] = {} def log(self, text: str, value: float, step: int) -> None: print(f"{text} = {value:.4f}") self.summary_writer.add_scalar(text, value, step) self._log.setdefault(text, {})[step] = value def train_step(self) -> None: self.model.train() self.optimizer.zero_grad() with torch.enable_grad(): for i, batch in enumerate(self.train_sampler()): preds, targets = self.model(*batch) loss = self.loss_fn(preds, targets, self._step) loss = loss / self.gradient_accumulation_steps loss.backward() if i % self.gradient_accumulation_steps == 0: self.optimizer.step() if i % self.log_frequency == 0: global_step = (len(self.train_sampler) * self._step) + i self.log( "Stats/Grad_Norm", self.model.gradient_norm, global_step ) if self.scheduler is not None: self.scheduler.step() self.optimizer.zero_grad() if i % self.log_frequency == 0: global_step = (len(self.train_sampler) * self._step) + i self.log( "Stats/Learning_Rate", self.scheduler.get_lr()[0], global_step ) self.log(f"Train/Loss/{self.loss_fn}", loss.item(), global_step) self.log("Stats/Param_Norm", self.model.parameter_norm, global_step) for metric in self.extra_training_metrics: self.log( f"Train/Metric/{metric}", metric(preds, targets).item(), global_step, ) self.optimizer.zero_grad() def postprocess( self, preds: List[Tuple[torch.FloatTensor, torch.FloatTensor]], targets: List[Dict[str, torch.LongTensor]], num_preds: int = 0, ) -> Tuple[ List[Tuple[torch.FloatTensor, torch.FloatTensor]], List[Dict[str, torch.LongTensor]], int, ]: for target in targets: target["scope"] += num_preds target["positives"] += num_preds target["negatives"] += num_preds preds = [(cost.cpu(), alignment.cpu()) for (cost, alignment) in preds] targets = [ 
{key: value.cpu() for key, value in target.items()} for target in targets ] num_preds += len(preds) return preds, targets, num_preds def eval_step(self) -> None: self.model.eval() with torch.no_grad(): all_preds, all_targets = [], [] num_preds = 0 for batch in self.dev_sampler(): preds, targets = self.model(*batch) preds, targets, num_preds = self.postprocess(preds, targets, num_preds) all_preds += preds all_targets += targets dev_loss = self.dev_loss_fn( all_preds, all_targets, 10 ).item() dev_metric = self.metric_fn(all_preds, all_targets).item() sign = (-1) ** self.lower_is_better if self._best_metric is None or (sign * dev_metric > sign * self._best_metric): self._best_metric = dev_metric self._best_model = deepcopy(self.model.state_dict()) self.log(f"Validation/Loss/{self.dev_loss_fn}", dev_loss, self._step) self.log(f"Validation/Metric/{self.metric_fn}", dev_metric, self._step) for metric in self.extra_validation_metrics: self.log( f"Validation/Metric/{metric}", metric(all_preds, all_targets).item(), self._step, ) if self.scheduler is not None: if isinstance(self.scheduler, ReduceLROnPlateau): self.scheduler.step(dev_loss) else: self.scheduler.step() def test_step( self, flavor="Test", thresholds=[0.0, 0.01, 0.1, 0.5, 1, 3, 5] ) -> None: self.model.eval() assert flavor in ["Test", "Val"] with torch.no_grad(): all_preds, all_targets = [], [] num_preds = 0 for i, scaling in enumerate(thresholds): sampler = self.dev_sampler if flavor == "Val" else self.test_sampler if i != 0 and self.model.args.alignment == "sinkhorn": costs, alignments = zip(*all_preds) threshold_alignments = [ alignment * (alignment >= scaling / prod(alignment.shape[-2:])).float() for alignment in alignments ] all_preds = list(zip(costs, threshold_alignments)) else: all_preds, all_targets = [], [] num_preds = 0 for batch in sampler(): preds, targets = self.model(*batch, threshold=scaling) preds, targets, num_preds = self.postprocess( preds, targets, num_preds ) all_preds += preds all_targets += targets dev_loss = self.dev_loss_fn( all_preds, all_targets, 10 ).item() dev_metric = self.metric_fn(all_preds, all_targets).item() self.log(f"{flavor}/Metric/threshold", scaling, i) self.log(f"{flavor}/Loss/{self.dev_loss_fn}", dev_loss, i) self.log(f"{flavor}/Metric/{self.metric_fn}", dev_metric, i) for metric in self.extra_validation_metrics: self.log( f"{flavor}/Metric/{metric}", metric(all_preds, all_targets).item(), i, )
MIT License
pachyderm/python-pachyderm
src/python_pachyderm/experimental/mixin/pps.py
PPSMixin.create_pipeline_from_request
python
def create_pipeline_from_request( self, req: pps_proto.CreatePipelineRequest ) -> None: self._req(Service.PPS, "CreatePipeline", req=req)
Creates a pipeline from a ``CreatePipelineRequest`` object. Usually used in conjunction with ``util.parse_json_pipeline_spec()`` or ``util.parse_dict_pipeline_spec()``. Parameters ---------- req : pps_proto.CreatePipelineRequest The ``CreatePipelineRequest`` object.
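A minimal usage sketch for this method, assuming an already-connected experimental python_pachyderm Client named `client`; the import path of the `util` helpers and the pipeline spec below are assumptions for illustration only.

from python_pachyderm.experimental import util  # assumed location of the parsing helpers

req = util.parse_dict_pipeline_spec({
    "pipeline": {"name": "example-pipeline"},                                 # hypothetical
    "transform": {"cmd": ["python3", "/edges.py"], "image": "example/image"}, # hypothetical
    "input": {"pfs": {"repo": "images", "glob": "/*"}},
})
client.create_pipeline_from_request(req)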
https://github.com/pachyderm/python-pachyderm/blob/9dbffba91ac753e7c63c58d71768f53f83789cb9/src/python_pachyderm/experimental/mixin/pps.py#L443-L455
import json import base64 from typing import Dict, Iterator, List, Union from python_pachyderm.pfs import commit_from, Commit from python_pachyderm.service import Service, pps_proto, pfs_proto from google.protobuf import empty_pb2, duration_pb2 class PPSMixin: def inspect_job( self, job_id: str, pipeline_name: str = None, wait: bool = False, details: bool = False, ) -> Iterator[pps_proto.JobInfo]: if pipeline_name is not None: return iter( [ self._req( Service.PPS, "InspectJob", job=pps_proto.Job( pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id ), wait=wait, details=details, ) ] ) else: return self._req( Service.PPS, "InspectJobSet", job_set=pps_proto.JobSet(id=job_id), wait=wait, details=details, ) def list_job( self, pipeline_name: str = None, input_commit: Union[tuple, dict, Commit, pfs_proto.Commit, List] = None, history: int = 0, details: bool = False, jqFilter: str = None, ) -> Union[Iterator[pps_proto.JobInfo], Iterator[pps_proto.JobSetInfo]]: if pipeline_name is not None: if isinstance(input_commit, list): input_commit = [commit_from(ic) for ic in input_commit] elif input_commit is not None: input_commit = [commit_from(input_commit)] return self._req( Service.PPS, "ListJob", pipeline=pps_proto.Pipeline(name=pipeline_name), input_commit=input_commit, history=history, details=details, jqFilter=jqFilter, ) else: return self._req( Service.PPS, "ListJobSet", details=details, ) def delete_job(self, job_id: str, pipeline_name: str) -> None: self._req( Service.PPS, "DeleteJob", job=pps_proto.Job( pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id ), ) def stop_job(self, job_id: str, pipeline_name: str, reason: str = None) -> None: self._req( Service.PPS, "StopJob", job=pps_proto.Job( pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id ), reason=reason, ) def inspect_datum( self, pipeline_name: str, job_id: str, datum_id: str ) -> pps_proto.DatumInfo: return self._req( Service.PPS, "InspectDatum", datum=pps_proto.Datum( id=datum_id, job=pps_proto.Job( pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id ), ), ) def list_datum( self, pipeline_name: str = None, job_id: str = None, input: pps_proto.Input = None, ) -> Iterator[pps_proto.DatumInfo]: req = pps_proto.ListDatumRequest() if pipeline_name is not None and job_id is not None: req.job.CopyFrom( pps_proto.Job( pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id ) ) else: req.input.CopyFrom(input) return self._req(Service.PPS, "ListDatum", req=req) def restart_datum( self, pipeline_name: str, job_id: str, data_filters: List[str] = None ) -> None: self._req( Service.PPS, "RestartDatum", job=pps_proto.Job( pipeline=pps_proto.Pipeline(name=pipeline_name), id=job_id ), data_filters=data_filters, ) def create_pipeline( self, pipeline_name: str, transform: pps_proto.Transform, parallelism_spec: pps_proto.ParallelismSpec = None, egress: pps_proto.Egress = None, reprocess_spec: str = None, update: bool = False, output_branch_name: str = None, s3_out: bool = False, resource_requests: pps_proto.ResourceSpec = None, resource_limits: pps_proto.ResourceSpec = None, sidecar_resource_limits: pps_proto.ResourceSpec = None, input: pps_proto.Input = None, description: str = None, reprocess: bool = False, service: pps_proto.Service = None, datum_set_spec: pps_proto.DatumSetSpec = None, datum_timeout: duration_pb2.Duration = None, job_timeout: duration_pb2.Duration = None, salt: str = None, datum_tries: int = 3, scheduling_spec: pps_proto.SchedulingSpec = None, pod_patch: str = None, spout: pps_proto.Spout = None, 
spec_commit: pfs_proto.Commit = None, metadata: pps_proto.Metadata = None, autoscaling: bool = False, ) -> None: self._req( Service.PPS, "CreatePipeline", pipeline=pps_proto.Pipeline(name=pipeline_name), transform=transform, parallelism_spec=parallelism_spec, egress=egress, update=update, output_branch=output_branch_name, s3_out=s3_out, resource_requests=resource_requests, resource_limits=resource_limits, sidecar_resource_limits=sidecar_resource_limits, input=input, description=description, reprocess=reprocess, metadata=metadata, service=service, datum_set_spec=datum_set_spec, datum_timeout=datum_timeout, job_timeout=job_timeout, salt=salt, datum_tries=datum_tries, scheduling_spec=scheduling_spec, pod_patch=pod_patch, spout=spout, spec_commit=spec_commit, reprocess_spec=reprocess_spec, autoscaling=autoscaling, )
Apache License 2.0
pangeo-data/climpred
climpred/smoothing.py
_reset_temporal_axis
python
def _reset_temporal_axis(ds_smoothed, tsmooth_kws, dim="lead", set_lead_center=True): if tsmooth_kws is None or callable(tsmooth_kws): return ds_smoothed if not ("time" in tsmooth_kws.keys() or "lead" in tsmooth_kws.keys()): raise ValueError("tsmooth_kws does not contain a time dimension.", tsmooth_kws) for c in ["time", "lead"]: if c in tsmooth_kws.keys(): smooth = tsmooth_kws[c] ds_smoothed[dim] = [f"{t}-{t + smooth - 1}" for t in ds_smoothed[dim].values] if set_lead_center: _set_center_coord(ds_smoothed, dim) return ds_smoothed
Reduce and reset the temporal axis. See temporal_smoothing(). Should be used after the skill calculation to keep readable labels for the smoothed dimension. Args: ds_smoothed (xarray object): Smoothed dataset. tsmooth_kws (dict): Keyword arguments the smoothing was performed with. dim (str): Dimension smoothing is performed over. Defaults to 'lead'. set_lead_center (bool): Whether to set the new coord `{dim}_center`. Defaults to True. Returns: Smoothed Dataset with updated labels for the smoothed temporal dimension.
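A small sketch of the relabelling only, not climpred's full workflow; set_lead_center=False is passed so the sketch does not depend on the module's _set_center_coord helper (not shown above). With tsmooth_kws={"lead": 4}, lead values 1, 2, 3 become "1-4", "2-5", "3-6".

import numpy as np
import xarray as xr
from climpred.smoothing import _reset_temporal_axis

ds = xr.Dataset({"skill": ("lead", np.random.rand(3))}, coords={"lead": [1, 2, 3]})
relabelled = _reset_temporal_axis(ds, tsmooth_kws={"lead": 4}, dim="lead", set_lead_center=False)
print(relabelled["lead"].values)  # ['1-4' '2-5' '3-6']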
https://github.com/pangeo-data/climpred/blob/8df537f4a3740441dba55e018ce65e1b6d977162/climpred/smoothing.py#L190-L216
import numpy as np import xarray as xr from .checks import is_xarray try: import xesmf as xe except ImportError: xe = None @is_xarray(0) def spatial_smoothing_xesmf( ds, d_lon_lat_kws={"lon": 5, "lat": 5}, method="bilinear", periodic=False, filename=None, reuse_weights=False, tsmooth_kws=None, how=None, ): if xe is None: raise ImportError( "xesmf is not installed; see" "https://pangeo-xesmf.readthedocs.io/en/latest/installation.html" ) def _regrid_it(da, d_lon, d_lat, **kwargs): if "lon" in da.coords: lon = da.lon else: try: lon = da.cf["longitude"] except KeyError: raise KeyError( "Could not find `lon` as coordinate or any C/F compliant `latitude` coordinate, see https://pangeo-xesmf.readthedocs.io and https://cf-xarray.readthedocs.io" ) if "lat" in da.coords: lat = da.lat else: try: lat = da.cf["latitude"] except KeyError: raise KeyError( "C/F compliant or `lat` as coordinate, see https://pangeo-xesmf.readthedocs.io" ) grid_out = xr.Dataset( { "lat": (["lat"], np.arange(lat.min(), lat.max() + d_lat, d_lat)), "lon": (["lon"], np.arange(lon.min(), lon.max() + d_lon, d_lon)), } ) regridder = xe.Regridder(da, grid_out, **kwargs) return regridder(da, keep_attrs=True) if ("lon" in d_lon_lat_kws) and ("lat" in d_lon_lat_kws): pass elif ("lon" not in d_lon_lat_kws) and ("lat" in d_lon_lat_kws): d_lon_lat_kws["lon"] = d_lon_lat_kws["lat"] elif ("lat" not in d_lon_lat_kws) and ("lon" in d_lon_lat_kws): d_lon_lat_kws["lat"] = d_lon_lat_kws["lon"] else: raise ValueError("please provide either `lon` or/and `lat` in d_lon_lat_kws.") kwargs = { "d_lon": d_lon_lat_kws["lon"], "d_lat": d_lon_lat_kws["lat"], "method": method, "periodic": periodic, "filename": filename, "reuse_weights": reuse_weights, } ds = _regrid_it(ds, **kwargs) return ds @is_xarray(0) def temporal_smoothing(ds, tsmooth_kws=None, how="mean", d_lon_lat_kws=None): if not isinstance(tsmooth_kws, dict): raise ValueError( "Please provide tsmooth_kws as dict, found ", type(tsmooth_kws) ) if not ("time" in tsmooth_kws or "lead" in tsmooth_kws): raise ValueError( 'tsmooth_kws doesnt contain a time dimension \ (either "lead" or "time").', tsmooth_kws, ) smooth = list(tsmooth_kws.values())[0] if smooth == 1: return ds dim = list(tsmooth_kws.keys())[0] time_dims = ["time", "lead"] if dim not in ds.dims: time_dims.remove(dim) dim = time_dims[0] tsmooth_kws = {dim: smooth} ds_smoothed = getattr(ds.rolling(tsmooth_kws, center=False), how)() ds_smoothed = ds_smoothed.isel({dim: slice(smooth - 1, None)}) ds_smoothed[dim] = ds.isel({dim: slice(None, -smooth + 1)})[dim] return ds_smoothed
MIT License
alehander92/airtight
airtight/hindley_milner_ast.py
fresh
python
def fresh(t, non_generic): mappings = {} def freshrec(tp): p = prune(tp) if isinstance(p, TypeVariable): if isGeneric(p, non_generic): if p not in mappings: mappings[p] = TypeVariable() return mappings[p] else: return p elif isinstance(p, TypeOperator): return TypeOperator(p.name, [freshrec(x) for x in p.types]) elif isinstance(p, Union): return Union(*[freshrec(x) for x in p.types]) return freshrec(t)
Makes a copy of a type expression. The type t is copied: the generic variables are duplicated and the non_generic variables are shared. Args: t: A type to be copied. non_generic: A set of non-generic TypeVariables
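A behaviour sketch, assuming the module's prune() and isGeneric() helpers (defined elsewhere in the file, not shown in the excerpt) are available: a generic type variable is replaced by a fresh one, while a non-generic variable is shared with the original.

from airtight.hindley_milner_ast import Function, Integer, TypeVariable, fresh

a = TypeVariable()
fn = Function(a, Integer)            # a -> Integer

copy_generic = fresh(fn, set())      # 'a' treated as generic: duplicated
copy_nongen = fresh(fn, {a})         # 'a' listed as non-generic: shared
assert copy_generic.types[0] is not a
assert copy_nongen.types[0] is a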
https://github.com/alehander92/airtight/blob/3f54b4889ab60288ce69669ebf1c3d284662c7aa/airtight/hindley_milner_ast.py#L516-L541
import copy class Top: a_type = None a_native = False def annotate(self, a_type): self.a_type = a_type return a_type class Lambda(Top): def __init__(self, v, body, expected=None, return_expected=None): self.v = v self.body = body self.expected = expected self.return_expected = return_expected def __str__(self): return "(fn {v}@{t} => {body})".format(v=self.v, t=self.a_type.types[0] if self.a_type else '', body=self.body) class LambdaNoArgs(Top): def __init__(self, body): self.body = body def __str__(self): return "(fn => {body})".format(body=self.body) class aList(Top): def __init__(self, items): self.items = items def __str__(self): return "[{items}]".format( items=', '.join(str(item) for item in self.items)) class If(Top): def __init__(self, test, body, orelse): self.test = test self.body = body self.orelse = orelse def __str__(self): return 'If({0}) {1} {2}'.format(str(self.test), str(self.body), str(self.orelse)) class For(Top): def __init__(self, iter, target, body): self.iter = iter self.target = target self.body = body def __str__(self): return 'For {0} in {1} {2}'.format(str(self.target), str(self.iter), str(self.body)) class While(Top): def __init__(self, test, body): self.test = test self.body = body def __str__(self): return 'While {0} {1}'.format(str(self.test), str(self.body)) class Body(Top): def __init__(self, expression, other): self.expression = expression self.other = other def __str__(self): return "(@{expression}\n {other})".format( expression=str(self.expression), other=str(self.other)) class Ident(Top): def __init__(self, name): self.name = name def __str__(self): return '{name}@{type}'.format(name=str(self.name), type=str(self.a_type)) class anInteger(Ident): pass class aString(Ident): def __init__(self, name): self.name = "'%s'" % name class aBoolean(Ident): pass class aFloat(Ident): pass class Apply(Top): def __init__(self, fn, arg): self.fn = fn self.arg = arg def __str__(self): return "({fn} {arg})".format(fn=self.fn, arg=self.arg) class Let(Top): def __init__(self, v, defn, body): self.v = v self.defn = defn self.body = body def __str__(self): return "(let {v} = {defn} in {body})".format(v=self.v, defn=self.defn, body=self.body) def Letmany(vs, defns, body): if len(vs) == 1: return Let(vs[0], defns[0], body) else: return Let(vs[0], defns[0], Letmany(vs[1:], defns[1:], body)) class Letrec(Top): def __init__(self, v, defn, body): self.v = v self.defn = defn self.body = body def __str__(self): return "(letrec {v} = {defn} in {body})".format(v=self.v, defn=self.defn, body=self.body) class NotUnifiedError(Exception): def __init__(self, message): self.__message = message message = property(lambda self: self.__message) def __str__(self): return str(self.message) class TypeError(Exception): def __init__(self, message): self.__message = message message = property(lambda self: self.__message) def __str__(self): return str(self.message) class ParseError(Exception): def __init__(self, message): self.__message = message message = property(lambda self: self.__message) def __str__(self): return str(self.message) class TypeVariable(object): next_variable_id = 0 def __init__(self): self.id = TypeVariable.next_variable_id TypeVariable.next_variable_id += 1 self.instance = None self.__name = None next_variable_name = 'a' def _getName(self): if self.__name is None: self.__name = TypeVariable.next_variable_name TypeVariable.next_variable_name = chr(ord(TypeVariable.next_variable_name) + 1) return self.__name name = property(_getName) def __str__(self): if self.instance is not None: 
return str(self.instance) else: return str(self.name) def __repr__(self): return "TypeVariable(id = {0})".format(self.id) class TypeOperator(object): def __init__(self, name, types): self.name = name self.types = types def __str__(self): num_types = len(self.types) if num_types == 0: return str(self.name) elif num_types == 1: return '[{0}]'.format(str(self.types[0])) elif num_types == 2: return "({0} {1} {2})".format(str(self.types[0]), str(self.name), str(self.types[1])) else: return "{0} {1}" % (str(self.name), ' '.join(map(str, self.types))) class Function(TypeOperator): def __init__(self, from_type, to_type): super(Function, self).__init__("->", [from_type, to_type]) class Union(object): def __init__(self, *types): self.types = types def __str__(self): return ' | '.join(str(t) for t in self.types) def Multi_Apply(ident, args): if len(args) == 1: return Apply(ident, args[0]) else: return Apply(Multi_Apply(ident, args[:-1]), args[-1]) def Multi_Lambda(args, body, expected=None): if not expected: expected = None rest_expected = [] else: rest_expected = expected[1:] expected = expected[0] if len(args) > 1: return Lambda( args[0], Multi_Lambda(args[1:], body, expected=rest_expected), expected=expected) elif len(args) == 0: return LambdaNoArgs(body) else: return Lambda(args[0], body, expected=expected, return_expected=None if rest_expected == [] else rest_expected[0]) def Multi_Function(types): if len(types) == 2: return Function(types[0], types[1]) else: return Function(types[0], Multi_Function(types[1:])) class List(TypeOperator): def __init__(self, element_type): super(List, self).__init__("list", [element_type]) def __str__(self): return '[{0}]'.format(str(self.types[0])) Integer = TypeOperator("Integer", []) Bool = TypeOperator("Bool", []) Float = TypeOperator("Float", []) String = TypeOperator("String", []) def analyse(node, env, non_generic=None): if non_generic is None: non_generic = set() if isinstance(node, Ident): return node.annotate(getType(node.name, env, non_generic)) elif isinstance(node, If): unify(analyse(node.test, env, non_generic), Bool) node.test.annotate(Bool) body_type = analyse(node.body, env, non_generic) orelse_type = analyse(node.body, env, non_generic) unify(body_type, orelse_type) return node.annotate(body_type) elif isinstance(node, Apply): fun_type = analyse(node.fn, env, non_generic) arg_type = analyse(node.arg, env, non_generic) result_type = TypeVariable() if not isinstance(fun_type, Union): fun_types = [fun_type] else: fun_types = fun_type.types backup = Function(arg_type, result_type) found = False unify(backup, fun_type) if isinstance(node.arg, Ident): if node.arg.name not in env: env[node.arg.name] = arg_type else: unify(env[node.arg.name], arg_type) node.arg.annotate(arg_type) node.fn.annotate(backup) return node.annotate(result_type) elif isinstance(node, Body): analyse(node.expression, env, non_generic) return node.annotate( analyse(node.other, env, non_generic)) elif isinstance(node, For): iter_type = analyse(node.iter, env, non_generic) target_type = TypeVariable() unify(List(target_type), iter_type) node.target.annotate(target_type) node.iter.annotate(iter_type) new_env = env.copy() new_env[node.target.name] = target_type new_non_generic = non_generic.copy() new_non_generic.add(target_type) return node.annotate(analyse(node.body, new_env, new_non_generic)) elif isinstance(node, While): test_type = analyse(node.test, env, non_generic) unify(Bool, test_type) node.test.annotate(test_type) return node.annotate(analyse(node.body, env, non_generic)) 
elif isinstance(node, Lambda): arg_type = TypeVariable() new_env = env.copy() new_env[node.v] = arg_type new_non_generic = non_generic.copy() new_non_generic.add(arg_type) if node.expected: expected_type = find_type(node.expected, env) unify(expected_type, arg_type) result_type = analyse(node.body, new_env, new_non_generic) if node.return_expected: expected_type = find_type(node.return_expected, env) unify(expected_type, result_type) node.a_return_type = result_type return node.annotate(Function(arg_type, result_type)) elif isinstance(node, LambdaNoArgs): return node.annotate(analyse(node.body, env, non_generic)) elif isinstance(node, aList): if not node.items: item_type = TypeVariable() else: item_type = find_type(node.items[0], env) node.items[0].annotate(item_type) for j in node.items[1:]: unify(item_type, find_type(j, env)) j.annotate(item_type) return node.annotate(List(item_type)) elif isinstance(node, Let): defn_type = analyse(node.defn, env, non_generic) new_env = env.copy() if node.v in new_env: if isinstance(new_env[node.v], Function): new_env[node.v] = Union(new_env[node.v], defn_type) elif isinstance(new_env[node.v], Union): new_env[node.v].types.append(defn_type) else: new_env[node.v] = defn_type node.defn.annotate(new_env[node.v]) return node.annotate(analyse(node.body, new_env, non_generic)) elif isinstance(node, Letrec): new_type = TypeVariable() new_env = env.copy() new_env[node.v] = new_type new_non_generic = non_generic.copy() new_non_generic.add(new_type) defn_type = analyse(node.defn, new_env, new_non_generic) unify(new_type, defn_type) node.defn.annotate(defn_type) return node.annotate(analyse(node.body, new_env, non_generic)) assert 0, "Unhandled syntax node {0}".format(node) def find_type(expected, env): if isinstance(expected, Function): for i in range(len(expected.types)): expected.types[i] = find_type(expected.types[i], env) return expected elif isinstance(expected, Union): for i in range(len(expected.types)): expected.types[i] = find_type(expected.types[i], env) return expected elif isinstance(expected, List): expected.types[0] = find_type(expected.types[0], env) return expected elif isinstance(expected, Ident): return getType(expected, env, set()) elif isinstance(expected, TypeOperator) and not expected.types: return env.get(expected.name, expected) elif isinstance(expected, TypeVariable): return expected def getType(name, env, non_generic): if name in env: type_ = env[name] if isinstance(type_, list): return [fresh(t, non_generic) for t in type_] else: return fresh(type_, non_generic) else: if isinstance(name, Ident): name = name.name types_of_name = { int: Integer, float: Float, bool: Bool } if type(name) in types_of_name: return types_of_name[type(name)] elif len(name) != 0 and name[0] == "'" and name[-1] == "'": return String else: raise ParseError("Undefined symbol {0}".format(name))
MIT License
researchapps/job-maker
helpers/slurm2json.py
keep_going
python
def keep_going(name): skip_these = ['DEFAULT','test'] go_on = True for skip in skip_these: if name.startswith(skip): go_on = False return go_on
Filters a section name to decide whether the calling function should continue processing it. Returns False if the name starts with any of the skip prefixes ('DEFAULT', 'test'), True otherwise.
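A behaviour sketch grounded in the skip list in the function body ('DEFAULT', 'test'):

keep_going('normal')          # True  -> the caller keeps processing this section
keep_going('DEFAULT')         # False -> skipped
keep_going('test_partition')  # False -> skipped, name starts with 'test'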
https://github.com/researchapps/job-maker/blob/53f6af33f209cb751374e9ff4cb3dd1b82fed5c4/helpers/slurm2json.py#L305-L315
import argparse import json import sys import os import re def get_parser(): parser = argparse.ArgumentParser( description="convert slurm.conf to machines.json") parser.add_argument("--input", dest='input', help='''one or more slurm config files, separated by commas.''', type=str, default=None) parser.add_argument("--update", dest='update', help='''Update an already existing machines.json (or other)''', default=False, action='store_true') parser.add_argument("--disclude-part", dest='disclude_part', help='''Partitions to disclude, separated by commas''', type=str, default=None) parser.add_argument("--print", dest='print', help="print to screen instead of saving to machines.json", default=False, action='store_true') parser.add_argument("--quiet", dest='quiet', help='''Suppress all output (other than print)''', default=False, action='store_true') parser.add_argument("--force", dest='force', help="Force overwrite of the output file, if it exists.", default=False, action='store_true') parser.add_argument("--outfile", dest='outfile', help="output json file. Default is machines.json", type=str, default='machines.json') return parser def main(): parser = get_parser() try: args = parser.parse_args() except: sys.exit(0) if args.input == None: parser.print_help() message('Please supply a slurm.conf with --input.', args.quiet) sys.exit(1) if os.path.exists(args.outfile) and args.force is False and args.update is False and args.print is False: message("%s already exists! Use --force to force overwrite." % args.outfile) sys.exit(1) print("Parsing %s, please wait!" % args.input) if args.update is True: if not os.path.exists(args.outfile): message("Cannot find %s. Did you specify the right path?" % args.outfile) sys.exit(1) message("Found %s to update." % args.outfile, args.quiet) machines = read_json(args.outfile) else: machines = dict() disclude_part = None if args.disclude_part is not None: disclude_part = args.disclude_part.split(',') message("%s will not be included." %', '.join(disclude_part),args.quiet) else: message("All partitions will be included.",args.quiet) input_files = args.input.split(',') for input_file in input_files: if not os.path.exists(input_file): message("Cannot find %s. Did you specify the right path?" 
% input_file) sys.exit(1) cluster = parse_config(config_file=input_file) cluster_names = ",".join(list(cluster.keys())) message('Adding cluster %s' %(cluster_names),args.quiet) if disclude_part is not None: cluster = disclude_partitions(cluster,disclude_part) machines.update(cluster) cluster_names = ",".join(list(machines.keys())) message('Compiling clusters %s' %(cluster_names),args.quiet) if args.print is True: message(json.dumps(machines, indent=4, sort_keys=True)) else: write_json(machines,args.outfile) def message(text,quiet=False): if not quiet: print(text) def unpack_data(data): config = data['config'] nodes = data['nodes'] partitions = data['partitions'] return config,nodes,partitions def pack_data(config,nodes,partitions): return {'config':config, 'nodes':nodes, 'partitions':partitions} def combine_clusters(clusters): machines = dict() for cluster in clusters: for cluster_name,metadata in cluster: machines[cluster_name] = metadata return machines def read_file(file_name, clean=True, join=False): with open(file_name,'r') as filey: content = filey.readlines() if clean is True: content = [c.strip('\n') for c in content if not c.startswith('#') and len(c.strip('\n')) > 0] if join: content = ''.join(content) return content def write_json(json_obj,filename,mode="w"): with open(filename,mode) as filey: filey.writelines(json.dumps(json_obj, indent=4, separators=(',', ': '))) return filename def read_json(filename,mode='r'): with open(filename,mode) as filey: data = json.load(filey) return data def remove_comments(line): return line.rsplit('#',1)[0].strip() def parse_line_multi(line,keepers=None): parsed = dict() lines = line.strip().split('#')[0].split(' ') for line in lines: if len(line) > 0: params = line.split('=') key = params[0] key = "%s%s" %(key[0].capitalize(),key[1:]) value = params[-1].strip() if keepers is not None: if key in keepers: parsed[key] = value else: if key is not "\\": parsed[key] = value return parsed def get_node_variables(): return ["RealMemory", "Gres", "Weight", "Feature", "Default"] def break_range_expressions(node_name): parts = list(node_name) current = '' finished = [] opened = False for c in range(len(parts)): part = parts[c] if part == '{': if len(current) > 0: finished.append(current) opened = True current='{' elif part == '}': if len(current) > 0: finished.append("%s}" %current) current='' opened = False else: current = "%s%s" %(current,part) if opened: current = "%s}" %(current) if current not in finished and len(current)>0: finished.append(current) return finished def parse_single_node(node_name): parts = break_range_expressions(node_name) options = [] for part in parts: node_options = [] if not re.search("^{|}$",part): options.append([part]) else: node_ranges = re.findall("[0-9]+-[0-9]+",part) node_lists = re.findall("[0-9]+,[0-9]+",part) for node_range in node_ranges: start,end = [int(x) for x in node_range.split('-')] node_options += [int(x) for x in range(start,end+1)] for node_list in node_lists: node_options += [int(x) for x in node_list.split(',')] options.append(node_options) final_options = options.pop(0) while len(options) > 0: option_set = options.pop(0) new_options = [] for final_option in final_options: for option in option_set: new_options.append("%s%s" %(final_option,option)) final_options = new_options return final_options def parse_node_names(line): new_nodes = [] nodelist = re.sub("\\\\| ","", line) nodelist = nodelist.replace('[','{').replace(']','}') nodelist = re.split(',\s*(?![^{}]*\})', nodelist) for node_name in nodelist: 
contenders = [x for x in parse_single_node(node_name) if x not in new_nodes] new_nodes = new_nodes + contenders return list(set(new_nodes))
MIT License
facebookresearch/d2go
d2go/data/extended_coco.py
InMemoryCOCO.__init__
python
def __init__(self, loaded_json): self.dataset = loaded_json self.anns = {} self.cats = {} self.imgs = {} self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) self.createIndex()
In this in-memory version of COCO we don't load the json from a file, but directly use an already-loaded `loaded_json` dict instead. This improves both robustness and efficiency: when converting from other formats to COCO format, we don't need to save and re-load the json.
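A minimal usage sketch with a hypothetical, already-loaded COCO-format dict:

from d2go.data.extended_coco import InMemoryCOCO

loaded_json = {
    "images": [{"id": 1, "file_name": "img1.jpg", "height": 10, "width": 10}],
    "annotations": [{"id": 1, "image_id": 1, "category_id": 1, "bbox": [0, 0, 5, 5]}],
    "categories": [{"id": 1, "name": "thing"}],
}
coco = InMemoryCOCO(loaded_json)
print(coco.getAnnIds(imgIds=[1]))  # [1]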
https://github.com/facebookresearch/d2go/blob/bfc08c534859358a5ee87d1091e2b5d661c937e7/d2go/data/extended_coco.py#L30-L43
import json import logging import shlex import subprocess from collections import defaultdict import detectron2.utils.comm as comm from detectron2.data import MetadataCatalog from detectron2.structures import BoxMode from pycocotools.coco import COCO from .cache_util import _cache_json_file try: from virtual_fs import virtual_os as os from virtual_fs.virtual_io import open except ImportError: import os logger = logging.getLogger(__name__) class InMemoryCOCO(COCO):
Apache License 2.0
dlhub-argonne/dlhub_sdk
dlhub_sdk/models/servables/__init__.py
BaseServableModel.register_function
python
def register_function(self, name, inputs, outputs, parameters=None, method_details=None): if method_details is None: method_details = {} if parameters is None: parameters = {} self.servable.methods[name] = MethodMetadata.parse_obj({ 'input': inputs, 'output': outputs, 'parameters': parameters, 'method_details': method_details }) return self
Registers a new function to this servable See :code:`compose_argument_type` utility function for how to define the inputs and outputs to this function. Args: name (string): Name of the function (e.g., "run") inputs (dict): Description of inputs to the function outputs (dict): Description of the outputs of the function parameters (dict): Any additional parameters for the function and their default values method_details (dict): Any options used when constructing a shim to run this function.
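A minimal usage sketch; `model` is assumed to be an instance of a concrete BaseServableModel subclass, and the argument-type dicts are hand-written stand-ins for what compose_argument_type() would produce.

model.register_function(
    "run",
    inputs={"type": "ndarray", "description": "Input features", "shape": [None, 4]},
    outputs={"type": "ndarray", "description": "Predicted classes", "shape": [None]},
    parameters={"batch_size": 32},
)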
https://github.com/dlhub-argonne/dlhub_sdk/blob/3e5c672a48c433717237241edcd7da3cf1333a13/dlhub_sdk/models/servables/__init__.py#L87-L115
from typing import Dict, Optional, List, Any, Union from pydantic import BaseModel, Field from dlhub_sdk.models import BaseMetadataModel class ArgumentTypeMetadata(BaseModel): type: str = Field(None, help="Type of the argument") description: Optional[str] = Field(None, help="Description of the argument") shape: Optional[List[Union[None, int]]] = None python_type: Optional[str] = None item_type: Optional['ArgumentTypeMetadata'] = None element_types: Optional[List['ArgumentTypeMetadata']] = None properties: Optional[Dict[str, 'ArgumentTypeMetadata']] = None ArgumentTypeMetadata.update_forward_refs() class MethodMetadata(BaseModel): input: ArgumentTypeMetadata = Field(..., help="Description of the method inputs") output: ArgumentTypeMetadata = Field(..., help="Description of the method outputs") parameters: Dict[str, Any] = Field(default_factory=dict, help="Description of method runtime parameters") method_details: Dict = Field(default_factory=dict, help="Options used to construct the method in DLHub.") class ServableMetadata(BaseModel): type: Optional[str] = Field(None, help="Type of the servable. Meant to be human readable") shim: Optional[str] = Field(None, help="Name of the home_run shim used to run a servable.") model_type: Optional[str] = Field(None, help="Simple description of the type of a machine learning model") model_summary: Optional[str] = Field(None, help="Longer-form description of a model.") methods: Dict[str, MethodMetadata] = Field(default_factory=dict, help="Description of each method for the servable") options: Optional[Dict] = Field(default_factory=dict, help="Settings used to construct the servable") class Config: extra = 'allow' class BaseServableModel(BaseMetadataModel): servable: ServableMetadata = Field(ServableMetadata, help="Metadata describing how to construct and run a servable") def __init__(self): super(BaseServableModel, self).__init__() self.datacite.resourceType = {'resourceTypeGeneral': 'InteractiveResource'} self.dlhub.type = 'servable' self.servable = ServableMetadata(shim=self._get_handler(), type=self._get_type()) def _get_handler(self): raise NotImplementedError() def _get_type(self): raise NotImplementedError()
Apache License 2.0
protothis/python-synology
src/synology_dsm/api/core/upgrade.py
SynoCoreUpgrade.update_available
python
def update_available(self): return self._data.get("available")
Gets available update info.
https://github.com/protothis/python-synology/blob/645b818be2013231ac126c6962d2f9092a5c3aae/src/synology_dsm/api/core/upgrade.py#L22-L24
class SynoCoreUpgrade: API_KEY = "SYNO.Core.Upgrade" API_SERVER_KEY = API_KEY + ".Server" def __init__(self, dsm): self._dsm = dsm self._data = {} def update(self): raw_data = self._dsm.get(self.API_SERVER_KEY, "check") if raw_data: self._data = raw_data["data"].get("update", raw_data["data"]) @property
MIT License
numba/numba
numba/core/byteflow.py
TraceRunner.op_SLICE_2
python
def op_SLICE_2(self, state, inst): tos = state.pop() tos1 = state.pop() res = state.make_temp() slicevar = state.make_temp() indexvar = state.make_temp() nonevar = state.make_temp() state.append( inst, base=tos1, stop=tos, res=res, slicevar=slicevar, indexvar=indexvar, nonevar=nonevar, ) state.push(res)
TOS = TOS1[:TOS]
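For reference, the Python-level slice semantics this opcode handler records (the handler itself only pops the operands, builds temporaries, and appends the operation for later IR construction):

# TOS1[:TOS] == base[:stop], e.g.
[10, 20, 30][:2]   # -> [10, 20]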
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/core/byteflow.py#L395-L414
from pprint import pformat import logging from collections import namedtuple, defaultdict, deque from functools import total_ordering from numba.core.utils import UniqueDict, PYVERSION from numba.core.controlflow import NEW_BLOCKERS, CFGraph from numba.core.ir import Loc from numba.core.errors import UnsupportedError _logger = logging.getLogger(__name__) _EXCEPT_STACK_OFFSET = 6 _FINALLY_POP = _EXCEPT_STACK_OFFSET if PYVERSION >= (3, 8) else 1 _NO_RAISE_OPS = frozenset({ 'LOAD_CONST', }) @total_ordering class BlockKind(object): _members = frozenset({ 'LOOP', 'TRY', 'EXCEPT', 'FINALLY', 'WITH', 'WITH_FINALLY', }) def __init__(self, value): assert value in self._members self._value = value def __hash__(self): return hash((type(self), self._value)) def __lt__(self, other): if isinstance(other, BlockKind): return self._value < other._value else: raise TypeError('cannot compare to {!r}'.format(type(other))) def __eq__(self, other): if isinstance(other, BlockKind): return self._value == other._value else: raise TypeError('cannot compare to {!r}'.format(type(other))) def __repr__(self): return "BlockKind({})".format(self._value) class _lazy_pformat(object): def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs def __str__(self): return pformat(*self.args, **self.kwargs) class Flow(object): def __init__(self, bytecode): _logger.debug("bytecode dump:\n%s", bytecode.dump()) self._bytecode = bytecode self.block_infos = UniqueDict() def run(self): firststate = State(bytecode=self._bytecode, pc=0, nstack=0, blockstack=()) runner = TraceRunner(debug_filename=self._bytecode.func_id.filename) runner.pending.append(firststate) first_encounter = UniqueDict() while runner.pending: _logger.debug("pending: %s", runner.pending) state = runner.pending.popleft() if state not in runner.finished: _logger.debug("stack: %s", state._stack) first_encounter[state.pc_initial] = state while True: runner.dispatch(state) if state.has_terminated(): break elif (state.has_active_try() and state.get_inst().opname not in _NO_RAISE_OPS): state.fork(pc=state.get_inst().next) tryblk = state.get_top_block('TRY') state.pop_block_and_above(tryblk) nstack = state.stack_depth kwargs = {} if nstack > tryblk['entry_stack']: kwargs['npop'] = nstack - tryblk['entry_stack'] handler = tryblk['handler'] kwargs['npush'] = { BlockKind('EXCEPT'): _EXCEPT_STACK_OFFSET, BlockKind('FINALLY'): _FINALLY_POP }[handler['kind']] kwargs['extra_block'] = handler state.fork(pc=tryblk['end'], **kwargs) break else: state.advance_pc() if self._is_implicit_new_block(state): self._guard_with_as(state) state.split_new_block() break _logger.debug("end state. 
edges=%s", state.outgoing_edges) runner.finished.add(state) out_states = state.get_outgoing_states() runner.pending.extend(out_states) self._build_cfg(runner.finished) self._prune_phis(runner) for state in sorted(runner.finished, key=lambda x: x.pc_initial): self.block_infos[state.pc_initial] = si = adapt_state_infos(state) _logger.debug("block_infos %s:\n%s", state, si) def _build_cfg(self, all_states): graph = CFGraph() for state in all_states: b = state.pc_initial graph.add_node(b) for state in all_states: for edge in state.outgoing_edges: graph.add_edge(state.pc_initial, edge.pc, 0) graph.set_entry_point(0) graph.process() self.cfgraph = graph def _prune_phis(self, runner): _logger.debug("Prune PHIs".center(60, '-')) def get_used_phis_per_state(): used_phis = defaultdict(set) phi_set = set() for state in runner.finished: used = set(state._used_regs) phis = set(state._phis) used_phis[state] |= phis & used phi_set |= phis return used_phis, phi_set def find_use_defs(): defmap = {} phismap = defaultdict(set) for state in runner.finished: for phi, rhs in state._outgoing_phis.items(): if rhs not in phi_set: defmap[phi] = state phismap[phi].add((rhs, state)) _logger.debug("defmap: %s", _lazy_pformat(defmap)) _logger.debug("phismap: %s", _lazy_pformat(phismap)) return defmap, phismap def propagate_phi_map(phismap): blacklist = defaultdict(set) while True: changing = False for phi, defsites in sorted(list(phismap.items())): for rhs, state in sorted(list(defsites)): if rhs in phi_set: defsites |= phismap[rhs] blacklist[phi].add((rhs, state)) to_remove = blacklist[phi] if to_remove & defsites: defsites -= to_remove changing = True _logger.debug("changing phismap: %s", _lazy_pformat(phismap)) if not changing: break def apply_changes(used_phis, phismap): keep = {} for state, used_set in used_phis.items(): for phi in used_set: keep[phi] = phismap[phi] _logger.debug("keep phismap: %s", _lazy_pformat(keep)) new_out = defaultdict(dict) for phi in keep: for rhs, state in keep[phi]: new_out[state][phi] = rhs _logger.debug("new_out: %s", _lazy_pformat(new_out)) for state in runner.finished: state._outgoing_phis.clear() state._outgoing_phis.update(new_out[state]) used_phis, phi_set = get_used_phis_per_state() _logger.debug("Used_phis: %s", _lazy_pformat(used_phis)) defmap, phismap = find_use_defs() propagate_phi_map(phismap) apply_changes(used_phis, phismap) _logger.debug("DONE Prune PHIs".center(60, '-')) def _is_implicit_new_block(self, state): inst = state.get_inst() if inst.offset in self._bytecode.labels: return True elif inst.opname in NEW_BLOCKERS: return True else: return False def _guard_with_as(self, state): current_inst = state.get_inst() if current_inst.opname == "SETUP_WITH": next_op = self._bytecode[current_inst.next].opname if next_op != "POP_TOP": msg = ("The 'with (context manager) as " "(variable):' construct is not " "supported.") raise UnsupportedError(msg) class TraceRunner(object): def __init__(self, debug_filename): self.debug_filename = debug_filename self.pending = deque() self.finished = set() def get_debug_loc(self, lineno): return Loc(self.debug_filename, lineno) def dispatch(self, state): inst = state.get_inst() _logger.debug("dispatch pc=%s, inst=%s", state._pc, inst) _logger.debug("stack %s", state._stack) fn = getattr(self, "op_{}".format(inst.opname), None) if fn is not None: fn(state, inst) else: msg = "Use of unsupported opcode (%s) found" % inst.opname raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno)) def op_NOP(self, state, inst): state.append(inst) def 
op_FORMAT_VALUE(self, state, inst): if inst.arg != 0: msg = "format spec in f-strings not supported yet" raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno)) value = state.pop() strvar = state.make_temp() res = state.make_temp() state.append(inst, value=value, res=res, strvar=strvar) state.push(res) def op_BUILD_STRING(self, state, inst): count = inst.arg strings = list(reversed([state.pop() for _ in range(count)])) if count == 0: tmps = [state.make_temp()] else: tmps = [state.make_temp() for _ in range(count - 1)] state.append(inst, strings=strings, tmps=tmps) state.push(tmps[-1]) def op_POP_TOP(self, state, inst): state.pop() def op_LOAD_GLOBAL(self, state, inst): res = state.make_temp() state.append(inst, res=res) state.push(res) def op_LOAD_DEREF(self, state, inst): res = state.make_temp() state.append(inst, res=res) state.push(res) def op_LOAD_CONST(self, state, inst): res = state.make_temp("const") state.push(res) state.append(inst, res=res) def op_LOAD_ATTR(self, state, inst): item = state.pop() res = state.make_temp() state.append(inst, item=item, res=res) state.push(res) def op_LOAD_FAST(self, state, inst): name = state.get_varname(inst) res = state.make_temp(name) state.append(inst, res=res) state.push(res) def op_DELETE_FAST(self, state, inst): state.append(inst) def op_DELETE_ATTR(self, state, inst): target = state.pop() state.append(inst, target=target) def op_STORE_ATTR(self, state, inst): target = state.pop() value = state.pop() state.append(inst, target=target, value=value) def op_STORE_DEREF(self, state, inst): value = state.pop() state.append(inst, value=value) def op_STORE_FAST(self, state, inst): value = state.pop() state.append(inst, value=value) def op_SLICE_1(self, state, inst): tos = state.pop() tos1 = state.pop() res = state.make_temp() slicevar = state.make_temp() indexvar = state.make_temp() nonevar = state.make_temp() state.append( inst, base=tos1, start=tos, res=res, slicevar=slicevar, indexvar=indexvar, nonevar=nonevar, ) state.push(res)
BSD 2-Clause Simplified License
vdmitriyev/services-to-wordcloud
services-to-wordcloud/helper_wordcloud.py
WordCloudHelper.__init__
python
def __init__(self, work_dir=None): self.dir_helper = DirectoryHelper(work_dir) self.save_dir = self.dir_helper.work_dir self.dir_helper.prepare_working_directory() print '[i] working directory prepared'
(obj, str) -> None Initializes the helper and prepares the working directory.
https://github.com/vdmitriyev/services-to-wordcloud/blob/45b06aec1735b73e22b554f2f7a98bb1c46925e4/services-to-wordcloud/helper_wordcloud.py#L149-L163
__author__ = 'Viktor Dmitriyev' __copyright__ = 'Copyright 2015, Viktor Dmitriyev' __credits__ = ['Viktor Dmitriyev'] __license__ = 'MIT' __version__ = '1.1.0' __maintainer__ = '-' __email__ = '' __status__ = 'dev' __date__ = '14.02.2015' __description__ = 'Main class and some helper class for the word cloud' import os import pandas as pd import matplotlib.pyplot as plt from wordcloud import WordCloud, STOPWORDS from helper_directory import DirectoryHelper class ServiceToWordCloud(object): def __init__(self, data_file, data_folder, save_directory=None): print '[i] initializing class' if save_directory is None: save_directory = 'generated-noname-service' print '[i] generated pngs will be saved inside "{}"'.format(save_directory) self.wc_helper = WordCloudHelper(save_directory) print '[i] initialing helper class' self.STOPWORDS = STOPWORDS print '[i] stopwords loaded' self.df = DataLoader().localdata(data_file, data_folder) print '[i] data loaded' self.config_stopwords() print '[i] stopwords configured' def config_stopwords(self, more_stopwords=None): if more_stopwords is not None: self.STOPWORDS = STOPWORDS.union(more_stopwords) def wrangle_data(self, df_field): assert df_field is not None, 'df_field should not be None\ncheck variable or override wrangle_data() method' words = ' '.join(self.df[df_field]) cleaned_collection = ' '.join([word for word in words.split()]) self.words = cleaned_collection print '[i] data formatted' def generate_word_cloud(self, fonts, masks, name_prefix='some-wordcloud', bg_color='white'): if name_prefix is None: name_prefix = 'some-wordcloud' BASE_FOLDER = self.wc_helper.save_dir STOPWORDS = self.STOPWORDS print BASE_FOLDER from scipy.misc import imread for mask_name in masks: _mask_file = imread(masks[mask_name], flatten=True) _mask_width = len(_mask_file[0]) + 1 _mask_height = len(_mask_file) + 1 for font_name in fonts: _font_file = fonts[font_name] _img_name = '%s-%s-%s-%s' % (str(name_prefix), str(font_name), str(mask_name), str(bg_color)) wordcloud = WordCloud( font_path=_font_file, stopwords=STOPWORDS, background_color=bg_color, width=_mask_width, height=_mask_height, mask=_mask_file ).generate(self.words) plt.imshow(wordcloud) plt.axis('off') plt.savefig(BASE_FOLDER + _img_name, dpi=300) print '[i] image %s.png was generated ' % _img_name def process(self, service_name=None, fonts=None, masks=None): if service_name is None: service_name = 'noname-service-wordcloud' if fonts is None: fonts = self.wc_helper.load_fonts() else: fonts = self.wc_helper.load_fonts(fonts) if masks is None: masks = self.wc_helper.load_masks() else: masks = self.wc_helper.load_masks(masks) self.generate_word_cloud(fonts, masks, name_prefix=service_name) self.generate_word_cloud( fonts, masks, name_prefix=service_name, bg_color='black') class WordCloudHelper():
MIT License
blurstudio/cross3d
cross3d/abstract/abstractsceneanimationcontroller.py
AbstractSceneAnimationController._createNativeKeyAt
python
def _createNativeKeyAt(self, time): return None
Creates a new key at the given time :type time: float :return: nativeKey or None
https://github.com/blurstudio/cross3d/blob/277968d1227de740fc87ef61005c75034420eadf/cross3d/abstract/abstractsceneanimationcontroller.py#L22-L29
import cross3d from cross3d import SceneWrapper, FCurve, FrameRange, abstractmethod from cross3d.constants import TangentType, ExtrapolationType class AbstractSceneAnimationController(SceneWrapper): @abstractmethod
MIT License
purestorage-openconnect/py-pure-client
pypureclient/flashblade/FB_2_1/api/directory_services_api.py
DirectoryServicesApi.api21_directory_services_get_with_http_info
python
def api21_directory_services_get_with_http_info( self, continuation_token=None, filter=None, ids=None, limit=None, names=None, offset=None, sort=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if ids is not None: if not isinstance(ids, list): ids = [ids] if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api21_directory_services_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api21_directory_services_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'ids' in params: query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = ['AuthorizationHeader'] return self.api_client.call_api( '/api/2.1/directory-services', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DirectoryServiceGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
GET directory-services List directory service configuration information for the array. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api21_directory_services_get_with_http_info(async_req=True) >>> result = thread.get() :param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result. :param str filter: Exclude resources that don't match the specified criteria. :param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. :param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request. :param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. :param int offset: The offset of the first resource to return from a collection. :param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: DirectoryServiceGetResponse If the method is called asynchronously, returns the request thread.
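A minimal usage sketch, assuming `api_client` is an already-authenticated ApiClient for the target FlashBlade; the directory-service name queried below is hypothetical.

from pypureclient.flashblade.FB_2_1.api.directory_services_api import DirectoryServicesApi

api = DirectoryServicesApi(api_client)
response = api.api21_directory_services_get_with_http_info(
    names=["management"],   # hypothetical name
    limit=1,
)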
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flashblade/FB_2_1/api/directory_services_api.py#L29-L143
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class DirectoryServicesApi(object): def __init__(self, api_client): self.api_client = api_client
BSD 2-Clause Simplified License
dragonfly/dragonfly
dragonfly/nn/unittest_nn_modifier_class.py
networks_are_equal
python
def networks_are_equal(net1, net2, false_if_net1_is_net2=True): is_true = True for key in net1.__dict__.keys(): val1 = net1.__dict__[key] val2 = net2.__dict__[key] is_true = True if isinstance(val1, dict): if false_if_net1_is_net2: is_true = is_true and (val1 is not val2) for val_key in val1.keys(): is_true = is_true and np.all(val1[val_key] == val2[val_key]) elif hasattr(val1, '__iter__'): if false_if_net1_is_net2: is_true = is_true and (val1 is not val2) is_true = is_true and np.all(val1 == val2) else: is_true = is_true and val1 == val2 if not is_true: return is_true return is_true
Returns True if net1 and net2 are equal. If false_if_net1_is_net2 is True (the default), the result is False whenever a container attribute of net1 and net2 is the very same object, i.e. shared by reference rather than copied.
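A behaviour sketch, assuming generate_cnn_architectures() returns a list of NeuralNetwork objects (as the test module's imports suggest): a deep copy compares equal, while comparing a network against itself fails by default because its container attributes are the very same objects.

from copy import deepcopy
from dragonfly.nn.unittest_neural_network import generate_cnn_architectures
from dragonfly.nn.unittest_nn_modifier_class import networks_are_equal

net1 = generate_cnn_architectures()[0]
net_copy = deepcopy(net1)
assert networks_are_equal(net1, net_copy)
assert not networks_are_equal(net1, net1)
assert networks_are_equal(net1, net1, false_if_net1_is_net2=False)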
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/nn/unittest_nn_modifier_class.py#L23-L46
from copy import deepcopy import numpy as np import os from shutil import rmtree import unittest from . import nn_domains from . import nn_modifiers from .neural_network import NeuralNetwork from .nn_visualise import visualise_nn from .unittest_neural_network import generate_cnn_architectures, generate_mlp_architectures from ..utils.base_test_class import BaseTestClass, execute_tests
MIT License
googleapis/synthtool
autosynth/git.py
patch_merge
python
def patch_merge( branch_name: str, patch_file_path: str, git_repo_dir: str = None ) -> None: with open(patch_file_path, "wb+") as patch_file: executor.check_call( ["git", "diff", "--binary", "HEAD", branch_name], stdout=patch_file, cwd=git_repo_dir, ) if os.stat(patch_file_path).st_size: executor.check_call(["git", "apply", patch_file_path], cwd=git_repo_dir)
Merges a branch via `git diff | git apply`. Does not commit changes. Modifies files only. Arguments: branch_name {str} -- The other branch to merge into this one. patch_file_path {str} -- The path where the patch file will be (over)written. Keyword Arguments: git_repo_dir {str} -- The repo directory (default: current working directory)
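A minimal usage sketch (branch name and paths are hypothetical): write the diff between HEAD and the other branch to a patch file and apply it to the working tree, leaving the result uncommitted.

from autosynth.git import patch_merge

patch_merge(
    branch_name="feature-branch",
    patch_file_path="/tmp/autosynth.patch",
    git_repo_dir="/path/to/repo",
)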
https://github.com/googleapis/synthtool/blob/d4ff3cd9a9b2567cc00ab67290eeb89992b20318/autosynth/git.py#L133-L153
import os import subprocess import typing import pathlib from autosynth import executor GLOBAL_GITIGNORE = """ __pycache__/ *.py[cod] *$py.class """ GLOBAL_GITIGNORE_FILE = os.path.expanduser("~/.autosynth-gitignore") def configure_git(user: str, email: str) -> None: with open(GLOBAL_GITIGNORE_FILE, "w") as fh: fh.write(GLOBAL_GITIGNORE) executor.check_call( ["git", "config", "--global", "core.excludesfile", GLOBAL_GITIGNORE_FILE] ) executor.check_call(["git", "config", "user.name", user]) executor.check_call(["git", "config", "user.email", email]) executor.check_call(["git", "config", "push.default", "simple"]) def setup_branch(branch: str) -> None: executor.check_call(["git", "branch", "-f", branch]) executor.check_call(["git", "checkout", branch]) def get_last_commit_to_file(file_path: str) -> str: parent_dir = pathlib.Path(file_path).parent proc = executor.run( ["git", "log", "--pretty=format:%H", "-1", "--no-decorate", file_path], stdout=subprocess.PIPE, universal_newlines=True, cwd=parent_dir, ) proc.check_returncode() return proc.stdout.strip() def get_commit_shas_since(sha: str, dir: str) -> typing.List[str]: proc = executor.run( ["git", "log", f"{sha}..HEAD", "--pretty=%H", "--no-decorate"], universal_newlines=True, stdout=subprocess.PIPE, cwd=dir, ) proc.check_returncode() shas = proc.stdout.split() shas.append(sha) shas.reverse() return shas def commit_all_changes(message: str) -> int: executor.check_call(["git", "add", "-A"]) status = executor.run( ["git", "status", "--porcelain"], universal_newlines=True, stdout=subprocess.PIPE, check=True, ).stdout.strip() if status: executor.check_call(["git", "commit", "-m", message]) return 1 else: return 0 def push_changes(branch): executor.check_call(["git", "push", "--force", "origin", branch]) def get_repo_root_dir(repo_path: str) -> str: path = pathlib.Path(repo_path) if not path.is_dir(): path = path.parent proc = executor.run( ["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE, universal_newlines=True, cwd=str(path), ) proc.check_returncode() return proc.stdout.strip()
Apache License 2.0
yandex-cloud/python-sdk
yandex/cloud/loadbalancer/v1/target_group_service_pb2_grpc.py
TargetGroupServiceServicer.Create
python
def Create(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
Creates a target group in the specified folder and adds the specified targets to it.
https://github.com/yandex-cloud/python-sdk/blob/6ddaaaf0ad01d8fc36cb72957f70a6e7943a5ce7/yandex/cloud/loadbalancer/v1/target_group_service_pb2_grpc.py#L80-L85
import grpc from yandex.cloud.loadbalancer.v1 import target_group_pb2 as yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__pb2 from yandex.cloud.loadbalancer.v1 import target_group_service_pb2 as yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2 from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2 class TargetGroupServiceStub(object): def __init__(self, channel): self.Get = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/Get', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.GetTargetGroupRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__pb2.TargetGroup.FromString, ) self.List = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/List', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.ListTargetGroupsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.ListTargetGroupsResponse.FromString, ) self.Create = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/Create', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.CreateTargetGroupRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Update = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/Update', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.UpdateTargetGroupRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Delete = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/Delete', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.DeleteTargetGroupRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.AddTargets = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/AddTargets', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.AddTargetsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.RemoveTargets = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/RemoveTargets', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.RemoveTargetsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.ListOperations = channel.unary_unary( '/yandex.cloud.loadbalancer.v1.TargetGroupService/ListOperations', request_serializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.ListTargetGroupOperationsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_loadbalancer_dot_v1_dot_target__group__service__pb2.ListTargetGroupOperationsResponse.FromString, ) class TargetGroupServiceServicer(object): def Get(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def List(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise 
NotImplementedError('Method not implemented!')
MIT License
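The module above is generated gRPC plumbing: TargetGroupServiceStub only binds method names to protobuf (de)serializers. A minimal, hedged sketch of driving it follows; the endpoint, folder id, and response field names are illustrative assumptions, and a real Yandex Cloud call would also need a TLS channel and an IAM token in the call metadata.

import grpc
from yandex.cloud.loadbalancer.v1 import target_group_service_pb2
from yandex.cloud.loadbalancer.v1.target_group_service_pb2_grpc import TargetGroupServiceStub

# Hypothetical endpoint; production traffic goes over a secure channel.
channel = grpc.insecure_channel("localhost:50051")
stub = TargetGroupServiceStub(channel)

# folder_id is a placeholder; the request/response field names are assumed
# from the service definition referenced in this record.
request = target_group_service_pb2.ListTargetGroupsRequest(folder_id="b1g-example-folder")
response = stub.List(request)
for group in response.target_groups:
    print(group.name)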
smartbgp/yabgp
yabgp/message/attribute/localpref.py
LocalPreference.construct
python
def construct(cls, value):
    try:
        return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
            + struct.pack('!B', 4) + struct.pack('!I', value)
    except Exception:
        raise excep.UpdateMessageError(
            sub_error=bgp_cons.ERR_MSG_UPDATE_ATTR_LEN,
            data='')
Encode BGP local preference attribute. :param value: integer value
https://github.com/smartbgp/yabgp/blob/f073633a813899cd9b413bc28ea2f7737deee141/yabgp/message/attribute/localpref.py#L51-L62
import struct from yabgp.message.attribute import Attribute from yabgp.message.attribute import AttributeID from yabgp.message.attribute import AttributeFlag from yabgp.common import constants as bgp_cons from yabgp.common import exception as excep class LocalPreference(Attribute): ID = AttributeID.LOCAL_PREF FLAG = AttributeFlag.TRANSITIVE MULTIPLE = False @classmethod def parse(cls, value): try: return struct.unpack('!I', value)[0] except: raise excep.UpdateMessageError( sub_error=bgp_cons.ERR_MSG_UPDATE_ATTR_LEN, data=value) @classmethod
Apache License 2.0
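A short, hedged round-trip sketch for the attribute above; the import path follows the record's file layout and the value 100 is arbitrary.

import struct
from yabgp.message.attribute.localpref import LocalPreference

packed = LocalPreference.construct(value=100)
# Layout produced by construct(): flag byte, type byte, length byte (4), then the 4-byte value.
flag, type_code, length = struct.unpack('!BBB', packed[:3])
assert length == 4
assert LocalPreference.parse(packed[3:]) == 100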
wangheda/youtube-8m
youtube-8m-wangheda/all_frame_models/progressive_attention_lstm_model.py
ProgressiveAttentionLstmModel.create_model
python
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    lstm_size = int(FLAGS.lstm_cells)
    number_of_layers = FLAGS.lstm_layers
    num_attentions = FLAGS.num_attentions
    print model_input.get_shape().as_list()
    max_frames = model_input.get_shape().as_list()[1]
    stacked_cell = tf.contrib.rnn.MultiRNNCell(
        [
            tf.contrib.rnn.BasicLSTMCell(
                lstm_size, forget_bias=1.0, state_is_tuple=True)
            for _ in range(number_of_layers)
        ],
        state_is_tuple=True)
    att_cell = tf.contrib.rnn.AttentionCellWrapper(
        cell=tf.contrib.rnn.BasicLSTMCell(
            lstm_size, forget_bias=1.0, state_is_tuple=True),
        attn_length=1, state_is_tuple=True)
    loss = 0.0
    with tf.variable_scope("RNN"):
        outputs, state = tf.nn.dynamic_rnn(stacked_cell, model_input,
                                           sequence_length=num_frames,
                                           swap_memory=FLAGS.rnn_swap_memory,
                                           dtype=tf.float32)
        final_memory = tf.concat(map(lambda x: x.c, state), axis=1)
    with tf.variable_scope("ATT"):
        att_outputs, att_state = tf.nn.dynamic_rnn(
            att_cell, outputs,
            sequence_length=tf.ones_like(num_frames, dtype=tf.int32) * num_attentions,
            swap_memory=FLAGS.rnn_swap_memory,
            dtype=tf.float32)
        print att_outputs
        print att_state
        att_state, _, _ = att_state
        print att_state
        att_final_memory = att_state.c
    final_state = tf.concat([att_final_memory, final_memory], axis=1)
    print "final_state", final_state
    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    predictions = aggregated_model().create_model(
        model_input=final_state,
        original_input=model_input,
        vocab_size=vocab_size,
        **unused_params)
    print predictions
    return predictions
Creates a model which uses a stack of LSTMs to represent the video.

Args:
  model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of input features.
  vocab_size: The number of classes in the dataset.
  num_frames: A vector of length 'batch' which indicates the number of frames for each video (before padding).

Returns:
  A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are 'batch_size' x 'num_classes'.
https://github.com/wangheda/youtube-8m/blob/07e54b387ee027cb58b0c14f5eb7c88cfa516d58/youtube-8m-wangheda/all_frame_models/progressive_attention_lstm_model.py#L15-L79
import sys import models import model_utils import math import numpy as np import video_level_models import tensorflow as tf import utils import tensorflow.contrib.slim as slim from tensorflow import flags FLAGS = flags.FLAGS class ProgressiveAttentionLstmModel(models.BaseModel):
Apache License 2.0
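A hedged sketch of how this frame-level model might be wired into the repository's TensorFlow 1.x pipeline. The tensor shapes and the 4716-class vocabulary are illustrative assumptions, the import assumes the youtube-8m-wangheda directory is on sys.path, and the relevant FLAGS (lstm_cells, lstm_layers, num_attentions, ...) must already be parsed.

import tensorflow as tf  # TensorFlow 1.x, as used by the repository
from all_frame_models.progressive_attention_lstm_model import ProgressiveAttentionLstmModel

# Placeholder inputs: batch x max_frames x features, plus per-video frame counts.
model_input = tf.placeholder(tf.float32, shape=[None, 300, 1024])
num_frames = tf.placeholder(tf.int32, shape=[None])

model = ProgressiveAttentionLstmModel()
outputs = model.create_model(model_input, vocab_size=4716, num_frames=num_frames)
predictions = outputs["predictions"]  # batch_size x num_classes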
inovex/illuminatio
src/illuminatio/test_generator.py
get_namespace_label_strings
python
def get_namespace_label_strings(namespace_labels, namespaces):
    return {
        labels_to_string(namespace_label): [
            namespace.metadata.name
            for namespace in namespaces
            if namespace.metadata.labels is not None
            and namespace_label.items() <= namespace.metadata.labels.items()
        ]
        for namespace_label in namespace_labels
    }
Returns a dict that maps each stringified namespace label to the names of the namespaces carrying that label
https://github.com/inovex/illuminatio/blob/dd57599ef675451ddbb35225d5c4ee09c70a3b3a/src/illuminatio/test_generator.py#L38-L51
import time from typing import List import kubernetes as k8s from illuminatio.k8s_util import labels_to_string from illuminatio.rule import Rule from illuminatio.test_case import NetworkTestCase from illuminatio.host import ClusterHost, GenericClusterHost from illuminatio.util import rand_port, INVERTED_ATTRIBUTE_PREFIX def _get_other_host_from(connection_targets, rule_namespace): namespace_labels = "namespaceLabels" pod_labels = "podLabels" namespace = "namespace" if namespace_labels in connection_targets and pod_labels in connection_targets: return GenericClusterHost( connection_targets[namespace_labels], connection_targets[pod_labels] ) if namespace in connection_targets and pod_labels in connection_targets: return ClusterHost( connection_targets[namespace], connection_targets[pod_labels] ) if namespace_labels in connection_targets: return GenericClusterHost(connection_targets[namespace_labels], {}) if pod_labels in connection_targets: return ClusterHost(rule_namespace, connection_targets[pod_labels]) if connection_targets == {}: return GenericClusterHost({}, {}) raise ValueError( "Unknown combination of field in connection %s" % connection_targets )
Apache License 2.0
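A hedged usage sketch with hand-built namespace objects; the namespace names and labels are made up, and the exact key format in the result depends on labels_to_string.

from kubernetes.client import V1Namespace, V1ObjectMeta
from illuminatio.test_generator import get_namespace_label_strings

namespaces = [
    V1Namespace(metadata=V1ObjectMeta(name="frontend", labels={"team": "web"})),
    V1Namespace(metadata=V1ObjectMeta(name="backend", labels={"team": "api"})),
]
namespace_labels = [{"team": "web"}]

# Maps each stringified label selector to the namespaces that carry it,
# e.g. something like {'team=web': ['frontend']}.
print(get_namespace_label_strings(namespace_labels, namespaces))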
cisco-en-programmability/dnacentersdk
dnacentersdk/api/v2_2_1/devices.py
Devices.threat_detail_count
python
def threat_detail_count(self,
                        endTime=None,
                        isNewThreat=None,
                        limit=None,
                        offset=None,
                        siteId=None,
                        startTime=None,
                        threatLevel=None,
                        threatType=None,
                        headers=None,
                        payload=None,
                        active_validation=True,
                        **request_parameters):
    check_type(headers, dict)
    check_type(payload, dict)
    if headers is not None:
        if 'Content-Type' in headers:
            check_type(headers.get('Content-Type'),
                       basestring, may_be_none=False)
        if 'X-Auth-Token' in headers:
            check_type(headers.get('X-Auth-Token'),
                       basestring, may_be_none=False)

    _params = {
    }
    _params.update(request_parameters)
    _params = dict_from_items_with_values(_params)

    path_params = {
    }
    _payload = {
        'offset': offset,
        'limit': limit,
        'startTime': startTime,
        'endTime': endTime,
        'siteId': siteId,
        'threatLevel': threatLevel,
        'threatType': threatType,
        'isNewThreat': isNewThreat,
    }
    _payload.update(payload or {})
    _payload = dict_from_items_with_values(_payload)
    if active_validation:
        self._request_validator('jsd_c7266d89581c9601b79b7304fda3_v2_2_1')\
            .validate(_payload)

    with_custom_headers = False
    _headers = self._session.headers or {}
    if headers:
        _headers.update(dict_of_str(headers))
        with_custom_headers = True

    e_url = ('/dna/intent/api/v1/security/threats/details/count')
    endpoint_full_url = apply_path_params(e_url, path_params)
    if with_custom_headers:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload,
                                       headers=_headers)
    else:
        json_data = self._session.post(endpoint_full_url, params=_params,
                                       json=_payload)

    return self._object_factory('bpm_c7266d89581c9601b79b7304fda3_v2_2_1', json_data)
The details count for the Rogue and aWIPS threats.

Args:
    endTime(integer): Devices's endTime.
    isNewThreat(boolean): Devices's isNewThreat.
    limit(integer): Devices's limit.
    offset(integer): Devices's offset.
    siteId(list): Devices's siteId (list of strings).
    startTime(integer): Devices's startTime.
    threatLevel(list): Devices's threatLevel (list of strings).
    threatType(list): Devices's threatType (list of strings).
    headers(dict): Dictionary of HTTP Headers to send with the Request.
    payload(dict): A JSON serializable Python object to send in the body of the Request.
    active_validation(bool): Enable/Disable payload validation. Defaults to True.
    **request_parameters: Additional request parameters (provides support for parameters that may be added in the future).

Returns:
    MyDict: JSON response. Access the object's properties by using the dot notation or the bracket notation.

Raises:
    TypeError: If the parameter types are incorrect.
    MalformedRequest: If the request body created is invalid.
    ApiError: If the DNA Center cloud returns an error.
https://github.com/cisco-en-programmability/dnacentersdk/blob/ef2adde6113e7a6acd28a287007eb470fa39d31f/dnacentersdk/api/v2_2_1/devices.py#L75-L174
from __future__ import ( absolute_import, division, print_function, unicode_literals, ) from builtins import * from past.builtins import basestring from ...restsession import RestSession from ...utils import ( check_type, dict_from_items_with_values, apply_path_params, dict_of_str, ) class Devices(object): def __init__(self, session, object_factory, request_validator): check_type(session, RestSession) super(Devices, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator
MIT License
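A hedged sketch of reaching the endpoint through the SDK's top-level client; the base URL and credentials are placeholders, and it assumes the client is pinned to an API version that exposes this method.

from dnacentersdk import DNACenterAPI

api = DNACenterAPI(base_url="https://dnac.example.com",
                   username="admin",
                   password="********",
                   verify=False)

# Counts High-severity threats flagged as new; both filter values are illustrative.
result = api.devices.threat_detail_count(threatLevel=["High"], isNewThreat=True)
print(result)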
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_8/api/policies_api.py
PoliciesApi.api28_policies_members_get_with_http_info
python
def api28_policies_members_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, destroyed=None, filter=None, limit=None, member_ids=None, member_names=None, member_types=None, policy_ids=None, policy_names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if member_ids is not None: if not isinstance(member_ids, list): member_ids = [member_ids] if member_names is not None: if not isinstance(member_names, list): member_names = [member_names] if member_types is not None: if not isinstance(member_types, list): member_types = [member_types] if policy_ids is not None: if not isinstance(policy_ids, list): policy_ids = [policy_ids] if policy_names is not None: if not isinstance(policy_names, list): policy_names = [policy_names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api28_policies_members_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api28_policies_members_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'destroyed' in params: query_params.append(('destroyed', params['destroyed'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'member_ids' in params: query_params.append(('member_ids', params['member_ids'])) collection_formats['member_ids'] = 'csv' if 'member_names' in params: query_params.append(('member_names', params['member_names'])) collection_formats['member_names'] = 'csv' if 'member_types' in params: query_params.append(('member_types', params['member_types'])) collection_formats['member_types'] = 'csv' if 'policy_ids' in params: query_params.append(('policy_ids', params['policy_ids'])) collection_formats['policy_ids'] = 'csv' if 'policy_names' in params: query_params.append(('policy_names', params['policy_names'])) collection_formats['policy_names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/policies/members', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PolicyMemberGetResponse', auth_settings=auth_settings, 
async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
List policy members Displays a list of policy members. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.api28_policies_members_get_with_http_info(async_req=True) >>> result = thread.get() :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`) :param str x_request_id: Supplied by client during request or generated by server. :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters. :param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds. :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria. :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size. :param list[str] member_ids: Performs the operation on the unique member IDs specified. Enter multiple member IDs in comma-separated format. The `member_ids` and `member_names` parameters cannot be provided together. :param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`. :param list[str] member_types: Performs the operation on the member types specified. The type of member is the full name of the resource endpoint. Valid values include `directories`. Enter multiple member types in comma-separated format. For example, `type01,type02`. :param list[str] policy_ids: Performs the operation on the unique policy IDs specified. Enter multiple policy IDs in comma-separated format. The `policy_ids` and `policy_names` parameters cannot be provided together. :param list[str] policy_names: Performs the operation on the policy names specified. Enter multiple policy names in comma-separated format. For example, `name01,name02`. :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned. :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values. 
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`. :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. :param bool _return_http_data_only: Returns only data field. :param bool _preload_content: Response is converted into objects. :param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts. :return: PolicyMemberGetResponse If the method is called asynchronously, returns the request thread.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_8/api/policies_api.py#L157-L311
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class PoliciesApi(object): def __init__(self, api_client): self.api_client = api_client def api28_policies_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, filter=None, ids=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if ids is not None: if not isinstance(ids, list): ids = [ids] if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api28_policies_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api28_policies_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'ids' in params: query_params.append(('ids', params['ids'])) collection_formats['ids'] = 'csv' if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/policies', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PolicyGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
BSD 2-Clause Simplified License
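In practice this generated *_with_http_info method is reached through the package's top-level FlashArray client rather than instantiated directly. A hedged sketch follows; the array address and API token are placeholders, and the get_policies_members wrapper name is an assumption rather than something taken from this record.

from pypureclient import flasharray

client = flasharray.Client('array.example.com', api_token='PLACEHOLDER-TOKEN')

# Assumed convenience wrapper around GET /api/2.8/policies/members.
response = client.get_policies_members(policy_names=['policy01'], limit=10)
for member in response.items:
    print(member)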
jshaffstall/pyphysicssandbox
py2d/FOV.py
Vision.get_vision
python
def get_vision(self, eye, radius, boundary):
    if self.cached_vision is None or (self.cached_position - eye).get_length_squared() > 1:
        self.calculate(eye, radius, boundary)
    return self.cached_vision
Get a vision polygon for a given eye position and boundary Polygon.

@type eye: Vector
@param eye: The position of the viewer (normally the center of the boundary polygon)

@type radius: float
@param radius: The maximum vision radius (normally the radius of the boundary polygon)

@type boundary: Polygon
@param boundary: The boundary polygon that describes the maximal field of vision
https://github.com/jshaffstall/pyphysicssandbox/blob/ad9300ccc1e50f95372c6e0ca7bb0a9cafc839b7/py2d/FOV.py#L59-L73
import functools import py2d.Math class Vision: def __init__(self, obstructors, debug=False): self.set_obstructors(obstructors) self.debug = debug self.debug_points = [] self.debug_linesegs = [] def set_obstructors(self, obstructors): def flatten_list(l): return functools.reduce(lambda x,y: x+y, l) self.obs_points = flatten_list(obstructors) self.obs_segs = flatten_list([ list(zip(strip, strip[1:])) for strip in obstructors ]) self.cached_vision = None self.cached_position = None self.cached_radius = None
MIT License
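A hedged sketch of the caching behaviour described by get_vision: a second query whose eye point has moved less than one unit should reuse the cached polygon. The Vector and Polygon construction below assumes py2d.Math's API (the regular-polygon constructor and its argument order are assumptions).

import py2d.Math as m
from py2d.FOV import Vision

# One obstructing wall segment (a two-point line strip).
wall = [m.Vector(2.0, -1.0), m.Vector(2.0, 1.0)]
# Assumed constructor: (center, radius, point count).
boundary = m.Polygon.regular(m.Vector(0.0, 0.0), 10.0, 16)

vision = Vision([wall])
first = vision.get_vision(m.Vector(0.0, 0.0), 10.0, boundary)
second = vision.get_vision(m.Vector(0.5, 0.0), 10.0, boundary)  # moved < 1 unit: cached result expected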
hustlzp/flask-boost
flask_boost/project/manage.py
live
python
def live():
    from livereload import Server
    server = Server(app)

    map(server.watch, glob2.glob("application/pages/**/*.*"))
    map(server.watch, glob2.glob("application/macros/**/*.html"))
    map(server.watch, glob2.glob("application/static/**/*.*"))

    server.serve(port=PORT)
Run livereload server
https://github.com/hustlzp/flask-boost/blob/d0308408ebb248dd752b77123b845f8ec637fab2/flask_boost/project/manage.py#L27-L37
import os import glob2 from flask.ext.script import Manager from flask.ext.migrate import Migrate, MigrateCommand from application import create_app from application.models import db PORT = 5000 app = create_app() manager = Manager(app) migrate = Migrate(app, db) manager.add_command('db', MigrateCommand) @manager.command def run(): app.run(port=PORT) @manager.command
MIT License
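The command above is registered on the Flask-Script Manager defined in the same file, so it is run as "python manage.py live". One caveat worth noting: on Python 3, map() is lazy, so the three map(server.watch, ...) calls would register nothing there. A hedged, Python-3-safe equivalent (a sketch, not the project's code) looks like this:

import glob2
from livereload import Server
from application import create_app

def live_py3(port=5000):
    # Same watch patterns as the manage.py command, but with an explicit loop
    # so the watches are actually registered on Python 3.
    server = Server(create_app())
    for pattern in ("application/pages/**/*.*",
                    "application/macros/**/*.html",
                    "application/static/**/*.*"):
        for path in glob2.glob(pattern):
            server.watch(path)
    server.serve(port=port)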
zalando/patroni
patroni/ctl.py
touch_member
python
def touch_member(config, dcs):
    p = Postgresql(config['postgresql'])
    p.set_state('running')
    p.set_role('master')

    def restapi_connection_string(config):
        protocol = 'https' if config.get('certfile') else 'http'
        connect_address = config.get('connect_address')
        listen = config['listen']
        return '{0}://{1}/patroni'.format(protocol, connect_address or listen)

    data = {
        'conn_url': p.connection_string,
        'api_url': restapi_connection_string(config['restapi']),
        'state': p.state,
        'role': p.role
    }

    return dcs.touch_member(data, permanent=True)
Rip-off of the ha.touch_member without inter-class dependencies
https://github.com/zalando/patroni/blob/47ebda0d5d4fc69d41b9160ebc76ac494c06b2bd/patroni/ctl.py#L897-L916
import click import codecs import datetime import dateutil.parser import dateutil.tz import copy import difflib import io import json import logging import os import random import six import subprocess import sys import tempfile import time import yaml from click import ClickException from collections import defaultdict from contextlib import contextmanager from prettytable import ALL, FRAME, PrettyTable from six.moves.urllib_parse import urlparse try: from ydiff import markup_to_pager, PatchStream except ImportError: from cdiff import markup_to_pager, PatchStream from .dcs import get_dcs as _get_dcs from .exceptions import PatroniException from .postgresql import Postgresql from .postgresql.misc import postgres_version_to_int from .utils import cluster_as_json, find_executable, patch_config, polling_loop from .request import PatroniRequest from .version import __version__ CONFIG_DIR_PATH = click.get_app_dir('patroni') CONFIG_FILE_PATH = os.path.join(CONFIG_DIR_PATH, 'patronictl.yaml') DCS_DEFAULTS = {'zookeeper': {'port': 2181, 'template': "zookeeper:\n hosts: ['{host}:{port}']"}, 'exhibitor': {'port': 8181, 'template': "exhibitor:\n hosts: [{host}]\n port: {port}"}, 'consul': {'port': 8500, 'template': "consul:\n host: '{host}:{port}'"}, 'etcd': {'port': 2379, 'template': "etcd:\n host: '{host}:{port}'"}} class PatroniCtlException(ClickException): pass class PatronictlPrettyTable(PrettyTable): def __init__(self, header, *args, **kwargs): PrettyTable.__init__(self, *args, **kwargs) self.__table_header = header self.__hline_num = 0 self.__hline = None def _is_first_hline(self): return self.__hline_num == 0 def _set_hline(self, value): self.__hline = value def _get_hline(self): ret = self.__hline if self._is_first_hline() and self.__table_header: header = self.__table_header[:len(ret) - 2] ret = "".join([ret[0], header, ret[1 + len(header):]]) self.__hline_num += 1 return ret _hrule = property(_get_hline, _set_hline) def parse_dcs(dcs): if dcs is None: return None elif '//' not in dcs: dcs = '//' + dcs parsed = urlparse(dcs) scheme = parsed.scheme port = int(parsed.port) if parsed.port else None if scheme == '': scheme = ([k for k, v in DCS_DEFAULTS.items() if v['port'] == port] or ['etcd'])[0] elif scheme not in DCS_DEFAULTS: raise PatroniCtlException('Unknown dcs scheme: {}'.format(scheme)) default = DCS_DEFAULTS[scheme] return yaml.safe_load(default['template'].format(host=parsed.hostname or 'localhost', port=port or default['port'])) def load_config(path, dcs): from patroni.config import Config if not (os.path.exists(path) and os.access(path, os.R_OK)): if path != CONFIG_FILE_PATH: raise PatroniCtlException('Provided config file {0} not existing or no read rights.' ' Check the -c/--config-file parameter'.format(path)) else: logging.debug('Ignoring configuration file "%s". 
It does not exists or is not readable.', path) else: logging.debug('Loading configuration from file %s', path) config = Config(path, validator=None).copy() dcs = parse_dcs(dcs) or parse_dcs(config.get('dcs_api')) or {} if dcs: for d in DCS_DEFAULTS: config.pop(d, None) config.update(dcs) return config def store_config(config, path): dir_path = os.path.dirname(path) if dir_path and not os.path.isdir(dir_path): os.makedirs(dir_path) with open(path, 'w') as fd: yaml.dump(config, fd) option_format = click.option('--format', '-f', 'fmt', help='Output format (pretty, tsv, json, yaml)', default='pretty') option_watchrefresh = click.option('-w', '--watch', type=float, help='Auto update the screen every X seconds') option_watch = click.option('-W', is_flag=True, help='Auto update the screen every 2 seconds') option_force = click.option('--force', is_flag=True, help='Do not ask for confirmation at any point') arg_cluster_name = click.argument('cluster_name', required=False, default=lambda: click.get_current_context().obj.get('scope')) option_insecure = click.option('-k', '--insecure', is_flag=True, help='Allow connections to SSL sites without certs') @click.group() @click.option('--config-file', '-c', help='Configuration file', envvar='PATRONICTL_CONFIG_FILE', default=CONFIG_FILE_PATH) @click.option('--dcs', '-d', help='Use this DCS', envvar='DCS') @option_insecure @click.pass_context def ctl(ctx, config_file, dcs, insecure): level = 'WARNING' for name in ('LOGLEVEL', 'PATRONI_LOGLEVEL', 'PATRONI_LOG_LEVEL'): level = os.environ.get(name, level) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level) logging.captureWarnings(True) ctx.obj = load_config(config_file, dcs) ctx.obj.setdefault('ctl', {})['insecure'] = ctx.obj.get('ctl', {}).get('insecure') or insecure def get_dcs(config, scope): config.update({'scope': scope, 'patronictl': True}) config.setdefault('name', scope) try: return _get_dcs(config) except PatroniException as e: raise PatroniCtlException(str(e)) def request_patroni(member, method='GET', endpoint=None, data=None): ctx = click.get_current_context() request_executor = ctx.obj.get('__request_patroni') if not request_executor: request_executor = ctx.obj['__request_patroni'] = PatroniRequest(ctx.obj) return request_executor(member, method, endpoint, data) def print_output(columns, rows, alignment=None, fmt='pretty', header=None, delimiter='\t'): if fmt in {'json', 'yaml', 'yml'}: elements = [{k: v for k, v in zip(columns, r) if not header or str(v)} for r in rows] func = json.dumps if fmt == 'json' else format_config_for_editing click.echo(func(elements)) elif fmt in {'pretty', 'tsv', 'topology'}: list_cluster = bool(header and columns and columns[0] == 'Cluster') if list_cluster and 'Tags' in columns: i = columns.index('Tags') for row in rows: if row[i]: row[i] = format_config_for_editing(row[i], fmt != 'pretty').strip() if list_cluster and fmt != 'tsv': columns = columns[1:] if columns else [] rows = [row[1:] for row in rows] if fmt == 'tsv': for r in ([columns] if columns else []) + rows: click.echo(delimiter.join(map(str, r))) else: hrules = ALL if any(any(isinstance(c, six.string_types) and '\n' in c for c in r) for r in rows) else FRAME table = PatronictlPrettyTable(header, columns, hrules=hrules) table.align = 'l' for k, v in (alignment or {}).items(): table.align[k] = v for r in rows: table.add_row(r) click.echo(table) def watching(w, watch, max_count=None, clear=True): if w and not watch: watch = 2 if watch and clear: click.clear() yield 0 if 
max_count is not None and max_count < 1: return counter = 1 while watch and counter <= (max_count or counter): time.sleep(watch) counter += 1 if clear: click.clear() yield 0 def get_all_members(cluster, role='master'): if role == 'master': if cluster.leader is not None and cluster.leader.name: yield cluster.leader return leader_name = (cluster.leader.member.name if cluster.leader else None) for m in cluster.members: if role == 'any' or role == 'replica' and m.name != leader_name: yield m def get_any_member(cluster, role='master', member=None): members = get_all_members(cluster, role) for m in members: if member is None or m.name == member: return m def get_all_members_leader_first(cluster): leader_name = cluster.leader.member.name if cluster.leader and cluster.leader.member.api_url else None if leader_name: yield cluster.leader.member for member in cluster.members: if member.api_url and member.name != leader_name: yield member def get_cursor(cluster, connect_parameters, role='master', member=None): member = get_any_member(cluster, role=role, member=member) if member is None: return None params = member.conn_kwargs(connect_parameters) params.update({'fallback_application_name': 'Patroni ctl', 'connect_timeout': '5'}) if 'database' in connect_parameters: params['database'] = connect_parameters['database'] else: params.pop('database') import psycopg2 conn = psycopg2.connect(**params) conn.autocommit = True cursor = conn.cursor() if role == 'any': return cursor cursor.execute('SELECT pg_catalog.pg_is_in_recovery()') in_recovery = cursor.fetchone()[0] if in_recovery and role == 'replica' or not in_recovery and role == 'master': return cursor conn.close() return None def get_members(cluster, cluster_name, member_names, role, force, action, ask_confirmation=True): candidates = {m.name: m for m in cluster.members} if not force or role: if not member_names and not candidates: raise PatroniCtlException('{0} cluster doesn\'t have any members'.format(cluster_name)) output_members(cluster, cluster_name) if role: role_names = [m.name for m in get_all_members(cluster, role)] if member_names: member_names = list(set(member_names) & set(role_names)) if not member_names: raise PatroniCtlException('No {0} among provided members'.format(role)) else: member_names = role_names if not member_names and not force: member_names = [click.prompt('Which member do you want to {0} [{1}]?'.format(action, ', '.join(candidates.keys())), type=str, default='')] for member_name in member_names: if member_name not in candidates: raise PatroniCtlException('{0} is not a member of cluster'.format(member_name)) members = [candidates[n] for n in member_names] if ask_confirmation: confirm_members_action(members, force, action) return members def confirm_members_action(members, force, action, scheduled_at=None): if scheduled_at: if not force: confirm = click.confirm('Are you sure you want to schedule {0} of members {1} at {2}?' .format(action, ', '.join([m.name for m in members]), scheduled_at)) if not confirm: raise PatroniCtlException('Aborted scheduled {0}'.format(action)) else: if not force: confirm = click.confirm('Are you sure you want to {0} members {1}?' 
.format(action, ', '.join([m.name for m in members]))) if not confirm: raise PatroniCtlException('Aborted {0}'.format(action)) @ctl.command('dsn', help='Generate a dsn for the provided member, defaults to a dsn of the master') @click.option('--role', '-r', help='Give a dsn of any member with this role', type=click.Choice(['master', 'replica', 'any']), default=None) @click.option('--member', '-m', help='Generate a dsn for this member', type=str) @arg_cluster_name @click.pass_obj def dsn(obj, cluster_name, role, member): if role is not None and member is not None: raise PatroniCtlException('--role and --member are mutually exclusive options') if member is None and role is None: role = 'master' cluster = get_dcs(obj, cluster_name).get_cluster() m = get_any_member(cluster, role=role, member=member) if m is None: raise PatroniCtlException('Can not find a suitable member') params = m.conn_kwargs() click.echo('host={host} port={port}'.format(**params)) @ctl.command('query', help='Query a Patroni PostgreSQL member') @arg_cluster_name @click.option('--format', 'fmt', help='Output format (pretty, tsv, json, yaml)', default='tsv') @click.option('--file', '-f', 'p_file', help='Execute the SQL commands from this file', type=click.File('rb')) @click.option('--password', help='force password prompt', is_flag=True) @click.option('-U', '--username', help='database user name', type=str) @option_watch @option_watchrefresh @click.option('--role', '-r', help='The role of the query', type=click.Choice(['master', 'replica', 'any']), default=None) @click.option('--member', '-m', help='Query a specific member', type=str) @click.option('--delimiter', help='The column delimiter', default='\t') @click.option('--command', '-c', help='The SQL commands to execute') @click.option('-d', '--dbname', help='database name to connect to', type=str) @click.pass_obj def query( obj, cluster_name, role, member, w, watch, delimiter, command, p_file, password, username, dbname, fmt='tsv', ): if role is not None and member is not None: raise PatroniCtlException('--role and --member are mutually exclusive options') if member is None and role is None: role = 'master' if p_file is not None and command is not None: raise PatroniCtlException('--file and --command are mutually exclusive options') if p_file is None and command is None: raise PatroniCtlException('You need to specify either --command or --file') connect_parameters = {} if username: connect_parameters['username'] = username if password: connect_parameters['password'] = click.prompt('Password', hide_input=True, type=str) if dbname: connect_parameters['database'] = dbname if p_file is not None: command = p_file.read() dcs = get_dcs(obj, cluster_name) cursor = None for _ in watching(w, watch, clear=False): if cursor is None: cluster = dcs.get_cluster() output, header = query_member(cluster, cursor, member, role, command, connect_parameters) print_output(header, output, fmt=fmt, delimiter=delimiter) def query_member(cluster, cursor, member, role, command, connect_parameters): import psycopg2 try: if cursor is None: cursor = get_cursor(cluster, connect_parameters, role=role, member=member) if cursor is None: if role is None: message = 'No connection to member {0} is available'.format(member) else: message = 'No connection to role={0} is available'.format(role) logging.debug(message) return [[timestamp(0), message]], None cursor.execute(command) return cursor.fetchall(), [d.name for d in cursor.description] except (psycopg2.OperationalError, psycopg2.DatabaseError) as oe: 
logging.debug(oe) if cursor is not None and not cursor.connection.closed: cursor.connection.close() message = oe.pgcode or oe.pgerror or str(oe) message = message.replace('\n', ' ') return [[timestamp(0), 'ERROR, SQLSTATE: {0}'.format(message)]], None @ctl.command('remove', help='Remove cluster from DCS') @click.argument('cluster_name') @option_format @click.pass_obj def remove(obj, cluster_name, fmt): dcs = get_dcs(obj, cluster_name) cluster = dcs.get_cluster() output_members(cluster, cluster_name, fmt=fmt) confirm = click.prompt('Please confirm the cluster name to remove', type=str) if confirm != cluster_name: raise PatroniCtlException('Cluster names specified do not match') message = 'Yes I am aware' confirm = click.prompt('You are about to remove all information in DCS for {0}, please type: "{1}"'.format(cluster_name, message), type=str) if message != confirm: raise PatroniCtlException('You did not exactly type "{0}"'.format(message)) if cluster.leader and cluster.leader.name: confirm = click.prompt('This cluster currently is healthy. Please specify the master name to continue') if confirm != cluster.leader.name: raise PatroniCtlException('You did not specify the current master of the cluster') dcs.delete_cluster() def check_response(response, member_name, action_name, silent_success=False): if response.status >= 400: click.echo('Failed: {0} for member {1}, status code={2}, ({3})'.format( action_name, member_name, response.status, response.data.decode('utf-8') )) return False elif not silent_success: click.echo('Success: {0} for member {1}'.format(action_name, member_name)) return True def parse_scheduled(scheduled): if (scheduled or 'now') != 'now': try: scheduled_at = dateutil.parser.parse(scheduled) if scheduled_at.tzinfo is None: scheduled_at = scheduled_at.replace(tzinfo=dateutil.tz.tzlocal()) except (ValueError, TypeError): message = 'Unable to parse scheduled timestamp ({0}). It should be in an unambiguous format (e.g. ISO 8601)' raise PatroniCtlException(message.format(scheduled)) return scheduled_at return None @ctl.command('reload', help='Reload cluster member configuration') @click.argument('cluster_name') @click.argument('member_names', nargs=-1) @click.option('--role', '-r', help='Reload only members with this role', default='any', type=click.Choice(['master', 'replica', 'any'])) @option_force @click.pass_obj def reload(obj, cluster_name, member_names, force, role): cluster = get_dcs(obj, cluster_name).get_cluster() members = get_members(cluster, cluster_name, member_names, role, force, 'reload') for member in members: r = request_patroni(member, 'post', 'reload') if r.status == 200: click.echo('No changes to apply on member {0}'.format(member.name)) elif r.status == 202: click.echo('Reload request received for member {0} and will be processed within {1} seconds'.format( member.name, cluster.config.data.get('loop_wait')) ) else: click.echo('Failed: reload for member {0}, status code={1}, ({2})'.format( member.name, r.status, r.data.decode('utf-8')) ) @ctl.command('restart', help='Restart cluster member') @click.argument('cluster_name') @click.argument('member_names', nargs=-1) @click.option('--role', '-r', help='Restart only members with this role', default='any', type=click.Choice(['master', 'replica', 'any'])) @click.option('--any', 'p_any', help='Restart a single member only', is_flag=True) @click.option('--scheduled', help='Timestamp of a scheduled restart in unambiguous format (e.g. 
ISO 8601)', default=None) @click.option('--pg-version', 'version', help='Restart if the PostgreSQL version is less than provided (e.g. 9.5.2)', default=None) @click.option('--pending', help='Restart if pending', is_flag=True) @click.option('--timeout', help='Return error and fail over if necessary when restarting takes longer than this.') @option_force @click.pass_obj def restart(obj, cluster_name, member_names, force, role, p_any, scheduled, version, pending, timeout): cluster = get_dcs(obj, cluster_name).get_cluster() members = get_members(cluster, cluster_name, member_names, role, force, 'restart', False) if scheduled is None and not force: next_hour = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M') scheduled = click.prompt('When should the restart take place (e.g. ' + next_hour + ') ', type=str, default='now') scheduled_at = parse_scheduled(scheduled) confirm_members_action(members, force, 'restart', scheduled_at) if p_any: random.shuffle(members) members = members[:1] if version is None and not force: version = click.prompt('Restart if the PostgreSQL version is less than provided (e.g. 9.5.2) ', type=str, default='') content = {} if pending: content['restart_pending'] = True if version: try: postgres_version_to_int(version) except PatroniException as e: raise PatroniCtlException(e.value) content['postgres_version'] = version if scheduled_at: if cluster.is_paused(): raise PatroniCtlException("Can't schedule restart in the paused state") content['schedule'] = scheduled_at.isoformat() if timeout is not None: content['timeout'] = timeout for member in members: if 'schedule' in content: if force and member.data.get('scheduled_restart'): r = request_patroni(member, 'delete', 'restart') check_response(r, member.name, 'flush scheduled restart', True) r = request_patroni(member, 'post', 'restart', content) if r.status == 200: click.echo('Success: restart on member {0}'.format(member.name)) elif r.status == 202: click.echo('Success: restart scheduled on member {0}'.format(member.name)) elif r.status == 409: click.echo('Failed: another restart is already scheduled on member {0}'.format(member.name)) else: click.echo('Failed: restart for member {0}, status code={1}, ({2})'.format( member.name, r.status, r.data.decode('utf-8')) ) @ctl.command('reinit', help='Reinitialize cluster member') @click.argument('cluster_name') @click.argument('member_names', nargs=-1) @option_force @click.option('--wait', help='Wait until reinitialization completes', is_flag=True) @click.pass_obj def reinit(obj, cluster_name, member_names, force, wait): cluster = get_dcs(obj, cluster_name).get_cluster() members = get_members(cluster, cluster_name, member_names, None, force, 'reinitialize') wait_on_members = [] for member in members: body = {'force': force} while True: r = request_patroni(member, 'post', 'reinitialize', body) started = check_response(r, member.name, 'reinitialize') if not started and r.data.endswith(b' already in progress') and not force and click.confirm('Do you want to cancel it and reinitialize anyway?'): body['force'] = True continue break if started and wait: wait_on_members.append(member) last_display = [] while wait_on_members: if wait_on_members != last_display: click.echo('Waiting for reinitialize to complete on: {0}'.format( ", ".join(member.name for member in wait_on_members)) ) last_display[:] = wait_on_members time.sleep(2) for member in wait_on_members: data = json.loads(request_patroni(member, 'get', 'patroni').data.decode('utf-8')) if data.get('state') != 
'creating replica': click.echo('Reinitialize is completed on: {0}'.format(member.name)) wait_on_members.remove(member) def _do_failover_or_switchover(obj, action, cluster_name, master, candidate, force, scheduled=None): dcs = get_dcs(obj, cluster_name) cluster = dcs.get_cluster() if action == 'switchover' and (cluster.leader is None or not cluster.leader.name): raise PatroniCtlException('This cluster has no master') if master is None: if force or action == 'failover': master = cluster.leader and cluster.leader.name else: master = click.prompt('Master', type=str, default=cluster.leader.member.name) if master is not None and cluster.leader and cluster.leader.member.name != master: raise PatroniCtlException('Member {0} is not the leader of cluster {1}'.format(master, cluster_name)) candidate_names = [str(m.name) for m in cluster.members if m.name != master and not m.nofailover] candidate_names.sort() if not candidate_names: raise PatroniCtlException('No candidates found to {0} to'.format(action)) if candidate is None and not force: candidate = click.prompt('Candidate ' + str(candidate_names), type=str, default='') if action == 'failover' and not candidate: raise PatroniCtlException('Failover could be performed only to a specific candidate') if candidate == master: raise PatroniCtlException(action.title() + ' target and source are the same.') if candidate and candidate not in candidate_names: raise PatroniCtlException('Member {0} does not exist in cluster {1}'.format(candidate, cluster_name)) scheduled_at_str = None scheduled_at = None if action == 'switchover': if scheduled is None and not force: next_hour = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M') scheduled = click.prompt('When should the switchover take place (e.g. ' + next_hour + ' ) ', type=str, default='now') scheduled_at = parse_scheduled(scheduled) if scheduled_at: if cluster.is_paused(): raise PatroniCtlException("Can't schedule switchover in the paused state") scheduled_at_str = scheduled_at.isoformat() failover_value = {'leader': master, 'candidate': candidate, 'scheduled_at': scheduled_at_str} logging.debug(failover_value) click.echo('Current cluster topology') output_members(dcs.get_cluster(), cluster_name) if not force: demote_msg = ', demoting current master ' + master if master else '' if scheduled_at_str: if not click.confirm('Are you sure you want to schedule {0} of cluster {1} at {2}{3}?' .format(action, cluster_name, scheduled_at_str, demote_msg)): raise PatroniCtlException('Aborting scheduled ' + action) else: if not click.confirm('Are you sure you want to {0} cluster {1}{2}?' 
.format(action, cluster_name, demote_msg)): raise PatroniCtlException('Aborting ' + action) r = None try: member = cluster.leader.member if cluster.leader else cluster.get_member(candidate, False) r = request_patroni(member, 'post', action, failover_value) if r.status == 501 and action == 'switchover' and b'Server does not support this operation' in r.data: r = request_patroni(member, 'post', 'failover', failover_value) if r.status in (200, 202): logging.debug(r) cluster = dcs.get_cluster() logging.debug(cluster) click.echo('{0} {1}'.format(timestamp(), r.data.decode('utf-8'))) else: click.echo('{0} failed, details: {1}, {2}'.format(action.title(), r.status, r.data.decode('utf-8'))) return except Exception: logging.exception(r) logging.warning('Failing over to DCS') click.echo('{0} Could not {1} using Patroni api, falling back to DCS'.format(timestamp(), action)) dcs.manual_failover(master, candidate, scheduled_at=scheduled_at) output_members(cluster, cluster_name) @ctl.command('failover', help='Failover to a replica') @arg_cluster_name @click.option('--master', help='The name of the current master', default=None) @click.option('--candidate', help='The name of the candidate', default=None) @option_force @click.pass_obj def failover(obj, cluster_name, master, candidate, force): action = 'switchover' if master else 'failover' _do_failover_or_switchover(obj, action, cluster_name, master, candidate, force) @ctl.command('switchover', help='Switchover to a replica') @arg_cluster_name @click.option('--master', help='The name of the current master', default=None) @click.option('--candidate', help='The name of the candidate', default=None) @click.option('--scheduled', help='Timestamp of a scheduled switchover in unambiguous format (e.g. ISO 8601)', default=None) @option_force @click.pass_obj def switchover(obj, cluster_name, master, candidate, force, scheduled): _do_failover_or_switchover(obj, 'switchover', cluster_name, master, candidate, force, scheduled) def generate_topology(level, member, topology): members = topology.get(member['name'], []) if level > 0: member['name'] = '{0}+ {1}'.format((' ' * (level - 1) * 2), member['name']) if member['name']: yield member for member in members: for member in generate_topology(level + 1, member, topology): yield member def topology_sort(members): topology = defaultdict(list) leader = next((m for m in members if m['role'].endswith('leader')), {'name': None}) replicas = set(member['name'] for member in members if not member['role'].endswith('leader')) for member in members: if not member['role'].endswith('leader'): parent = member.get('tags', {}).get('replicatefrom') parent = parent if parent and parent != member['name'] and parent in replicas else leader['name'] topology[parent].append(member) for member in generate_topology(0, leader, topology): yield member def output_members(cluster, name, extended=False, fmt='pretty'): rows = [] logging.debug(cluster) initialize = {None: 'uninitialized', '': 'initializing'}.get(cluster.initialize, cluster.initialize) cluster = cluster_as_json(cluster) columns = ['Cluster', 'Member', 'Host', 'Role', 'State', 'TL', 'Lag in MB'] for c in ('Pending restart', 'Scheduled restart', 'Tags'): if extended or any(m.get(c.lower().replace(' ', '_')) for m in cluster['members']): columns.append(c) members = [m for m in cluster['members'] if 'host' in m] append_port = any('port' in m and m['port'] != 5432 for m in members) or len(set(m['host'] for m in members)) < len(members) sort = topology_sort if fmt == 'topology' else iter for m 
in sort(cluster['members']): logging.debug(m) lag = m.get('lag', '') m.update(cluster=name, member=m['name'], host=m.get('host', ''), tl=m.get('timeline', ''), role=m['role'].replace('_', ' ').title(), lag_in_mb=round(lag/1024/1024) if isinstance(lag, six.integer_types) else lag, pending_restart='*' if m.get('pending_restart') else '') if append_port and m['host'] and m.get('port'): m['host'] = ':'.join([m['host'], str(m['port'])]) if 'scheduled_restart' in m: value = m['scheduled_restart']['schedule'] if 'postgres_version' in m['scheduled_restart']: value += ' if version < {0}'.format(m['scheduled_restart']['postgres_version']) m['scheduled_restart'] = value rows.append([m.get(n.lower().replace(' ', '_'), '') for n in columns]) print_output(columns, rows, {'Lag in MB': 'r', 'TL': 'r'}, fmt, ' Cluster: {0} ({1}) '.format(name, initialize)) if fmt not in ('pretty', 'topology'): return service_info = [] if cluster.get('pause'): service_info.append('Maintenance mode: on') if 'scheduled_switchover' in cluster: info = 'Switchover scheduled at: ' + cluster['scheduled_switchover']['at'] for name in ('from', 'to'): if name in cluster['scheduled_switchover']: info += '\n{0:>24}: {1}'.format(name, cluster['scheduled_switchover'][name]) service_info.append(info) if service_info: click.echo(' ' + '\n '.join(service_info)) @ctl.command('list', help='List the Patroni members for a given Patroni') @click.argument('cluster_names', nargs=-1) @click.option('--extended', '-e', help='Show some extra information', is_flag=True) @click.option('--timestamp', '-t', 'ts', help='Print timestamp', is_flag=True) @option_format @option_watch @option_watchrefresh @click.pass_obj def members(obj, cluster_names, fmt, watch, w, extended, ts): if not cluster_names: if 'scope' in obj: cluster_names = [obj['scope']] if not cluster_names: return logging.warning('Listing members: No cluster names were provided') for cluster_name in cluster_names: dcs = get_dcs(obj, cluster_name) for _ in watching(w, watch): if ts: click.echo(timestamp(0)) cluster = dcs.get_cluster() output_members(cluster, cluster_name, extended, fmt) @ctl.command('topology', help='Prints ASCII topology for given cluster') @click.argument('cluster_names', nargs=-1) @option_watch @option_watchrefresh @click.pass_obj @click.pass_context def topology(ctx, obj, cluster_names, watch, w): ctx.forward(members, fmt='topology') def timestamp(precision=6): return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:precision - 7] @ctl.command('configure', help='Create configuration file') @click.option('--config-file', '-c', help='Configuration file', prompt='Configuration file', default=CONFIG_FILE_PATH) @click.option('--dcs', '-d', help='The DCS connect url', prompt='DCS connect url', default='etcd://localhost:2379') @click.option('--namespace', '-n', help='The namespace', prompt='Namespace', default='/service/') def configure(config_file, dcs, namespace): store_config({'dcs_api': str(dcs), 'namespace': str(namespace)}, config_file)
MIT License
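A hedged sketch of how touch_member might be driven from patronictl code, using the load_config and get_dcs helpers shown in the surrounding module; the config path and cluster scope are placeholders, and the loaded config must contain postgresql and restapi sections.

from patroni.ctl import load_config, get_dcs, touch_member

config = load_config('/etc/patroni/patronictl.yaml', None)  # placeholder path
dcs = get_dcs(config, scope='my-cluster')                   # placeholder scope

if touch_member(config, dcs):
    print('member key written to the DCS (permanent=True)')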
socketlabs/socketlabs-python
socketlabs/injectionapi/core/serialization/injectionresponsedto.py
InjectionResponseDto.to_json
python
def to_json(self):
    json = {
        "errorCode": self._error_code,
        "transactionReceipt": self._transaction_receipt
    }
    if len(self._message_results) > 0:
        e = []
        for a in self._message_results:
            e.append(a.to_json())
        json["messageResult"] = e
    return json
Build the JSON dict for InjectionResponseDto. :return: the JSON dictionary :rtype: dict
https://github.com/socketlabs/socketlabs-python/blob/10e3a91c00faeb4b309f87610e44ef8408333d56/socketlabs/injectionapi/core/serialization/injectionresponsedto.py#L77-L92
from .messageresultdto import MessageResultDto class InjectionResponseDto(object): def __init__(self): self._error_code = None self._transaction_receipt = None self._message_results = None @property def error_code(self): return self._error_code @error_code.setter def error_code(self, val: str): self._error_code = val @property def transaction_receipt(self): return self._transaction_receipt @transaction_receipt.setter def transaction_receipt(self, val: str): self._transaction_receipt = val @property def message_results(self): return self._message_results @message_results.setter def message_results(self, val: list): self._message_results = [] if val is not None: for item in val: if isinstance(item, MessageResultDto): self._message_results.append(item)
MIT License
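A small, hedged serialization sketch; the receipt and error-code strings are made up. Note that message_results is set to an empty list explicitly, because the private field defaults to None and to_json() calls len() on it.

from socketlabs.injectionapi.core.serialization.injectionresponsedto import InjectionResponseDto

dto = InjectionResponseDto()
dto.error_code = 'Success'                      # illustrative value
dto.transaction_receipt = 'placeholder-receipt'
dto.message_results = []                        # avoids len(None) inside to_json()

print(dto.to_json())
# -> {'errorCode': 'Success', 'transactionReceipt': 'placeholder-receipt'}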
zachary2wave/torch-rl
Torch_rl/agent/core_policy.py
Agent_policy_based.cuda
python
def cuda(self):
    raise NotImplementedError()
Move the agent's networks to the GPU (CUDA); must be implemented by subclasses
https://github.com/zachary2wave/torch-rl/blob/cdf2128c5415b2e3d2c1f4f8861a1346f6c4dcd5/Torch_rl/agent/core_policy.py#L269-L273
from copy import deepcopy from abc import ABC import numpy as np import torch from Torch_rl.common import logger from Torch_rl.common.logger import CSVOutputFormat from Torch_rl.common.memory import ReplayMemory from Torch_rl.common.distribution import * from Torch_rl.common.util import csv_record class Agent_policy_based(ABC): def __init__(self, path): self.step = 0 self.episode = 0 configlist = ["stdout", "log", 'tensorboard', "csv"] if path is None: path = "./" logger.configure(path, configlist) self.csvwritter = CSVOutputFormat(path+"record_trajectory.csv") loggerCEN = logger.get_current().output_formats[configlist.index('tensorboard')] self.writer = loggerCEN.writer self.path = path def imitation_learning(self): pass def train(self, max_step=None, max_ep_cycle=2000, verbose=2, learning_start=1000, render=False, record_ep_inter=None): self.learning = True print("the train phase ........") self.interact(max_step=max_step, max_ep_cycle=max_ep_cycle, learning_start=learning_start, render=render, verbose=verbose, record_ep_inter=record_ep_inter) def test(self, max_step=None, max_ep_cycle=2000, verbose=2, render=False, record_ep_inter=None): self.learning = False self.learning_starts = 0 self.step = 0 self.episode = 0 print("the test phase ........") self.interact(max_step=max_step, max_ep_cycle=max_ep_cycle, render=render, verbose=verbose, record_ep_inter=record_ep_inter) def interact(self, max_step=50000, max_ep_cycle=2000, train_rollout=10,learning_start=1000, render = False, verbose=1, record_ep_inter=None): self.render = render rollout = 0 now_best_reward = -np.inf self.dist = make_pdtype(self.env.action_space, self.policy) sample_generate = self.runner(self.sample_rollout, self.sample_ep, max_ep_cycle, record_ep_inter, lstm_enable=self.lstm_enable) while self.step < max_step: sample = next(sample_generate) logger.record_tabular("01.step", self.step) logger.record_tabular("02.episode",self.episode) logger.record_tabular("03.rollout", rollout) logger.record_tabular("04.rollout/ep", sample["ep_used"]) logger.record_tabular("05.rollout/step", sum(sample["ep_step_used"])) logger.record_tabular("06.mean_episode_reward", np.mean(sample["ep_reward"])) logger.record_tabular("07.mean_step_reward", np.mean(sample["buffer"]["r"])) logger.record_tabular("08.mean_ep_step_used", np.mean(sample["ep_step_used"])) logger.dump_tabular() csv_record(sample["ep_reward"], self.path) record_sample = sample["buffer"] rollout += 1 if self.step > learning_start and self.learning: ep_show = {} if self.backward_ep_show_list: for key in self.backward_ep_show_list: ep_show[key] = 0 rollout_loss = 0 for time in range(train_rollout): loss, other_infor = self.update(record_sample) if verbose == 1: logger.record_tabular("06.train_rollout", time) logger.record_tabular("07.loss", loss) flag = 10 if self.backward_step_show_list: for key in self.backward_step_show_list: logger.record_tabular(str(flag) +"."+ key, other_infor[key]) flag += 1 logger.dump_tabular() rollout_loss += loss if self.backward_ep_show_list: for key in self.backward_ep_show_list: ep_show[key] += other_infor[key] if verbose == 2: logger.record_tabular("06.rollouts/loss", rollout_loss) logger.record_tabular("07.rollouts/episode_Q_value", torch.mean( torch.tensor(sample["ep_Q_value"])).cpu().detach().numpy()) flag = 10 if self.backward_ep_show_list: for key in self.backward_ep_show_list: logger.record_tabular(str(flag) + "." 
+ key, ep_show[key]) flag += 1 logger.dump_tabular() if np.mean(sample["ep_reward"])>now_best_reward: self.save_weights(self.path) print("the best mean ep reward is ", np.mean(sample["ep_reward"]), "the weight is saved") now_best_reward = np.mean(sample["ep_reward"]) def runner(self, sample_step=None, sample_ep=None, max_ep_step=2000, record_ep_inter=None, lstm_enable=False): if sample_step is not None: buffer = ReplayMemory(sample_step, ["value", "logp","info"]) else: buffer = ReplayMemory(sample_ep*max_ep_step, ["value", "logp","info"]) s = self.env.reset() ep_reward, ep_Q_value, ep_step_used = [], [], [] ep_r, ep_q, ep_cycle = 0, 0, 0 while True: s = torch.from_numpy(s.astype(np.float32)) with torch.no_grad(): outcome = self.policy.forward(s.unsqueeze(0)) Q = self.value.forward(s.unsqueeze(0)) pd = self.dist(outcome) a = pd.sample() s_, r, done, info = self.env.step(a.cpu().squeeze(0).numpy()) if self.render: self.env.render() ep_r += r ep_q += Q ep_cycle +=1 self.step += 1 logp = pd.log_prob(a) sample_ = { "s": s, "a": a.squeeze(0), "r": torch.tensor(np.array([r]).astype(np.float32)), "tr": torch.tensor([int(done)]), "s_":torch.from_numpy(s_), "logp": logp.squeeze(0), "value": Q.squeeze(0), "info": info} buffer.push(sample_) s = deepcopy(s_) if record_ep_inter is not None: if self.episode % record_ep_inter == 0: kvs = {"s": s, "a": a, "s_": s_, "r": r, "tr": done, "ep": self.episode, "step": self.step, "ep_step": ep_cycle} self.csvwritter.writekvs(kvs) if done: s = self.env.reset() self.episode += 1 ep_reward.append(ep_r) ep_Q_value.append(ep_q) ep_step_used.append(ep_cycle) ep_r, ep_q, ep_cycle = 0, 0, 0 if lstm_enable: self.policy.reset_h() if sample_step is not None: if self.step > 0 and self.step % sample_step==0: s_ = torch.from_numpy(s_[np.newaxis,:].astype(np.float32)) with torch.no_grad(): last_Q = self.value.forward(s_).squeeze() yield {"buffer": buffer.memory, "ep_reward": ep_reward, "ep_Q_value": ep_Q_value, "ep_step_used": ep_step_used, "ep_used": len(ep_reward), "step_used": sample_step, "last_Q" : last_Q } ep_reward, ep_Q_value, ep_step_used = [], [], [] if sample_step is not None: buffer = ReplayMemory(sample_step, ["value", "logp","info"]) else: buffer = ReplayMemory(sample_ep * max_ep_step, ["value", "logp","info"]) else: if self.step > 0 and self.episode % sample_ep==0: s_ = torch.from_numpy(s_.astype(np.float32)) last_Q = self.value.forward(s_) yield {"buffer": buffer.memory, "ep_reward": ep_reward, "ep_Q_value": ep_Q_value, "ep_step_used": ep_step_used, "ep_used": sample_ep, "step_used": len(buffer.memory["tr"]), "last_Q": last_Q } ep_reward, ep_Q_value = [], [] if sample_step is not None: buffer = ReplayMemory(sample_step, ["value", "logp","info"]) else: buffer = ReplayMemory(sample_ep * max_ep_step, ["value", "logp","info"]) def update(self, sample): raise NotImplementedError() def load_weights(self, filepath): raise NotImplementedError() def save_weights(self, filepath, overwrite=False): raise NotImplementedError()
MIT License
beijbom/coralnet
project/export/tests/test_cpc.py
CPCExportBaseTest.assert_cpc_label_lines_equal
python
def assert_cpc_label_lines_equal(
        self, actual_cpc_content, expected_point_lines):
    actual_lines = actual_cpc_content.splitlines()
    point_count = len(expected_point_lines)
    actual_point_lines = actual_lines[-(28+point_count):-28]

    for point_num, actual_line in enumerate(actual_point_lines, 1):
        expected_line = expected_point_lines[point_num-1]
        self.assertEqual(actual_line, expected_line, msg=(
            "Line for point {point_num} not equal | Actual: {actual_line}"
            " | Expected: {expected_line}").format(
                point_num=point_num, actual_line=actual_line,
                expected_line=expected_line,
            ))
Tests that a CPC's label lines (the lines with the label codes) are as expected.
https://github.com/beijbom/coralnet/blob/1f47f666a783f5ed4bcb5057513a4ae76e3d2d8c/project/export/tests/test_cpc.py#L116-L138
from io import BytesIO from zipfile import ZipFile from bs4 import BeautifulSoup from django.core.files.base import ContentFile from django.shortcuts import resolve_url from django.urls import reverse from export.utils import get_previous_cpcs_status, write_zip from images.model_utils import PointGen from images.models import Image from lib.tests.utils import BasePermissionTest, ClientTest from upload.tests.utils import UploadAnnotationsTestMixin class PermissionTest(BasePermissionTest): def test_cpc_create_ajax(self): url = reverse( 'export_annotations_cpc_create_ajax', args=[self.source.pk]) self.source_to_private() self.assertPermissionLevel(url, self.SOURCE_EDIT, is_json=True) self.source_to_public() self.assertPermissionLevel(url, self.SOURCE_EDIT, is_json=True) def test_cpc_serve(self): url = reverse('export_annotations_cpc_serve', args=[self.source.pk]) template = 'visualization/browse_images.html' self.source_to_private() self.assertPermissionLevel(url, self.SOURCE_EDIT, template=template) self.source_to_public() self.assertPermissionLevel(url, self.SOURCE_EDIT, template=template) class CPCExportBaseTest(ClientTest): @classmethod def setUpTestData(cls): super().setUpTestData() cls.default_search_params = dict( image_form_type='search', aux1='', aux2='', aux3='', aux4='', aux5='', height_in_cm='', latitude='', longitude='', depth='', photographer='', framing='', balance='', photo_date_0='', photo_date_1='', photo_date_2='', photo_date_3='', photo_date_4='', image_name='', annotation_status='', last_annotated_0='', last_annotated_1='', last_annotated_2='', last_annotated_3='', last_annotated_4='', last_annotator_0='', last_annotator_1='', sort_method='name', sort_direction='asc', ) def export_cpcs(self, post_data): self.client.force_login(self.user) self.client.post( resolve_url('export_annotations_cpc_create_ajax', self.source.pk), post_data) return self.client.post( resolve_url('export_annotations_cpc_serve', self.source.pk)) @staticmethod def export_response_to_cpc(response, cpc_filename): zf = ZipFile(BytesIO(response.content)) return zf.read(cpc_filename).decode() def upload_cpcs(self, cpc_files, plus_notes=False): self.client.force_login(self.user) self.client.post( resolve_url('upload_annotations_cpc_preview_ajax', self.source.pk), {'cpc_files': cpc_files, 'plus_notes': plus_notes}) self.client.post( resolve_url('upload_annotations_ajax', self.source.pk)) def assert_cpc_content_equal(self, actual_cpc_content, expected_lines): actual_lines = actual_cpc_content.splitlines() expected_cpc_content = '\r\n'.join(expected_lines) + '\r\n' for line_num, actual_line in enumerate(actual_lines, 1): expected_line = expected_lines[line_num-1] self.assertEqual(actual_line, expected_line, msg=( "Line {line_num} not equal | Actual: {actual_line}" " | Expected: {expected_line}").format( line_num=line_num, actual_line=actual_line, expected_line=expected_line, )) self.assertEqual(actual_cpc_content, expected_cpc_content)
BSD 2-Clause Simplified License
future-architect/sublime-uroborosql-formatter
sqlparse/filters.py
compact
python
def compact(stream):
    pipe = Pipeline()
    pipe.append(StripComments())
    pipe.append(StripWhitespace)
    return pipe(stream)
Function that return a compacted version of the stream
https://github.com/future-architect/sublime-uroborosql-formatter/blob/7b1c1d81d377a8d341847247020173d1861b6a8f/sqlparse/filters.py#L737-L744
import re from os.path import abspath, join from sqlparse import sql, tokens as T from sqlparse.engine import FilterStack from sqlparse.lexer import tokenize from sqlparse.pipeline import Pipeline from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation, String, Whitespace) from sqlparse.utils import memoize_generator from sqlparse.utils import split_unquoted_newlines class _CaseFilter: ttype = None def __init__(self, case=None): if case is None: case = 'upper' assert case in ['lower', 'upper', 'capitalize'] def get_convert(): import sys if sys.version_info[0] < 3: unicodecase = getattr(unicode, case) def convert(s): if isinstance(s, str): return unicodecase(s.decode('utf-8')).encode('utf-8') else: return unicodecase(s) return convert else: return getattr(str, case) self.convert = get_convert() def process(self, stack, stream): for ttype, value in stream: if ttype in self.ttype: value = self.convert(value) yield ttype, value class KeywordCaseFilter(_CaseFilter): ttype = T.Keyword class IdentifierCaseFilter(_CaseFilter): ttype = (T.Name, T.String.Symbol) def process(self, stack, stream): for ttype, value in stream: if ttype in self.ttype and not value.strip()[0] == '"': value = self.convert(value) yield ttype, value class TruncateStringFilter: def __init__(self, width, char): self.width = max(width, 1) self.char = str(char) def process(self, stack, stream): for ttype, value in stream: if ttype is T.Literal.String.Single: if value[:2] == '\'\'': inner = value[2:-2] quote = '\'\'' else: inner = value[1:-1] quote = '\'' if len(inner) > self.width: value = ''.join((quote, inner[:self.width], self.char, quote)) yield ttype, value class GetComments: def process(self, stack, stream): for token_type, value in stream: if token_type in Comment: yield token_type, value class StripComments: def process(self, stack, stream): for token_type, value in stream: if token_type not in Comment: yield token_type, value def StripWhitespace(stream): last_type = None has_space = False ignore_group = frozenset((Comparison, Punctuation)) for token_type, value in stream: if last_type: if token_type in Whitespace: has_space = True continue elif token_type in (Whitespace, Whitespace.Newline, ignore_group): continue if has_space: if not ignore_group.intersection((last_type, token_type)): yield Whitespace, ' ' has_space = False yield token_type, value last_type = token_type class IncludeStatement: def __init__(self, dirpath=".", maxrecursive=10, raiseexceptions=False): if maxrecursive <= 0: raise ValueError('Max recursion limit reached') self.dirpath = abspath(dirpath) self.maxRecursive = maxrecursive self.raiseexceptions = raiseexceptions self.detected = False @memoize_generator def process(self, stack, stream): for token_type, value in stream: if token_type in Name and value.upper() == 'INCLUDE': self.detected = True continue elif self.detected: if token_type in Whitespace: continue if token_type in String.Symbol: path = join(self.dirpath, value[1:-1]) try: f = open(path) raw_sql = f.read() f.close() except IOError as err: if self.raiseexceptions: raise yield Comment, '-- IOError: %s\n' % err else: try: filtr = IncludeStatement(self.dirpath, self.maxRecursive - 1, self.raiseexceptions) except ValueError as err: if self.raiseexceptions: raise yield Comment, '-- ValueError: %s\n' % err stack = FilterStack() stack.preprocess.append(filtr) for tv in stack.run(raw_sql): yield tv self.detected = False continue yield token_type, value class StripCommentsFilter: def _get_next_comment(self, tlist): token = 
tlist.token_next_by_instance(0, sql.Comment) if token is None: token = tlist.token_next_by_type(0, T.Comment) return token def _process(self, tlist): token = self._get_next_comment(tlist) while token: tidx = tlist.token_index(token) prev = tlist.token_prev(tidx, False) next_ = tlist.token_next(tidx, False) if (prev is not None and next_ is not None and not prev.is_whitespace() and not next_.is_whitespace() and not (prev.match(T.Punctuation, '(') or next_.match(T.Punctuation, ')'))): tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ') else: tlist.tokens.pop(tidx) token = self._get_next_comment(tlist) def process(self, stack, stmt): [self.process(stack, sgroup) for sgroup in stmt.get_sublists()] self._process(stmt) class StripWhitespaceFilter(object): def _stripws(self, tlist): func_name = '_stripws_%s' % tlist.__class__.__name__.lower() func = getattr(self, func_name, self._stripws_default) func(tlist) def _stripws_default(self, tlist): last_was_ws = False for token in tlist.tokens: if token.is_whitespace(): if last_was_ws: token.value = '' else: token.value = ' ' last_was_ws = token.is_whitespace() def _stripws_identifierlist(self, tlist): last_nl = None for token in tlist.tokens[:]: if (token.ttype is T.Punctuation and token.value == ',' and last_nl is not None): tlist.tokens.remove(last_nl) if token.is_whitespace(): last_nl = token else: last_nl = None return self._stripws_default(tlist) def _stripws_parenthesis(self, tlist): if tlist.tokens[1].is_whitespace(): tlist.tokens.pop(1) if tlist.tokens[-2].is_whitespace(): tlist.tokens.pop(-2) self._stripws_default(tlist) def process(self, stack, stmt, depth=0): [self.process(stack, sgroup, depth + 1) for sgroup in stmt.get_sublists()] self._stripws(stmt) if ( depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace() ): stmt.tokens.pop(-1) class ReindentFilter(object): def __init__(self, width=2, char=' ', line_width=None): self.width = width self.char = char self.indent = 0 self.offset = 0 self.line_width = line_width self._curr_stmt = None self._last_stmt = None def _flatten_up_to_token(self, token): iterator = self._curr_stmt.flatten() for t in iterator: yield t if t == token: raise StopIteration def _get_offset(self, token): raw = ''.join(map(str, self._flatten_up_to_token(token))) line = raw.splitlines()[-1] full_offset = len(line) - len(self.char * (self.width * self.indent)) return full_offset - self.offset def nl(self): space = (self.char * ((self.indent * self.width) + self.offset)) if len(space) > 200: self.indent = self.offset = 0 space = (self.char * ((self.indent * self.width) + self.offset)) ws = '\n' + space return sql.Token(T.Whitespace, ws) def _split_kwds(self, tlist): split_words = ('FROM', 'STRAIGHT_JOIN$', 'JOIN$', 'AND', 'OR', 'GROUP', 'ORDER', 'UNION', 'VALUES', 'SET', 'BETWEEN', 'EXCEPT', 'HAVING') def _next_token(i): t = tlist.token_next_match(i, T.Keyword, split_words, regex=True) if t and t.value.upper() == 'BETWEEN': t = _next_token(tlist.token_index(t) + 1) if t and t.value.upper() == 'AND': t = _next_token(tlist.token_index(t) + 1) return t idx = 0 token = _next_token(idx) added = set() while token: prev = tlist.token_prev(tlist.token_index(token), False) offset = 1 if prev and prev.is_whitespace() and prev not in added: tlist.tokens.pop(tlist.token_index(prev)) offset += 1 uprev = str(prev) if (prev and (uprev.endswith('\n') or uprev.endswith('\r'))): nl = tlist.token_next(token) else: nl = self.nl() added.add(nl) tlist.insert_before(token, nl) offset += 1 token = _next_token(tlist.token_index(nl) + offset) 
def _split_statements(self, tlist): idx = 0 token = tlist.token_next_by_type(idx, (T.Keyword.DDL, T.Keyword.DML)) while token: prev = tlist.token_prev(tlist.token_index(token), False) if prev and prev.is_whitespace(): tlist.tokens.pop(tlist.token_index(prev)) if prev: nl = self.nl() tlist.insert_before(token, nl) token = tlist.token_next_by_type(tlist.token_index(token) + 1, (T.Keyword.DDL, T.Keyword.DML)) def _process(self, tlist): func_name = '_process_%s' % tlist.__class__.__name__.lower() func = getattr(self, func_name, self._process_default) func(tlist) def _process_where(self, tlist): token = tlist.token_next_match(0, T.Keyword, 'WHERE') try: tlist.insert_before(token, self.nl()) except ValueError: pass self.indent += 1 self._process_default(tlist) self.indent -= 1 def _process_having(self, tlist): token = tlist.token_next_match(0, T.Keyword, 'HAVING') try: tlist.insert_before(token, self.nl()) except ValueError: pass self.indent += 1 self._process_default(tlist) self.indent -= 1 def _process_parenthesis(self, tlist): first = tlist.token_next(0) indented = False if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL): self.indent += 1 tlist.tokens.insert(0, self.nl()) indented = True num_offset = self._get_offset( tlist.token_next_match(0, T.Punctuation, '(')) self.offset += num_offset self._process_default(tlist, stmts=not indented) if indented: self.indent -= 1 self.offset -= num_offset def _process_identifierlist(self, tlist): identifiers = list(tlist.get_identifiers()) if len(identifiers) > 1 and not tlist.within(sql.Function): first = list(identifiers[0].flatten())[0] if self.char == '\t': num_offset = 1 else: num_offset = self._get_offset(first) - len(first.value) self.offset += num_offset for token in identifiers[1:]: tlist.insert_before(token, self.nl()) self.offset -= num_offset self._process_default(tlist) def _process_case(self, tlist): is_first = True num_offset = None case = tlist.tokens[0] outer_offset = self._get_offset(case) - len(case.value) self.offset += outer_offset for cond, value in tlist.get_cases(): if is_first: tcond = list(cond[0].flatten())[0] is_first = False num_offset = self._get_offset(tcond) - len(tcond.value) self.offset += num_offset continue if cond is None: token = value[0] else: token = cond[0] tlist.insert_before(token, self.nl()) self.offset += 5 self._process_default(tlist) self.offset -= 5 if num_offset is not None: self.offset -= num_offset end = tlist.token_next_match(0, T.Keyword, 'END') tlist.insert_before(end, self.nl()) self.offset -= outer_offset def _process_default(self, tlist, stmts=True, kwds=True): if stmts: self._split_statements(tlist) if kwds: self._split_kwds(tlist) [self._process(sgroup) for sgroup in tlist.get_sublists()] def process(self, stack, stmt): if isinstance(stmt, sql.Statement): self._curr_stmt = stmt self._process(stmt) if isinstance(stmt, sql.Statement): if self._last_stmt is not None: if str(self._last_stmt).endswith('\n'): nl = '\n' else: nl = '\n\n' stmt.tokens.insert( 0, sql.Token(T.Whitespace, nl)) if self._last_stmt != stmt: self._last_stmt = stmt class RightMarginFilter: keep_together = ( ) def __init__(self, width=79): self.width = width self.line = '' def _process(self, stack, group, stream): for token in stream: if token.is_whitespace() and '\n' in token.value: if token.value.endswith('\n'): self.line = '' else: self.line = token.value.splitlines()[-1] elif (token.is_group() and not token.__class__ in self.keep_together): token.tokens = self._process(stack, token, token.tokens) else: val = str(token) 
if len(self.line) + len(val) > self.width: match = re.search('^ +', self.line) if match is not None: indent = match.group() else: indent = '' yield sql.Token(T.Whitespace, '\n%s' % indent) self.line = indent self.line += val yield token def process(self, stack, group): return group.tokens = self._process(stack, group, group.tokens) class ColumnsSelect: def process(self, stack, stream): mode = 0 oldValue = "" parenthesis = 0 for token_type, value in stream: if token_type in Comment: continue if mode == 0: if token_type in Keyword and value == 'SELECT': mode = 1 elif mode == 1: if value == 'FROM': if oldValue: yield oldValue mode = 3 elif value == 'AS': oldValue = "" mode = 2 elif (token_type == Punctuation and value == ',' and not parenthesis): if oldValue: yield oldValue oldValue = "" elif token_type not in Whitespace: if value == '(': parenthesis += 1 elif value == ')': parenthesis -= 1 oldValue += value elif mode == 2: if token_type == Name or token_type == Keyword: yield value mode = 1 class SerializerUnicode: def process(self, stack, stmt): raw = str(stmt) lines = split_unquoted_newlines(raw) res = '\n'.join(line.rstrip() for line in lines) return res def Tokens2Unicode(stream): result = "" for _, value in stream: result += str(value) return result class OutputFilter: varname_prefix = '' def __init__(self, varname='sql'): self.varname = self.varname_prefix + varname self.count = 0 def _process(self, stream, varname, has_nl): raise NotImplementedError def process(self, stack, stmt): self.count += 1 if self.count > 1: varname = '%s%d' % (self.varname, self.count) else: varname = self.varname has_nl = len(str(stmt).strip().splitlines()) > 1 stmt.tokens = self._process(stmt.tokens, varname, has_nl) return stmt class OutputPythonFilter(OutputFilter): def _process(self, stream, varname, has_nl): if self.count > 1: yield sql.Token(T.Whitespace, '\n') yield sql.Token(T.Name, varname) yield sql.Token(T.Whitespace, ' ') yield sql.Token(T.Operator, '=') yield sql.Token(T.Whitespace, ' ') if has_nl: yield sql.Token(T.Operator, '(') yield sql.Token(T.Text, "'") for token in stream: if token.is_whitespace() and '\n' in token.value: yield sql.Token(T.Text, " '") yield sql.Token(T.Whitespace, '\n') yield sql.Token(T.Whitespace, ' ' * (len(varname) + 4)) yield sql.Token(T.Text, "'") after_lb = token.value.split('\n', 1)[1] if after_lb: yield sql.Token(T.Whitespace, after_lb) continue elif "'" in token.value: token.value = token.value.replace("'", "\\'") yield sql.Token(T.Text, token.value) yield sql.Token(T.Text, "'") if has_nl: yield sql.Token(T.Operator, ')') class OutputPHPFilter(OutputFilter): varname_prefix = '$' def _process(self, stream, varname, has_nl): if self.count > 1: yield sql.Token(T.Whitespace, '\n') yield sql.Token(T.Name, varname) yield sql.Token(T.Whitespace, ' ') if has_nl: yield sql.Token(T.Whitespace, ' ') yield sql.Token(T.Operator, '=') yield sql.Token(T.Whitespace, ' ') yield sql.Token(T.Text, '"') for token in stream: if token.is_whitespace() and '\n' in token.value: yield sql.Token(T.Text, ' ";') yield sql.Token(T.Whitespace, '\n') yield sql.Token(T.Name, varname) yield sql.Token(T.Whitespace, ' ') yield sql.Token(T.Operator, '.=') yield sql.Token(T.Whitespace, ' ') yield sql.Token(T.Text, '"') after_lb = token.value.split('\n', 1)[1] if after_lb: yield sql.Token(T.Whitespace, after_lb) continue elif '"' in token.value: token.value = token.value.replace('"', '\\"') yield sql.Token(T.Text, token.value) yield sql.Token(T.Text, '"') yield sql.Token(T.Punctuation, ';') class 
Limit: def process(self, stack, stream): index = 7 stream = list(stream) stream.reverse() for token_type, value in stream: index -= 1 if index and token_type in Keyword and value == 'LIMIT': return stream[4 - index][1] return -1
BSD 2-Clause Simplified License
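A minimal usage sketch for the compact filter above, assuming the bundled sqlparse lexer and the Tokens2Unicode helper defined in the same filters module; the exact token stream can differ between sqlparse versions, so the printed output is only indicative.

# Hypothetical usage sketch for compact(); tokenize and Tokens2Unicode
# come from the same vendored sqlparse package shown in the context.
from sqlparse.lexer import tokenize
from sqlparse.filters import compact, Tokens2Unicode

sql = """SELECT *   -- every column
FROM    foo;"""

# compact() strips comments and collapses whitespace in the token stream.
stream = compact(tokenize(sql))
print(Tokens2Unicode(stream))  # expected to print roughly: SELECT * FROM foo;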
iglpdc/dmrg101
dmrg101/core/lanczos.py
cycle_lanczos_vectors
python
def cycle_lanczos_vectors(lv, saved_lanczos_vectors):
    saved_lanczos_vectors.append(lv[0])
    lv[0], lv[1], lv[2] = lv[1], lv[2], create_empty_like(lv[2])
Cycles the Lanczos vectors to prepare them for the next iteration.

You use this function to cycle the Lanczos vectors in this way:

- lv[1] -> lv[0]
- lv[2] -> lv[1]

The first Lanczos vector before the cycle, `lv[0]` is not needed
anymore and is appended to the `saved_lanczos_vectors` list. The last
Lanczos vector after the cycle, `lv[2]` contains garbage.

Parameters
----------
lv : the 3 tuple of Wavefunctions.
    With the three Lanczos vectors in use.
saved_lanczos_vectors : a list of Wavefunctions.
    The Lanczos vectors that are saved.
https://github.com/iglpdc/dmrg101/blob/aaf3913f5a616dc84c4100efbeb819648973c582/dmrg101/core/lanczos.py#L263-L283
import numpy as np from math import fabs from sys import float_info from dmrg101.core.braket import braket from dmrg101.core.dmrg_exceptions import DMRGException from dmrg101.core.get_real import get_real from dmrg101.core.wavefunction import create_empty_like from dmrg101.core.wavefunction import Wavefunction from dmrg101.utils.tridiagonal_solver.tridiagonal_solver import tridiagonal_solver def create_lanczos_vectors(initial_wf): result = [create_empty_like(initial_wf), create_empty_like(initial_wf), create_empty_like(initial_wf)] result[0].as_matrix = np.copy(initial_wf.as_matrix) return result def generate_tridiagonal_matrix(alpha, beta, iteration): if (len(alpha) != iteration + 1): raise DMRGException("alpha has wrong size") if (len(alpha) != len(beta) ): raise DMRGException("beta has wrong size") if not (len(alpha) > 1): raise DMRGException("alpha not large enough") d = np.array(alpha) e = np.array(beta[:d.size-1]) assert(d.size == e.size + 1) assert(d.size == iteration+1) return d, e def diagonalize_tridiagonal_matrix(d, e, eigenvectors): if ( d.size != e.size + 1): raise DMRGException("Wrong sizes for d, e") evals, evecs = tridiagonal_solver(d, e, eigenvectors) return evals, evecs def lanczos_zeroth_iteration(alpha, beta, lv, hamiltonian): if alpha and beta: raise DMRGException("Lists not empty at zeroth Lanczos iter") lv[1] = hamiltonian.apply(lv[0]) alpha.append(get_real(braket(lv[0], lv[1]))) lv[1].as_matrix -= alpha[0]*lv[0].as_matrix beta.append(lv[1].get_norm()) lv[1].normalize() assert(len(alpha) == 1) assert(len(beta) == 1) already_the_ground_state = ( beta[0] < float_info.epsilon ) return already_the_ground_state def lanczos_nth_iteration(alpha, beta, lv, saved_lanczos_vectors, hamiltonian, iteration): if len(alpha) != len(beta): DMRGException("alpha and beta have wrong sizes") lv[2] = hamiltonian.apply(lv[1]) alpha.append(get_real(braket(lv[1], lv[2]))) lv[2].as_matrix -= (alpha[iteration]*lv[1].as_matrix + beta[iteration-1]*lv[0].as_matrix) beta.append(lv[2].get_norm()) lv[2].normalize() cycle_lanczos_vectors(lv, saved_lanczos_vectors) assert(len(alpha) == iteration + 1) assert(len(beta) == iteration + 1)
MIT License
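The cycling contract of cycle_lanczos_vectors can be illustrated with plain Python objects standing in for Wavefunctions (a toy sketch, not using dmrg101 itself; "fresh" stands in for create_empty_like(lv[2])).

# Toy illustration of the same cycling pattern with strings in place of
# Wavefunctions.
lv = ["v0", "v1", "v2"]
saved_lanczos_vectors = []

saved_lanczos_vectors.append(lv[0])
lv[0], lv[1], lv[2] = lv[1], lv[2], "fresh"

assert lv == ["v1", "v2", "fresh"]
assert saved_lanczos_vectors == ["v0"]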
stevearc/dql
dql/models.py
format_throughput
python
def format_throughput(available: float, used: Optional[float] = None) -> str:
    if available == 0:
        if used is not None:
            return "{0:.0f}/∞".format(used)
        return "N/A"
    if used is None:
        return str(available)
    percent = used / available
    return "{0:.0f}/{1:.0f} ({2:.0%})".format(used, available, percent)
Format the read/write throughput for display
https://github.com/stevearc/dql/blob/6ed2cfcd16d5b077c1613a0ff219bcedfcaf5539/dql/models.py#L14-L23
from decimal import Decimal
from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union

from dynamo3 import DynamoKey, GlobalIndex, Table, Throughput
from dynamo3.constants import TableStatusType
from dynamo3.fields import BaseIndex
from dynamo3.types import TYPES_REV

from .exceptions import EngineRuntimeError
MIT License
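A few worked calls to format_throughput above; a sketch assuming the import path from the record (dql/models.py).

# Hypothetical usage sketch; import path taken from the record above.
from dql.models import format_throughput

print(format_throughput(10, 5))   # 5/10 (50%)
print(format_throughput(0, 3))    # 3/∞
print(format_throughput(0))       # N/A
print(format_throughput(25))      # 25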
calysto/metakernel
metakernel/magics/plot_magic.py
PlotMagic.line_plot
python
def line_plot(self, *args, **kwargs):
    if args and not args[0].startswith('-'):
        kwargs['backend'] = args[0]
    if 'size' in kwargs and kwargs['size'] is not None:
        width, height = kwargs['size']
        kwargs['width'] = int(width)
        kwargs['height'] = int(height)
    for key in ['resolution', 'format', 'size', 'width', 'height']:
        if key in kwargs and kwargs[key] is None:
            del kwargs[key]
    self.kernel.plot_settings = kwargs
    self.kernel.handle_plot_settings()
%plot [options] backend - configure plotting for the session.

This line magic will configure the plot settings for this language.

Examples:
    %plot qt --format=png
    %plot inline -w 640

Note: not all languages may support the %plot magic, and not all
options may be supported.
https://github.com/calysto/metakernel/blob/861a40d87e45a73f4bd3fa6fba35be024d29b6d7/metakernel/magics/plot_magic.py#L33-L58
from metakernel import Magic, option


class PlotMagic(Magic):

    @option(
        '-s', '--size', action='store',
        help='Pixel size of plots, "width,height"'
    )
    @option(
        '-f', '--format', action='store',
        help='Plot format (png, svg or jpg).'
    )
    @option(
        '-b', '--backend', action='store', default='inline',
        help='Backend selection'
    )
    @option(
        '-r', '--resolution', action='store',
        help='Resolution in pixels per inch'
    )
    @option(
        '-w', '--width', action='store',
        help='Plot width in pixels'
    )
    @option(
        '-h', '--height', action='store',
        help='Plot height in pixels'
    )
BSD 3-Clause New or Revised License
packtpublishing/django-web-development-with-python
Module 2/Chapter05/django-myproject-05/utils/templatetags/utility_tags.py
modify_query
python
def modify_query(context, *params_to_remove, **params_to_change):
    query_params = []
    for key, value_list in context["request"].GET._iterlists():
        if not key in params_to_remove:
            if key in params_to_change:
                query_params.append((key, params_to_change[key]))
                params_to_change.pop(key)
            else:
                for value in value_list:
                    query_params.append((key, value))
    for key, value in params_to_change.items():
        query_params.append((key, value))
    query_string = context["request"].path
    if len(query_params):
        query_string += "?%s" % urllib.urlencode([
            (key, force_str(value))
            for (key, value) in query_params if value
        ]).replace("&", "&amp;")
    return query_string
Renders a link with modified current query parameters
https://github.com/packtpublishing/django-web-development-with-python/blob/2c88b7899b666b9007fd2c40395662dc30e49177/Module 2/Chapter05/django-myproject-05/utils/templatetags/utility_tags.py#L208-L229
from __future__ import unicode_literals import re import urllib from datetime import datetime from django.db import models from django import template from django.template.loader import get_template from django.utils.encoding import force_str from django.utils.translation import ugettext_lazy as _ from django.utils.safestring import mark_safe from django.utils.timezone import now as tz_now register = template.Library() @register.filter def days_since(value): today = tz_now().date() if isinstance(value, datetime): value = value.date() diff = today - value if diff.days > 1: return _("%s days ago") % diff.days elif diff.days == 1: return _("yesterday") elif diff.days == 0: return _("today") else: return value.strftime("%B %d, %Y") media_tags_regex = re.compile( r"<figure[\S\s]+?</figure>|" r"<object[\S\s]+?</object>|" r"<video[\S\s]+?</video>|" r"<audio[\S\s]+?</audio>|" r"<iframe[\S\s]+?</iframe>|" r"<(img|embed)[^>]+>", re.MULTILINE ) @register.filter def first_media(content): m = media_tags_regex.search(content) media_tag = "" if m: media_tag = m.group() return mark_safe(media_tag) @register.filter def humanize_url(url, letter_count): letter_count = int(letter_count) re_start = re.compile(r'^https?://') re_end = re.compile(r'/$') url = re_end.sub("", re_start.sub("", url)) if len(url) > letter_count: url = "%s…" % url[:letter_count - 1] return url @register.tag def try_to_include(parser, token): try: tag_name, template_name = token.split_contents() except ValueError: raise template.TemplateSyntaxError, "%r tag requires a single argument" % token.contents.split()[0] return IncludeNode(template_name) class IncludeNode(template.Node): def __init__(self, template_name): self.template_name = template_name def render(self, context): try: template_name = template.resolve_variable(self.template_name, context) included_template = get_template(template_name).render(context) except template.TemplateDoesNotExist: included_template = "" return included_template @register.tag def get_objects(parser, token): amount = None try: tag_name, manager_method, str_from, appmodel, str_limit, amount, str_as, var_name = token.split_contents() except ValueError: try: tag_name, manager_method, str_from, appmodel, str_as, var_name = token.split_contents() except ValueError: raise template.TemplateSyntaxError, "get_objects tag requires a following syntax: {% get_objects [<manager>.]<method> from <app_name>.<model_name> [limit <amount>] as <var_name> %}" try: app_name, model_name = appmodel.split(".") except ValueError: raise template.TemplateSyntaxError, "get_objects tag requires application name and model name separated by a dot" model = models.get_model(app_name, model_name) return ObjectsNode(model, manager_method, amount, var_name) class ObjectsNode(template.Node): def __init__(self, model, manager_method, amount, var_name): self.model = model self.manager_method = manager_method self.amount = amount self.var_name = var_name def render(self, context): if "." 
in self.manager_method: manager, method = self.manager_method.split(".") else: manager = "_default_manager" method = self.manager_method qs = getattr( getattr(self.model, manager), method, self.model._default_manager.none, )() if self.amount: amount = template.resolve_variable(self.amount, context) context[self.var_name] = qs[:amount] else: context[self.var_name] = qs return "" @register.tag def parse(parser, token): bits = token.split_contents() tag_name = bits.pop(0) try: template_value = bits.pop(0) var_name = None if len(bits) == 2: bits.pop(0) var_name = bits.pop(0) except ValueError: raise template.TemplateSyntaxError, "parse tag requires a following syntax: {% parse <template_value> [as <variable>] %}" return ParseNode(template_value, var_name) class ParseNode(template.Node): def __init__(self, template_value, var_name): self.template_value = template_value self.var_name = var_name def render(self, context): template_value = template.resolve_variable(self.template_value, context) t = template.Template(template_value) context_vars = {} for d in list(context): for var, val in d.items(): context_vars[var] = val result = t.render(template.RequestContext(context["request"], context_vars)) if self.var_name: context[self.var_name] = result return "" return result @register.simple_tag(takes_context=True)
MIT License
unofficial-memsource/memsource-cli-client
memsource_cli/models/file_import_settings_dto.py
FileImportSettingsDto.json
python
def json(self, json):
    self._json = json
Sets the json of this FileImportSettingsDto.

:param json: The json of this FileImportSettingsDto.  # noqa: E501
:type: JsonSettingsDto
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/file_import_settings_dto.py#L582-L590
import pprint import re import six from memsource_cli.models.android_settings_dto import AndroidSettingsDto from memsource_cli.models.csv_settings_dto import CsvSettingsDto from memsource_cli.models.dita_settings_dto import DitaSettingsDto from memsource_cli.models.doc_book_settings_dto import DocBookSettingsDto from memsource_cli.models.doc_settings_dto import DocSettingsDto from memsource_cli.models.html_settings_dto import HtmlSettingsDto from memsource_cli.models.idml_settings_dto import IdmlSettingsDto from memsource_cli.models.json_settings_dto import JsonSettingsDto from memsource_cli.models.mac_settings_dto import MacSettingsDto from memsource_cli.models.md_settings_dto import MdSettingsDto from memsource_cli.models.mif_settings_dto import MifSettingsDto from memsource_cli.models.multilingual_xls_settings_dto import MultilingualXlsSettingsDto from memsource_cli.models.multilingual_xml_settings_dto import MultilingualXmlSettingsDto from memsource_cli.models.pdf_settings_dto import PdfSettingsDto from memsource_cli.models.php_settings_dto import PhpSettingsDto from memsource_cli.models.po_settings_dto import PoSettingsDto from memsource_cli.models.ppt_settings_dto import PptSettingsDto from memsource_cli.models.properties_settings_dto import PropertiesSettingsDto from memsource_cli.models.psd_settings_dto import PsdSettingsDto from memsource_cli.models.quark_tag_settings_dto import QuarkTagSettingsDto from memsource_cli.models.resx_settings_dto import ResxSettingsDto from memsource_cli.models.sdl_xlf_settings_dto import SdlXlfSettingsDto from memsource_cli.models.seg_rule_reference import SegRuleReference from memsource_cli.models.tm_match_settings_dto import TMMatchSettingsDto from memsource_cli.models.ttx_settings_dto import TtxSettingsDto from memsource_cli.models.txt_settings_dto import TxtSettingsDto from memsource_cli.models.xlf2_settings_dto import Xlf2SettingsDto from memsource_cli.models.xlf_settings_dto import XlfSettingsDto from memsource_cli.models.xls_settings_dto import XlsSettingsDto from memsource_cli.models.xml_settings_dto import XmlSettingsDto from memsource_cli.models.yaml_settings_dto import YamlSettingsDto class FileImportSettingsDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'input_charset': 'str', 'output_charset': 'str', 'zip_charset': 'str', 'file_format': 'str', 'target_length': 'bool', 'target_length_max': 'int', 'target_length_percent': 'bool', 'target_length_percent_value': 'float', 'android': 'AndroidSettingsDto', 'idml': 'IdmlSettingsDto', 'xls': 'XlsSettingsDto', 'multilingual_xml': 'MultilingualXmlSettingsDto', 'php': 'PhpSettingsDto', 'resx': 'ResxSettingsDto', 'json': 'JsonSettingsDto', 'html': 'HtmlSettingsDto', 'multilingual_xls': 'MultilingualXlsSettingsDto', 'csv': 'CsvSettingsDto', 'txt': 'TxtSettingsDto', 'xlf2': 'Xlf2SettingsDto', 'quark_tag': 'QuarkTagSettingsDto', 'pdf': 'PdfSettingsDto', 'tm_match': 'TMMatchSettingsDto', 'xml': 'XmlSettingsDto', 'mif': 'MifSettingsDto', 'properties': 'PropertiesSettingsDto', 'doc': 'DocSettingsDto', 'xlf': 'XlfSettingsDto', 'sdl_xlf': 'SdlXlfSettingsDto', 'ttx': 'TtxSettingsDto', 'ppt': 'PptSettingsDto', 'yaml': 'YamlSettingsDto', 'dita': 'DitaSettingsDto', 'doc_book': 'DocBookSettingsDto', 'po': 'PoSettingsDto', 'mac': 'MacSettingsDto', 'md': 'MdSettingsDto', 'psd': 'PsdSettingsDto', 'seg_rule': 'SegRuleReference', 'target_seg_rule': 'SegRuleReference' } attribute_map = { 'input_charset': 'inputCharset', 'output_charset': 'outputCharset', 'zip_charset': 'zipCharset', 'file_format': 'fileFormat', 'target_length': 'targetLength', 'target_length_max': 'targetLengthMax', 'target_length_percent': 'targetLengthPercent', 'target_length_percent_value': 'targetLengthPercentValue', 'android': 'android', 'idml': 'idml', 'xls': 'xls', 'multilingual_xml': 'multilingualXml', 'php': 'php', 'resx': 'resx', 'json': 'json', 'html': 'html', 'multilingual_xls': 'multilingualXls', 'csv': 'csv', 'txt': 'txt', 'xlf2': 'xlf2', 'quark_tag': 'quarkTag', 'pdf': 'pdf', 'tm_match': 'tmMatch', 'xml': 'xml', 'mif': 'mif', 'properties': 'properties', 'doc': 'doc', 'xlf': 'xlf', 'sdl_xlf': 'sdlXlf', 'ttx': 'ttx', 'ppt': 'ppt', 'yaml': 'yaml', 'dita': 'dita', 'doc_book': 'docBook', 'po': 'po', 'mac': 'mac', 'md': 'md', 'psd': 'psd', 'seg_rule': 'segRule', 'target_seg_rule': 'targetSegRule' } def __init__(self, input_charset=None, output_charset=None, zip_charset=None, file_format=None, target_length=None, target_length_max=None, target_length_percent=None, target_length_percent_value=None, android=None, idml=None, xls=None, multilingual_xml=None, php=None, resx=None, json=None, html=None, multilingual_xls=None, csv=None, txt=None, xlf2=None, quark_tag=None, pdf=None, tm_match=None, xml=None, mif=None, properties=None, doc=None, xlf=None, sdl_xlf=None, ttx=None, ppt=None, yaml=None, dita=None, doc_book=None, po=None, mac=None, md=None, psd=None, seg_rule=None, target_seg_rule=None): self._input_charset = None self._output_charset = None self._zip_charset = None self._file_format = None self._target_length = None self._target_length_max = None self._target_length_percent = None self._target_length_percent_value = None self._android = None self._idml = None self._xls = None self._multilingual_xml = None self._php = None self._resx = None self._json = None self._html = None self._multilingual_xls = None self._csv = None self._txt = None self._xlf2 = None self._quark_tag = None self._pdf = None self._tm_match = None self._xml = None self._mif = None self._properties = None self._doc = None self._xlf = None self._sdl_xlf = None self._ttx = None self._ppt = None self._yaml = None self._dita = None self._doc_book = None self._po = None self._mac = None self._md = None self._psd = None self._seg_rule = None 
self._target_seg_rule = None self.discriminator = None if input_charset is not None: self.input_charset = input_charset if output_charset is not None: self.output_charset = output_charset if zip_charset is not None: self.zip_charset = zip_charset if file_format is not None: self.file_format = file_format if target_length is not None: self.target_length = target_length if target_length_max is not None: self.target_length_max = target_length_max if target_length_percent is not None: self.target_length_percent = target_length_percent if target_length_percent_value is not None: self.target_length_percent_value = target_length_percent_value if android is not None: self.android = android if idml is not None: self.idml = idml if xls is not None: self.xls = xls if multilingual_xml is not None: self.multilingual_xml = multilingual_xml if php is not None: self.php = php if resx is not None: self.resx = resx if json is not None: self.json = json if html is not None: self.html = html if multilingual_xls is not None: self.multilingual_xls = multilingual_xls if csv is not None: self.csv = csv if txt is not None: self.txt = txt if xlf2 is not None: self.xlf2 = xlf2 if quark_tag is not None: self.quark_tag = quark_tag if pdf is not None: self.pdf = pdf if tm_match is not None: self.tm_match = tm_match if xml is not None: self.xml = xml if mif is not None: self.mif = mif if properties is not None: self.properties = properties if doc is not None: self.doc = doc if xlf is not None: self.xlf = xlf if sdl_xlf is not None: self.sdl_xlf = sdl_xlf if ttx is not None: self.ttx = ttx if ppt is not None: self.ppt = ppt if yaml is not None: self.yaml = yaml if dita is not None: self.dita = dita if doc_book is not None: self.doc_book = doc_book if po is not None: self.po = po if mac is not None: self.mac = mac if md is not None: self.md = md if psd is not None: self.psd = psd if seg_rule is not None: self.seg_rule = seg_rule if target_seg_rule is not None: self.target_seg_rule = target_seg_rule @property def input_charset(self): return self._input_charset @input_charset.setter def input_charset(self, input_charset): self._input_charset = input_charset @property def output_charset(self): return self._output_charset @output_charset.setter def output_charset(self, output_charset): self._output_charset = output_charset @property def zip_charset(self): return self._zip_charset @zip_charset.setter def zip_charset(self, zip_charset): self._zip_charset = zip_charset @property def file_format(self): return self._file_format @file_format.setter def file_format(self, file_format): self._file_format = file_format @property def target_length(self): return self._target_length @target_length.setter def target_length(self, target_length): self._target_length = target_length @property def target_length_max(self): return self._target_length_max @target_length_max.setter def target_length_max(self, target_length_max): self._target_length_max = target_length_max @property def target_length_percent(self): return self._target_length_percent @target_length_percent.setter def target_length_percent(self, target_length_percent): self._target_length_percent = target_length_percent @property def target_length_percent_value(self): return self._target_length_percent_value @target_length_percent_value.setter def target_length_percent_value(self, target_length_percent_value): self._target_length_percent_value = target_length_percent_value @property def android(self): return self._android @android.setter def android(self, android): self._android = 
android @property def idml(self): return self._idml @idml.setter def idml(self, idml): self._idml = idml @property def xls(self): return self._xls @xls.setter def xls(self, xls): self._xls = xls @property def multilingual_xml(self): return self._multilingual_xml @multilingual_xml.setter def multilingual_xml(self, multilingual_xml): self._multilingual_xml = multilingual_xml @property def php(self): return self._php @php.setter def php(self, php): self._php = php @property def resx(self): return self._resx @resx.setter def resx(self, resx): self._resx = resx @property def json(self): return self._json @json.setter
Apache License 2.0
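A small sketch of the setter above in use, assuming the generated memsource_cli models and that JsonSettingsDto can be default-constructed (all swagger-generated kwargs optional).

# Hypothetical usage sketch of the json property/setter shown above.
from memsource_cli.models.file_import_settings_dto import FileImportSettingsDto
from memsource_cli.models.json_settings_dto import JsonSettingsDto

settings = FileImportSettingsDto(input_charset="UTF-8")
settings.json = JsonSettingsDto()   # goes through the setter and stores _json
print(settings.json)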
prajdabre/yanmtt
transformers/src/transformers/convert_graph_to_onnx.py
ensure_valid_input
python
def ensure_valid_input(model, tokens, input_names):
    print("Ensuring inputs are in correct order")
    model_args_name = model.forward.__code__.co_varnames
    model_args, ordered_input_names = [], []
    for arg_name in model_args_name[1:]:
        if arg_name in input_names:
            ordered_input_names.append(arg_name)
            model_args.append(tokens[arg_name])
        else:
            print(f"{arg_name} is not present in the generated input list.")
            break

    print("Generated inputs order: {}".format(ordered_input_names))
    return ordered_input_names, tuple(model_args)
Ensure inputs are presented in the correct order, without any None values.

Args:
    model: The model used to forward the input data
    tokens: BatchEncoding holding the input data
    input_names: The name of the inputs

Returns: Tuple
https://github.com/prajdabre/yanmtt/blob/4d329c3bcb81ca432d5947bb4673897086ee7f32/transformers/src/transformers/convert_graph_to_onnx.py#L133-L158
from argparse import ArgumentParser from os import listdir, makedirs from pathlib import Path from typing import Dict, List, Optional, Tuple from packaging.version import Version, parse from .file_utils import ModelOutput, is_tf_available, is_torch_available from .pipelines import Pipeline, pipeline from .tokenization_utils import BatchEncoding ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") SUPPORTED_PIPELINES = [ "feature-extraction", "ner", "sentiment-analysis", "fill-mask", "question-answering", "text-generation", "translation_en_to_fr", "translation_en_to_de", "translation_en_to_ro", ] class OnnxConverterArgumentParser(ArgumentParser): def __init__(self): super().__init__("ONNX Converter") self.add_argument( "--pipeline", type=str, choices=SUPPORTED_PIPELINES, default="feature-extraction", ) self.add_argument( "--model", type=str, required=True, help="Model's id or path (ex: bert-base-cased)", ) self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: bert-base-cased)") self.add_argument( "--framework", type=str, choices=["pt", "tf"], help="Framework for loading the model", ) self.add_argument("--opset", type=int, default=11, help="ONNX opset to use") self.add_argument( "--check-loading", action="store_true", help="Check ONNX is able to load the model", ) self.add_argument( "--use-external-format", action="store_true", help="Allow exporting model >= than 2Gb", ) self.add_argument( "--quantize", action="store_true", help="Quantize the neural network to be run with int8", ) self.add_argument("output") def generate_identified_filename(filename: Path, identifier: str) -> Path: return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix) def check_onnxruntime_requirements(minimum_version: Version): try: import onnxruntime ort_version = parse(onnxruntime.__version__) if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: raise ImportError( f"We found an older version of onnxruntime ({onnxruntime.__version__}) " f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" f"Please update onnxruntime by running `pip install --upgrade onnxruntime`" ) except ImportError: raise ImportError( "onnxruntime doesn't seem to be currently installed. " "Please install the onnxruntime by running `pip install onnxruntime`" " and relaunch the conversion." )
MIT License
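A usage sketch for ensure_valid_input above, assuming a standard PyTorch BERT checkpoint and the Hugging Face-style tokenizer API used elsewhere in this fork; model and tokenizer names are illustrative.

# Hypothetical usage sketch; checkpoint names are illustrative only.
from transformers import BertModel, BertTokenizer
from transformers.convert_graph_to_onnx import ensure_valid_input

tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
model = BertModel.from_pretrained("bert-base-cased")

tokens = tokenizer("Hello world", return_tensors="pt")
ordered_names, model_args = ensure_valid_input(model, tokens, list(tokens.keys()))
# ordered_names follows the order of model.forward's signature,
# e.g. ['input_ids', 'attention_mask', 'token_type_ids'] for BERT.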
alehander92/airtight
airtight/converter.py
PythonConverter.convert_name
python
def convert_name(self, id, ctx, context):
    return hm_ast.Ident(id)
alexander => Ident("alexander")
https://github.com/alehander92/airtight/blob/3f54b4889ab60288ce69669ebf1c3d284662c7aa/airtight/converter.py#L240-L246
import ast import airtight.hindley_milner_ast as hm_ast from airtight.errors import * class PythonConverter: OPERATOR_MAGIC_FUNCTIONS = { ast.Add: '__add__', ast.Sub: '__substract__', ast.Mult: '__mult__', ast.FloorDiv: '__real_divide__', ast.Div: '__divide__', ast.Mod: '__percent__', ast.Pow: '__power__', ast.Eq: '__equals__', ast.NotEq: '__not_equals__', ast.Lt: '__lt__', ast.LtE: '__lte__', ast.Gt: '__gt__', ast.GtE: '__gte', ast.And: '__and__', ast.Or: '__or__', ast.Not: '__not__', ast.Index: '__index__', ast.Slice: '__slice__' } def __init__(self): pass def convert(self, python_ast): self.type_vars = [] return self.convert_node(python_ast) def _unique_type_var(self): return hm_ast.TypeVariable() def node_dict(self, node): return {field : getattr(node, field) for field in node._fields} def convert_node(self, node, context=None): return getattr(self, 'convert_' + str(node.__class__.__name__).lower())( context=context, **self.node_dict(node)) def convert_module(self, body, context, docstring=None): return self.convert_body(body, context) def convert_assign(self, targets, value, context): if len(targets) == 1 and hasattr(targets[0], 'id'): return hm_ast.Let( targets[0].id, self.convert_node(value), context) else: return hm_ast.Letmany( [t.id for t in targets[0].elts], [self.convert_node(node) for node in value.elts], context) def convert_augassign(self, target, op, value, context): return self.convert_assign( [target], ast.BinOp( target, op, value), context) def convert_str(self, s, context): return hm_ast.aString(s) def convert_num(self, n, context): if type(n) == float: return hm_ast.aFloat(n) else: return hm_ast.anInteger(n) def convert_functiondef(self, name, args, body, decorator_list, returns, context, docstring=None): expected = [] vars = {} for arg in args.args: expected.append(self.convert_annotation(arg.annotation, vars)) expected.append(self.convert_annotation(returns, vars)) result = hm_ast.Let( name, hm_ast.Multi_Lambda( [arg.arg for arg in args.args], self.convert_body(body, None), expected=expected), context) result.a_native = False result.a_vars = [] if decorator_list: if isinstance(decorator_list[0], ast.Name) and decorator_list[0].id == 'native': result.a_native = True if isinstance(decorator_list[-1], ast.Call) and decorator_list[-1].func.id == 'template': result.a_vars = [vars[arg.id] for arg in decorator_list[-1].args] return result def convert_annotation(self, annotation, vars): if isinstance(annotation, ast.Name) and annotation.id.islower(): if annotation.id not in vars: vars[annotation.id] = hm_ast.TypeVariable() return vars[annotation.id] elif isinstance(annotation, ast.Name): return hm_ast.TypeOperator(annotation.id, []) elif isinstance(annotation, ast.BinOp) and isinstance(annotation.op, ast.RShift): if isinstance(annotation.left, ast.Name): left = [annotation.left, annotation.right] else: left = annotation.left.elts + [annotation.right] return hm_ast.Multi_Function([self.convert_annotation(l, vars) for l in left]) elif isinstance(annotation, ast.BinOp) and isinstance(annotation.op, ast.BinOr): left, right = [self.convert_annotation(a, vars) for a in [annotation.left, annotation.right]] return hm_ast.Union(left, right) elif isinstance(annotation, ast.List): return hm_ast.List(self.convert_annotation(annotation.elts[0], vars)) else: return None def convert_expr(self, value, context): return self.convert_node(value, context) def convert_body(self, body, context): if len(body) == 1: converted = self.convert_node(body[0], context) if not isinstance(converted, 
(hm_ast.Let, hm_ast.Letrec)): return converted elif context is None: converted.body = hm_ast.Ident(converted.v) return converted else: return converted else: current = len(body) - 1 while current >= 0: next_node = self.convert_node(body[current], context) if isinstance(next_node, (hm_ast.Let, hm_ast.Letrec)): context = next_node elif context: context = hm_ast.Body(next_node, context) else: context = next_node current -= 1 return context def convert_return(self, value, context): return self.convert_node(value, context) def convert_binop(self, left, right, op, context): return hm_ast.Multi_Apply( hm_ast.Ident('a' + self.OPERATOR_MAGIC_FUNCTIONS[type(op)]), [self.convert_node(left, context), self.convert_node(right, context)]) def convert_compare(self, ops, left, comparators, context): return hm_ast.Multi_Apply( hm_ast.Ident('a' + self.OPERATOR_MAGIC_FUNCTIONS[type(ops[0])]), [self.convert_node(left, context), self.convert_node(comparators[0], context)]) def convert_if(self, test, body, orelse, context): return hm_ast.If( self.convert_node(test, context), self.convert_body(body, context), self.convert_body(orelse, context)) def convert_for(self, target, body, iter, orelse, context): return hm_ast.For( self.convert_node(iter, context), self.convert_node(target, context), self.convert_body(body, context)) def convert_while(self, test, body, orelse, context): if orelse: raise NotSupportedError("else not supported after while") return hm_ast.While( self.convert_node(test, context), self.convert_body(body, context)) def convert_subscript(self, value, slice, ctx, context): if isinstance(slice, ast.Index): return hm_ast.Multi_Apply( hm_ast.Ident('a' + self.OPERATOR_MAGIC_FUNCTIONS[type(slice)]), [ self.convert_node(value, context), self.convert_node(slice.value)]) else: return hm_ast.Multi_Apply( hm_ast.Ident('a' + self.OPERATOR_MAGIC_FUNCTIONS[type(slice)]), [ self.convert_node(value, context), self.convert_node(slice.lower) if slice.lower else hm_ast.anInteger(0), self.convert_node(slice.upper) if slice.upper else hm_ast.Multi_Apply( self.OPERATOR_MAGIC_FUNCTIONS[ast.Sub], [ hm_ast.Apply(hm_ast.Ident('len'), self.convert_node(value, context)), hm_ast.anInteger(1)])])
MIT License
harmony-one/pyhmy
pyhmy/logging.py
ControlledLogger.warning
python
def warning(self, msg):
    self._lock.acquire()
    self.warning_buffer.append(f"[{threading.get_ident()}] "
                               f"{datetime.datetime.utcnow()} : {msg}")
    self._lock.release()
:param msg: The warning message to log
https://github.com/harmony-one/pyhmy/blob/668d7ef756e4fefe4621f435bb9a3e37cbeb82d9/pyhmy/logging.py#L79-L86
import threading import datetime import gzip import os import logging import logging.handlers class _GZipRotator: def __call__(self, source, dest): os.rename(source, dest) f_in = open(dest, 'rb') f_out = gzip.open("%s.gz" % dest, 'wb') f_out.writelines(f_in) f_out.close() f_in.close() os.remove(dest) class ControlledLogger: def __init__(self, logger_name, log_dir, backup_count=5): if log_dir.endswith('/'): log_dir = log_dir[:-1] log_dir = os.path.realpath(log_dir) os.makedirs(log_dir, exist_ok=True) handler = logging.handlers.TimedRotatingFileHandler(f"{log_dir}/{logger_name}.log", 'midnight', 1, backupCount=backup_count) handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s')) handler.rotator = _GZipRotator() self.filename = handler.baseFilename self.logger = logging.getLogger(logger_name) self.logger.addHandler(handler) self._lock = threading.Lock() self.filepath = f"{log_dir}/{logger_name}.log" self.info_buffer = [] self.debug_buffer = [] self.warning_buffer = [] self.error_buffer = [] def __repr__(self): return f"<ControlledLogger @ {self.filepath} : {self.logger}>" def _clear(self): self.info_buffer.clear() self.debug_buffer.clear() self.warning_buffer.clear() self.error_buffer.clear() def info(self, msg): self._lock.acquire() self.info_buffer.append(f"[{threading.get_ident()}] " f"{datetime.datetime.utcnow()} : {msg}") self._lock.release() def debug(self, msg): self._lock.acquire() self.debug_buffer.append(f"[{threading.get_ident()}] " f"{datetime.datetime.utcnow()} : {msg}") self._lock.release()
MIT License
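A short sketch of the logger above, assuming the constructor shown in the surrounding module; warning() only buffers messages in memory, it does not flush them to the rotating log file.

# Hypothetical usage sketch of ControlledLogger.warning.
from pyhmy.logging import ControlledLogger

log = ControlledLogger("balance-checker", "./logs")
log.warning("node appears to be out of sync")

# warning() appends to a thread-safe in-memory buffer:
print(log.warning_buffer)   # ['[<thread-id>] <utc-timestamp> : node appears to be out of sync']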
allenai/ruletaker
utils.py
constantize
python
def constantize(term):
    return f"'{term.replace(' ', '_')}'"
Formats a term (string) to look like a Constant.
https://github.com/allenai/ruletaker/blob/ccb445d637a2b6ab301d57f67490929845d68866/utils.py#L19-L22
import random

from common import Fact, Rule


def variablize(term):
    term = term.replace(" ", "_")
    return term[0].upper() + term[1:]


def predicatize(term):
    term = term.replace(" ", "_")
    return term[0].lower() + term[1:]
Apache License 2.0
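The three naming helpers in this module differ only in the casing and quoting they apply; a quick sketch, assuming `from utils import ...` as in the record path.

# Hypothetical usage sketch for the naming helpers above.
from utils import variablize, predicatize, constantize

print(variablize("big dog"))    # Big_dog   (Variable-style)
print(predicatize("Is nice"))   # is_nice   (predicate-style)
print(constantize("big dog"))   # 'big_dog' (quoted Constant)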
nuitka/nuitka
tests/benchmarks/pybench/pybench.py
Test.compatible
python
def compatible(self, other):
    if self.version != other.version:
        return 0
    if self.rounds != other.rounds:
        return 0
    return 1
Return 1/0 depending on whether the test is compatible with the other Test instance or not.
https://github.com/nuitka/nuitka/blob/4c5161620ea8f0f1c93a1d6be79e7e6eda7161d4/tests/benchmarks/pybench/pybench.py#L271-L281
__copyright__ = """\ Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com) Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com) All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee or royalty is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation or portions thereof, including modifications, that you make. THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! """ import sys, time, operator, string, platform from CommandLine import * try: import cPickle pickle = cPickle except ImportError: import pickle __version__ = '2.0' MILLI_SECONDS = 1e3 MICRO_SECONDS = 1e6 PERCENT = 100 LINE = 79 MIN_TEST_RUNTIME = 1e-6 CALIBRATION_RUNS = 20 CALIBRATION_LOOPS = 20 ALLOW_SKIPPING_CALIBRATION = 1 TIMER_TIME_TIME = 'time.time' TIMER_TIME_CLOCK = 'time.clock' TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime' if sys.platform[:3] == 'win': TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK else: TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME _debug = 0 def get_timer(timertype): if timertype == TIMER_TIME_TIME: return time.time elif timertype == TIMER_TIME_CLOCK: return time.clock elif timertype == TIMER_SYSTIMES_PROCESSTIME: import systimes return systimes.processtime else: raise TypeError('unknown timer type: %s' % timertype) def get_machine_details(): if _debug: print 'Getting machine details...' 
buildno, builddate = platform.python_build() python = platform.python_version() try: unichr(100000) except ValueError: unicode = 'UCS2' except NameError: unicode = None else: unicode = 'UCS4' bits, linkage = platform.architecture() return { 'platform': platform.platform(), 'processor': platform.processor(), 'executable': sys.executable, 'implementation': getattr(platform, 'python_implementation', lambda:'n/a')(), 'python': platform.python_version(), 'compiler': platform.python_compiler(), 'buildno': buildno, 'builddate': builddate, 'unicode': unicode, 'bits': bits, } def print_machine_details(d, indent=''): l = ['Machine Details:', ' Platform ID: %s' % d.get('platform', 'n/a'), ' Processor: %s' % d.get('processor', 'n/a'), '', 'Python:', ' Implementation: %s' % d.get('implementation', 'n/a'), ' Executable: %s' % d.get('executable', 'n/a'), ' Version: %s' % d.get('python', 'n/a'), ' Compiler: %s' % d.get('compiler', 'n/a'), ' Bits: %s' % d.get('bits', 'n/a'), ' Build: %s (#%s)' % (d.get('builddate', 'n/a'), d.get('buildno', 'n/a')), ' Unicode: %s' % d.get('unicode', 'n/a'), ] print indent + string.join(l, '\n' + indent) + '\n' class Test: version = 2.0 operations = 1 rounds = 100000 is_a_test = 1 last_timing = (0.0, 0.0, 0.0) warp = 1 calibration_runs = CALIBRATION_RUNS overhead_times = None times = [] timer = TIMER_PLATFORM_DEFAULT def __init__(self, warp=None, calibration_runs=None, timer=None): if warp is not None: self.rounds = int(self.rounds / warp) if self.rounds == 0: raise ValueError('warp factor set too high') self.warp = warp if calibration_runs is not None: if (not ALLOW_SKIPPING_CALIBRATION and calibration_runs < 1): raise ValueError('at least one calibration run is required') self.calibration_runs = calibration_runs if timer is not None: self.timer = timer self.times = [] self.overhead_times = [] self.version = self.version self.operations = self.operations self.rounds = self.rounds def get_timer(self): return get_timer(self.timer)
Apache License 2.0
hanguangbaihuo/sparrow_cloud
sparrow_cloud/apps/schema_command/schemas/generators.py
EndpointEnumerator.should_include_endpoint
python
def should_include_endpoint(self, path, callback): if not is_api_view(callback): return False if callback.cls.schema is None: return False if hasattr(callback, "initkwargs") and 'schema' in callback.initkwargs: if callback.initkwargs['schema'] is None: return False if path.endswith('.{format}') or path.endswith('.{format}/'): return False return True
Return `True` if the given endpoint should be included.
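A simplified, framework-free sketch of the filtering rules above; it drops the is_api_view check and uses toy callback objects, so it is an approximation of the DRF-backed original rather than a drop-in replacement.

class _Cls:
    schema = object()  # a non-None schema keeps the endpoint eligible

class _Callback:
    cls = _Cls()
    initkwargs = {}

def should_include(path, callback):
    if callback.cls.schema is None:
        return False
    if callback.initkwargs.get('schema', True) is None:
        return False
    return not (path.endswith('.{format}') or path.endswith('.{format}/'))

print(should_include('/users/', _Callback()))          # True
print(should_include('/users.{format}', _Callback()))  # False: format-suffix routes are skipped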
https://github.com/hanguangbaihuo/sparrow_cloud/blob/a06f82ca17d9e50b24c176ee0428dc006d1b4ac0/sparrow_cloud/apps/schema_command/schemas/generators.py#L200-L215
import re from collections import Counter, OrderedDict from importlib import import_module from django.conf import settings from django.contrib.admindocs.views import simplify_regex from django.utils import six from ..schemas.compat import ( URLPattern, URLResolver, coreapi, coreschema, get_original_route ) from rest_framework.utils.model_meta import _get_pk from ..schemas import incompatible_settings from .utils import is_list_view from openapi_codec import generate_swagger_object def common_path(paths): split_paths = [path.strip('/').split('/') for path in paths] s1 = min(split_paths) s2 = max(split_paths) common = s1 for i, c in enumerate(s1): if c != s2[i]: common = s1[:i] break return '/' + '/'.join(common) def get_pk_name(model): meta = model._meta.concrete_model._meta return _get_pk(meta).name def is_api_view(callback): from rest_framework.views import APIView cls = getattr(callback, 'cls', None) return (cls is not None) and issubclass(cls, APIView) INSERT_INTO_COLLISION_FMT = """ Schema Naming Collision. coreapi.Link for URL path {value_url} cannot be inserted into schema. Position conflicts with coreapi.Link for URL path {target_url}. Attemped to insert link with keys: {keys}. Adjust URLs to avoid naming collision or override `SchemaGenerator.get_keys()` to customise schema structure. """ class LinkNode(OrderedDict): def __init__(self): self.links = [] self.methods_counter = Counter() super(LinkNode, self).__init__() def get_available_key(self, preferred_key): if preferred_key not in self: return preferred_key while True: current_val = self.methods_counter[preferred_key] self.methods_counter[preferred_key] += 1 key = '{}_{}'.format(preferred_key, current_val) if key not in self: return key def insert_into(target, keys, value): for key in keys[:-1]: if key not in target: target[key] = LinkNode() target = target[key] try: target.links.append((keys[-1], value)) except TypeError: msg = INSERT_INTO_COLLISION_FMT.format( value_url=value.url, target_url=target.url, keys=keys ) raise ValueError(msg) def distribute_links(obj): for key, value in obj.items(): distribute_links(value) for preferred_key, link in obj.links: key = obj.get_available_key(preferred_key) obj[key] = link def is_custom_action(action): return action not in { 'retrieve', 'list', 'create', 'update', 'partial_update', 'destroy' } def endpoint_ordering(endpoint): path, method, callback = endpoint method_priority = { 'GET': 0, 'POST': 1, 'PUT': 2, 'PATCH': 3, 'DELETE': 4 }.get(method, 5) return (path, method_priority) _PATH_PARAMETER_COMPONENT_RE = re.compile( r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>\w+)>' ) class EndpointEnumerator(object): def __init__(self, patterns=None, urlconf=None): if patterns is None: if urlconf is None: urlconf = settings.ROOT_URLCONF if isinstance(urlconf, six.string_types): urls = import_module(urlconf) else: urls = urlconf patterns = urls.urlpatterns self.patterns = patterns def get_api_endpoints(self, patterns=None, prefix=''): if patterns is None: patterns = self.patterns api_endpoints = [] for pattern in patterns: path_regex = prefix + get_original_route(pattern) if isinstance(pattern, URLPattern): path = self.get_path_from_regex(path_regex) callback = pattern.callback if self.should_include_endpoint(path, callback): for method in self.get_allowed_methods(callback): endpoint = (path, method, callback) api_endpoints.append(endpoint) elif isinstance(pattern, URLResolver): nested_endpoints = self.get_api_endpoints( patterns=pattern.url_patterns, prefix=path_regex ) 
api_endpoints.extend(nested_endpoints) api_endpoints = sorted(api_endpoints, key=endpoint_ordering) return api_endpoints def get_path_from_regex(self, path_regex): path = simplify_regex(path_regex) path = re.sub(_PATH_PARAMETER_COMPONENT_RE, r'{\g<parameter>}', path) return path
MIT License
microsoft/seed-encoder
utils/eval_mrr_doc.py
search_knn
python
def search_knn(xq, xb, k, distance_type=faiss.METRIC_L2): nq, d = xq.shape nb, d2 = xb.shape assert d == d2 I = np.empty((nq, k), dtype='int64') D = np.empty((nq, k), dtype='float32') if distance_type == faiss.METRIC_L2: heaps = faiss.float_maxheap_array_t() heaps.k = k heaps.nh = nq heaps.val = faiss.swig_ptr(D) heaps.ids = faiss.swig_ptr(I) faiss.knn_L2sqr( faiss.swig_ptr(xq), faiss.swig_ptr(xb), d, nq, nb, heaps ) elif distance_type == faiss.METRIC_INNER_PRODUCT: heaps = faiss.float_minheap_array_t() heaps.k = k heaps.nh = nq heaps.val = faiss.swig_ptr(D) heaps.ids = faiss.swig_ptr(I) faiss.knn_inner_product( faiss.swig_ptr(xq), faiss.swig_ptr(xb), d, nq, nb, heaps ) return D, I
Wrapper around the faiss knn functions that searches directly over the raw vectors, without building an index.
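A hedged usage sketch for the wrapper above, assuming search_knn is importable (e.g. from utils.eval_mrr_doc) and that the installed faiss build still exposes the SWIG helpers it relies on; the data here is random.

import numpy as np
import faiss  # assumed available

rng = np.random.RandomState(0)
xb = rng.rand(1000, 64).astype('float32')   # base vectors to search in
xq = rng.rand(5, 64).astype('float32')      # query vectors

D, I = search_knn(xq, xb, k=10, distance_type=faiss.METRIC_INNER_PRODUCT)
print(D.shape, I.shape)  # (5, 10) (5, 10): scores and base-vector indices per query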
https://github.com/microsoft/seed-encoder/blob/ececaa9cd5c9a8bc93184eb5a45ec38d9e2a1f95/utils/eval_mrr_doc.py#L67-L96
import sys sys.path += ["../"] from utils.msmarco_eval import quality_checks_qids, compute_metrics, load_reference import torch.distributed as dist import gzip import faiss import numpy as np from data.process_fn import dual_process_fn_doc from tqdm import tqdm import torch import os from utils.util import concat_key, is_first_worker, all_gather, StreamingDataset from torch.utils.data import DataLoader def embedding_inference(args, path, model, fn, bz, num_workers=2, is_query=True): f = open(path, encoding="utf-8") model = model.module if hasattr(model, "module") else model sds = StreamingDataset(f, fn) loader = DataLoader(sds, batch_size=bz, num_workers=1) emb_list, id_list = [], [] model.eval() for i, batch in tqdm(enumerate(loader), desc="Eval", disable=args.local_rank not in [-1, 0]): batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0].long( ), "attention_mask": batch[1].long()} idx = batch[3].long() if is_query: embs = model.query_emb(**inputs) else: embs = model.body_emb(**inputs) if len(embs.shape) == 3: B, C, E = embs.shape embs = embs.view(B*C, -1) idx = idx.repeat_interleave(C) assert embs.shape[0] == idx.shape[0] emb_list.append(embs.detach().cpu().numpy()) id_list.append(idx.detach().cpu().numpy()) f.close() emb_arr = np.concatenate(emb_list, axis=0) id_arr = np.concatenate(id_list, axis=0) return emb_arr, id_arr def parse_top_dev(input_path, qid_col, pid_col): ret = {} with open(input_path, encoding="utf-8") as f: for line in f: cells = line.strip().split(" ") qid = int(cells[qid_col]) pid = int(cells[pid_col].strip('D')) if qid not in ret: ret[qid] = [] ret[qid].append(pid) return ret
MIT License
brython-dev/brython
www/src/Lib/contextlib.py
_BaseExitStack.enter_context
python
def enter_context(self, cm): _cm_type = type(cm) _exit = _cm_type.__exit__ result = _cm_type.__enter__(cm) self._push_cm_exit(cm, _exit) return result
Enters the supplied context manager. If successful, also pushes its __exit__ method as a callback and returns the result of the __enter__ method.
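A standard-library usage example of enter_context through contextlib.ExitStack, the public subclass of _BaseExitStack.

from contextlib import ExitStack
import tempfile

with ExitStack() as stack:
    # each enter_context call runs __enter__ now and queues the matching __exit__ for later
    files = [stack.enter_context(tempfile.TemporaryFile()) for _ in range(3)]
    print(len(files))  # 3 open files; ExitStack closes them in reverse order on exit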
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/contextlib.py#L482-L494
import abc import sys import _collections_abc from collections import deque from functools import wraps from types import MethodType, GenericAlias __all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext", "AbstractContextManager", "AbstractAsyncContextManager", "AsyncExitStack", "ContextDecorator", "ExitStack", "redirect_stdout", "redirect_stderr", "suppress", "aclosing"] class AbstractContextManager(abc.ABC): __class_getitem__ = classmethod(GenericAlias) def __enter__(self): return self @abc.abstractmethod def __exit__(self, exc_type, exc_value, traceback): return None @classmethod def __subclasshook__(cls, C): if cls is AbstractContextManager: return _collections_abc._check_methods(C, "__enter__", "__exit__") return NotImplemented class AbstractAsyncContextManager(abc.ABC): __class_getitem__ = classmethod(GenericAlias) async def __aenter__(self): return self @abc.abstractmethod async def __aexit__(self, exc_type, exc_value, traceback): return None @classmethod def __subclasshook__(cls, C): if cls is AbstractAsyncContextManager: return _collections_abc._check_methods(C, "__aenter__", "__aexit__") return NotImplemented class ContextDecorator(object): def _recreate_cm(self): return self def __call__(self, func): @wraps(func) def inner(*args, **kwds): with self._recreate_cm(): return func(*args, **kwds) return inner class AsyncContextDecorator(object): def _recreate_cm(self): return self def __call__(self, func): @wraps(func) async def inner(*args, **kwds): async with self._recreate_cm(): return await func(*args, **kwds) return inner class _GeneratorContextManagerBase: def __init__(self, func, args, kwds): self.gen = func(*args, **kwds) self.func, self.args, self.kwds = func, args, kwds doc = getattr(func, "__doc__", None) if doc is None: doc = type(self).__doc__ self.__doc__ = doc def _recreate_cm(self): return self.__class__(self.func, self.args, self.kwds) class _GeneratorContextManager( _GeneratorContextManagerBase, AbstractContextManager, ContextDecorator, ): def __enter__(self): del self.args, self.kwds, self.func try: return next(self.gen) except StopIteration: raise RuntimeError("generator didn't yield") from None def __exit__(self, typ, value, traceback): if typ is None: try: next(self.gen) except StopIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: value = typ() try: self.gen.throw(typ, value, traceback) except StopIteration as exc: return exc is not value except RuntimeError as exc: if exc is value: return False if ( isinstance(value, StopIteration) and exc.__cause__ is value ): return False raise except BaseException as exc: if exc is not value: raise return False raise RuntimeError("generator didn't stop after throw()") class _AsyncGeneratorContextManager( _GeneratorContextManagerBase, AbstractAsyncContextManager, AsyncContextDecorator, ): async def __aenter__(self): del self.args, self.kwds, self.func try: return await anext(self.gen) except StopAsyncIteration: raise RuntimeError("generator didn't yield") from None async def __aexit__(self, typ, value, traceback): if typ is None: try: await anext(self.gen) except StopAsyncIteration: return False else: raise RuntimeError("generator didn't stop") else: if value is None: value = typ() try: await self.gen.athrow(typ, value, traceback) except StopAsyncIteration as exc: return exc is not value except RuntimeError as exc: if exc is value: return False if ( isinstance(value, (StopIteration, StopAsyncIteration)) and exc.__cause__ is value ): return False raise except 
BaseException as exc: if exc is not value: raise return False raise RuntimeError("generator didn't stop after athrow()") def contextmanager(func): @wraps(func) def helper(*args, **kwds): return _GeneratorContextManager(func, args, kwds) return helper def asynccontextmanager(func): @wraps(func) def helper(*args, **kwds): return _AsyncGeneratorContextManager(func, args, kwds) return helper class closing(AbstractContextManager): def __init__(self, thing): self.thing = thing def __enter__(self): return self.thing def __exit__(self, *exc_info): self.thing.close() class aclosing(AbstractAsyncContextManager): def __init__(self, thing): self.thing = thing async def __aenter__(self): return self.thing async def __aexit__(self, *exc_info): await self.thing.aclose() class _RedirectStream(AbstractContextManager): _stream = None def __init__(self, new_target): self._new_target = new_target self._old_targets = [] def __enter__(self): self._old_targets.append(getattr(sys, self._stream)) setattr(sys, self._stream, self._new_target) return self._new_target def __exit__(self, exctype, excinst, exctb): setattr(sys, self._stream, self._old_targets.pop()) class redirect_stdout(_RedirectStream): _stream = "stdout" class redirect_stderr(_RedirectStream): _stream = "stderr" class suppress(AbstractContextManager): def __init__(self, *exceptions): self._exceptions = exceptions def __enter__(self): pass def __exit__(self, exctype, excinst, exctb): return exctype is not None and issubclass(exctype, self._exceptions) class _BaseExitStack: @staticmethod def _create_exit_wrapper(cm, cm_exit): return MethodType(cm_exit, cm) @staticmethod def _create_cb_wrapper(callback, /, *args, **kwds): def _exit_wrapper(exc_type, exc, tb): callback(*args, **kwds) return _exit_wrapper def __init__(self): self._exit_callbacks = deque() def pop_all(self): new_stack = type(self)() new_stack._exit_callbacks = self._exit_callbacks self._exit_callbacks = deque() return new_stack def push(self, exit): _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: self._push_exit_callback(exit) else: self._push_cm_exit(exit, exit_method) return exit
BSD 3-Clause New or Revised License
habout632/stargan2
model_v1.py
Discriminator.__init__
python
def __init__(self, repeat_num=5, channel_multiplier=32, num_domains=2, dimension=1): super(Discriminator, self).__init__() curr_dim = channel_multiplier self.conv1x1 = nn.Conv2d(3, channel_multiplier, kernel_size=1) self.resblk1 = PreActDBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool1 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk2 = PreActDBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool2 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk3 = PreActDBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool3 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk4 = PreActDBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool4 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk5 = PreActDBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool5 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk6 = PreActDBlock(dim_in=curr_dim, dim_out=curr_dim) self.avgpool6 = nn.AvgPool2d(2) self.curr_dim = curr_dim self.lrelu1 = nn.LeakyReLU(0.01) self.conv4x4 = nn.Conv2d(curr_dim, curr_dim, kernel_size=4) self.lrelu2 = nn.LeakyReLU(0.01) self.outputs = nn.ModuleList([nn.Linear(curr_dim, 1) for _ in range(num_domains)])
:param repeat_num: :param channel_multiplier: 16 for style encoder, 32 for discriminator :param num_domains: :param dimension: Style Encoder 64 for style code, Discriminator 1 for real/fake classification
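A quick shape sanity check implied by the constructor above: six AvgPool2d(2) stages halve the spatial size six times, so a 256x256 input reaches the final kernel_size=4 convolution as a 4x4 map. The 256x256 input size is an assumption for illustration, not something stated in the source.

channels, size = 32, 256                       # discriminator default channel_multiplier, assumed input size
for _ in range(5):
    channels, size = channels * 2, size // 2   # resblk1-5 double the channels, the pools halve H/W
size //= 2                                     # resblk6 keeps the channel count, avgpool6 halves H/W again
print(channels, size)                          # 1024 4 -> conv4x4 reduces the 4x4 map to 1x1 per image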
https://github.com/habout632/stargan2/blob/c4c87b274ccf652f88771a71ab702de798594c1f/model_v1.py#L692-L751
import torch.nn as nn import torch import torch.nn.functional as F from math import sqrt class PreActDBlock(nn.Module): expansion = 1 def __init__(self, dim_in, dim_out, stride=1, bias=True): super(PreActDBlock, self).__init__() self.conv1 = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=bias) self.conv2 = nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=bias) if dim_in != self.expansion * dim_out: self.shortcut = nn.Sequential( nn.Conv2d(dim_in, self.expansion * dim_out, kernel_size=1, stride=stride, bias=False) ) def forward(self, x): out = F.leaky_relu(x, 0.2) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.leaky_relu(out, 0.2)) out += shortcut return out class PreActEBlock(nn.Module): expansion = 1 def __init__(self, dim_in, dim_out, stride=1, bias=True): super(PreActEBlock, self).__init__() self.conv1 = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=bias) self.conv2 = nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=bias) if dim_in != self.expansion * dim_out: self.shortcut = nn.Sequential( nn.Conv2d(dim_in, self.expansion * dim_out, kernel_size=1, stride=stride, bias=False) ) def forward(self, x): out = F.relu(x) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.relu(out)) out += shortcut return out class PreActDownsampleBlock(nn.Module): expansion = 1 def __init__(self, dim_in, dim_out, stride=1, bias=True): super(PreActDownsampleBlock, self).__init__() self.in1 = nn.InstanceNorm2d(dim_in) self.conv1 = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=bias) self.in2 = nn.InstanceNorm2d(dim_out) self.conv2 = nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=bias) if dim_in != self.expansion * dim_out: self.shortcut = nn.Sequential( nn.Conv2d(dim_in, self.expansion * dim_out, kernel_size=1, stride=stride, bias=bias) ) def forward(self, x): out = F.relu(self.in1(x)) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.relu(self.in2(out))) out += shortcut return out class PreActInterBlock(nn.Module): expansion = 1 def __init__(self, dim_in, dim_out, stride=1, bias=True): super(PreActInterBlock, self).__init__() self.adain1 = AdaptiveInstanceNorm(in_channel=dim_in, style_dim=64, is_upsample=False) self.conv1 = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=bias) self.adain2 = AdaptiveInstanceNorm(in_channel=dim_in, style_dim=64, is_upsample=False) self.conv2 = nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=bias) if dim_in != self.expansion * dim_out: self.shortcut = nn.Sequential( nn.Conv2d(dim_in, self.expansion * dim_out, kernel_size=1, stride=stride, bias=bias) ) def forward(self, x, style_code): out = F.relu(self.adain1(x, style_code)) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.relu(self.adain2(out, style_code))) out += shortcut return out class PreActUpsampleBlock(nn.Module): expansion = 1 def __init__(self, dim_in, dim_out, stride=1, bias=True): super(PreActUpsampleBlock, self).__init__() self.adain1 = AdaptiveInstanceNorm(dim_in, 64, False) self.conv1 = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=bias) self.adain2 = AdaptiveInstanceNorm(dim_in, 64, True) self.conv2 = nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=bias) if 
dim_in != self.expansion * dim_out: self.shortcut = nn.Sequential( nn.Conv2d(dim_in, self.expansion * dim_out, kernel_size=1, stride=stride, bias=bias) ) def forward(self, x, style_code): out = F.relu(self.adain1(x, style_code)) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.relu(self.adain2(out, style_code))) out += shortcut return out def calc_mean_std(feat, eps=1e-5): size = feat.size() N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def adaptive_instance_normalization(content_feat, style_feat): size = content_feat.size() style_mean, style_std = calc_mean_std(style_feat) content_mean, content_std = calc_mean_std(content_feat) normalized_feat = (content_feat - content_mean.expand( size)) / content_std.expand(size) return normalized_feat * style_std.expand(size) + style_mean.expand(size) class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) def equal_lr(module, name='weight'): EqualLR.apply(module, name) return module class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() linear = nn.Linear(in_dim, out_dim) linear.weight.data.normal_() linear.bias.data.zero_() self.linear = equal_lr(linear) def forward(self, input): return self.linear(input) class AdaptiveInstanceNorm(nn.Module): def __init__(self, in_channel, style_dim, is_upsample=False): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) if not is_upsample: self.style = EqualLinear(style_dim, in_channel * 2) else: self.style = EqualLinear(style_dim, in_channel) self.style.linear.bias.data[:in_channel] = 1 self.style.linear.bias.data[in_channel:] = 0 def forward(self, content, style): style = self.style(style).unsqueeze(2).unsqueeze(3) gamma, beta = style.chunk(2, 1) out = self.norm(content) out = gamma * out + beta return out class Generator(nn.Module): def __init__(self, conv_dim=64): super(Generator, self).__init__() self.input_conv = nn.Conv2d(3, 32, kernel_size=1) curr_dim = conv_dim self.resblk1 = PreActDownsampleBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool1 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk2 = PreActDownsampleBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool2 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk3 = PreActDownsampleBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool3 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk4 = PreActDownsampleBlock(dim_in=curr_dim, dim_out=curr_dim * 2) self.avgpool4 = nn.AvgPool2d(2) curr_dim = curr_dim * 2 self.resblk1_bottleneck = PreActDownsampleBlock(dim_in=curr_dim, dim_out=curr_dim) self.resblk2_bottleneck = PreActDownsampleBlock(dim_in=curr_dim, dim_out=curr_dim) self.resblk3_bottleneck = PreActInterBlock(dim_in=curr_dim, dim_out=curr_dim) self.resblk4_bottleneck = PreActInterBlock(dim_in=curr_dim, dim_out=curr_dim) self.resblk1_upsample = PreActUpsampleBlock(dim_in=curr_dim, 
dim_out=curr_dim // 2) curr_dim = curr_dim // 2 self.resblk2_upsample = PreActUpsampleBlock(dim_in=curr_dim, dim_out=curr_dim // 2) curr_dim = curr_dim // 2 self.resblk3_upsample = PreActUpsampleBlock(dim_in=curr_dim, dim_out=curr_dim // 2) curr_dim = curr_dim // 2 self.resblk4_upsample = PreActUpsampleBlock(dim_in=curr_dim, dim_out=curr_dim // 2) curr_dim = curr_dim // 2 self.output_conv = nn.Conv2d(curr_dim, 3, kernel_size=1) def forward(self, x, style_code): output = self.input_conv(x) output = self.resblk1(output) output = self.avgpool1(output) output = self.resblk2(output) output = self.avgpool2(output) output = self.resblk3(output) output = self.avgpool3(output) output = self.resblk4(output) output = self.avgpool4(output) output = self.resblk1_bottleneck(output) output = self.resblk2_bottleneck(output) output = self.resblk3_bottleneck(output, style_code) output = self.resblk4_bottleneck(output, style_code) output = F.interpolate(output, scale_factor=2, mode='nearest') output = self.resblk1_upsample(output, style_code) output = F.interpolate(output, scale_factor=2, mode='nearest') output = self.resblk2_upsample(output, style_code) output = F.interpolate(output, scale_factor=2, mode='nearest') output = self.resblk3_upsample(output, style_code) output = F.interpolate(output, scale_factor=2, mode='nearest') output = self.resblk4_upsample(output, style_code) return self.output_conv(output) class Discriminator(nn.Module):
MIT License
monkeython/loremipsum
loremipsum/tests/testcases.py
TestSerializationScheme._dump
python
def _dump(self, url, **args): self._sample.dump(url, **args)
Apply the Sample.dump method.
https://github.com/monkeython/loremipsum/blob/62c699f46ffa4b360a8f8031ef509c4be82114a4/loremipsum/tests/testcases.py#L50-L52
import types import unittest import loremipsum class TestPackagesPlugs(unittest.TestCase): def test_get(self): module = self._package.get(self._plug_name) Sample = loremipsum.generator.Sample self.assertIsInstance(module, (types.ModuleType, Sample)) attrs = [getattr(self._package, attr) for attr in dir(self._package)] self.assertIn(module, attrs) def test_set_default(self): module = self._package.get(self._plug_name) self._package.set_default(self._plug_name) self.assertIs(module, self._package.DEFAULT) def test_registered(self): _REGISTERED = loremipsum.plugs._REGISTERED registered = self._package.registered() self.assertIsInstance(registered, dict) self.assertIn(self._plug_name, registered) self.assertEqual(registered, _REGISTERED[self._package.__name__]) self.assertIsNot(registered, _REGISTERED[self._package.__name__]) def test_DEFAULT(self): classes = (types.ModuleType, loremipsum.generator.Sample) self.assertIs(self._package.DEFAULT, self._package.get(self._DEFAULT)) self.assertIsInstance(self._package.DEFAULT, classes) class TestSerializationScheme(unittest.TestCase): def test_dump_load(self): for url, args in self._urls.items(): self._dump(url, **args) self._load(url, **args) self._remove(url, **args)
BSD 3-Clause New or Revised License
ryanwang520/aiothrift
aiothrift/pool.py
ThriftPool.acquire
python
async def acquire(self): if self.closed: raise PoolClosedError("Pool is closed") async with self._cond: if self.closed: raise PoolClosedError("Pool is closed") while True: await self.fill_free(override_min=True) if self.freesize: conn = self._pool.popleft() assert not conn.closed, conn assert conn not in self._used, (conn, self._used) self._used.add(conn) return conn else: await self._cond.wait()
Acquires a connection from the free pool, creating a new connection if needed.
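A hedged usage sketch of the acquire/release discipline the pool expects; the thrift IDL file, service name, RPC name, and address are placeholders, and a reachable thrift server is assumed.

import asyncio
import thriftpy2
from aiothrift.pool import create_pool

pingpong_thrift = thriftpy2.load('pingpong.thrift', module_name='pingpong_thrift')  # placeholder IDL

async def main():
    pool = await create_pool(pingpong_thrift.PingPong, ('127.0.0.1', 6000),
                             minsize=1, maxsize=5)
    conn = await pool.acquire()              # waits until a free connection is available
    try:
        print(await conn.execute('ping'))    # 'ping' is a placeholder RPC defined by the IDL
    finally:
        pool.release(conn)                   # always hand the connection back to the pool
    pool.close()
    await pool.wait_closed()

asyncio.run(main())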
https://github.com/ryanwang520/aiothrift/blob/5bc4aae8bfa98f1525f1db71df7fbe637301e3a7/aiothrift/pool.py#L144-L166
import asyncio import contextvars as cv import collections import functools from .connection import create_connection from .log import logger from .errors import PoolClosedError acquired_connection = cv.ContextVar("acquired_connection") async def create_pool( service, address=("127.0.0.1", 6000), *, minsize=1, maxsize=10, timeout=None, framed=False ): pool = ThriftPool( service, address, minsize=minsize, maxsize=maxsize, timeout=timeout, framed=framed, ) try: await pool.fill_free(override_min=False) except Exception: pool.close() raise return pool class ThriftPool: def __init__(self, service, address, *, minsize, maxsize, timeout=None, framed=False): assert isinstance(minsize, int) and minsize >= 0, ( "minsize must be int >= 0", minsize, type(minsize), ) assert maxsize is not None, "Arbitrary pool size is disallowed." assert isinstance(maxsize, int) and maxsize > 0, ( "maxsize must be int > 0", maxsize, type(maxsize), ) assert minsize <= maxsize, ("Invalid pool min/max sizes", minsize, maxsize) self._address = address self.minsize = minsize self.maxsize = maxsize self._pool = collections.deque(maxlen=maxsize) self._used = set() self._acquiring = 0 self._cond = asyncio.Condition() self._service = service self._timeout = timeout self._framed = framed self.closed = False self._release_tasks = set() self._init_rpc_apis() def _init_rpc_apis(self): for api in self._service.thrift_services: if not hasattr(self, api): setattr(self, api, functools.partial(self.execute, api)) else: logger.warning( "api name {0} is conflicted with connection attribute " '{0}, while you can still call this api by `execute("{0}")`'.format( api ) ) async def execute(self, cmd, *args, **kwargs): conn = await self.acquire() try: return await conn.execute(cmd, *args, **kwargs) finally: self.release(conn) @property def size(self): return self.freesize + len(self._used) + self._acquiring @property def freesize(self): return len(self._pool) async def clear(self): while self._pool: conn = self._pool.popleft() conn.close() def close(self): self.closed = True conn_num = 0 while self._pool: conn = self._pool.popleft() conn.close() conn_num += 1 for conn in self._used: conn.close() conn_num += 1 logger.debug("Closed %d connections", conn_num) async def wait_closed(self): for task in self._release_tasks: await asyncio.shield(task)
MIT License
arubacloud/pyarubacloud
ArubaCloud/base/__init__.py
Request.__init__
python
def __init__(self, logger=None, Username=str(), Password=str(), SessionId=None, ApplicationId=None, RequestId=None, uri=None): super(Request, self).__init__() self.logger = logger self.Username = Username self.Password = Password self.SessionId = SessionId if SessionId is not None else self.__class__.__name__ self.ApplicationId = ApplicationId if ApplicationId is not None else self.__class__.__name__ self.RequestId = RequestId if RequestId is not None else self.__class__.__name__ self.uri = uri
:type logger: ArubaLog :type Username: str :type Password: str :type SessionId: str :type ApplicationId: str :type RequestId: str :type uri: str :param logger: Logger object :param Username: ArubaCloud Service Login Username :param Password: ArubaCloud Service Login Password :param SessionId: Can be Null, otherwise the current SessionId :param ApplicationId: Same as RequestId :param RequestId: The name of the Request :param uri: WCF base URI
https://github.com/arubacloud/pyarubacloud/blob/5db264a611c78360d6a661e2dc41cdb6794ecea2/ArubaCloud/base/__init__.py#L68-L93
import json from abc import ABCMeta, abstractmethod from ArubaCloud.helper import Http import jsonpickle class JsonInterfaceBase(object): __metaclass__ = ABCMeta def __init__(self): pass def gen_def_json_scheme(self, req, method_fields=None): json_dict = dict( ApplicationId=req, RequestId=req, SessionId=req, Password=self.auth.password, Username=self.auth.username ) if method_fields is not None: json_dict.update(method_fields) self.logger.debug(json.dumps(json_dict)) return json.dumps(json_dict) def call_method_post(self, method, json_scheme, debug=False): url = '{}/{}'.format(self.wcf_baseurl, method) headers = {'Content-Type': 'application/json', 'Content-Length': str(len(json_scheme))} response = Http.post(url=url, data=json_scheme, headers=headers) parsed_response = json.loads(response.content.decode('utf-8')) if response.status_code != 200: from ArubaCloud.base.Errors import MalformedJsonRequest raise MalformedJsonRequest("Request: {}, Status Code: {}".format(json_scheme, response.status_code)) if parsed_response['Success'] is False: from ArubaCloud.base.Errors import RequestFailed raise RequestFailed("Request: {}, Response: {}".format(json_scheme, parsed_response)) if debug is True: msg = "Response Message: {}\nHTTP Status Code: {}".format(parsed_response, response.status_code) self.logger.debug(msg) print(msg) return parsed_response class IRequest(object): __metaclass__ = ABCMeta def __init__(self): pass @abstractmethod def _commit(self): raise NotImplementedError @abstractmethod def commit(self): raise NotImplementedError class Request(IRequest):
Apache License 2.0
michaelfranzl/gerbil
gerbil.py
Gerbil.stream
python
def stream(self, lines): self._load_lines_into_buffer(lines) self.job_run()
A more convenient alias for `write(lines)` followed by `job_run()`. @param lines A string of G-Code commands, with each command separated by \n.
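A hedged call-pattern sketch; the import path, serial device path, and G-code lines are placeholders, and real Grbl hardware is required, so this is illustrative only.

from gerbil import Gerbil   # assumed import path

def on_gerbil_event(event, *data):
    print(event, data)                         # Gerbil reports state changes via this callback

grbl = Gerbil(on_gerbil_event)
grbl.cnect('/dev/ttyUSB0', 115200)             # placeholder serial port
grbl.poll_start()
grbl.stream("G0 X10 Y10\nG1 X20 Y20 F500\n")   # load the lines into the buffer, then run the job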
https://github.com/michaelfranzl/gerbil/blob/e6828fd5a682ec36970de83d38a0bea46b765d8d/gerbil.py#L629-L637
import logging import time import re import threading import atexit import os import collections from queue import Queue from .interface import Interface from .callbackloghandler import CallbackLogHandler from gcode_machine.gcode_machine import GcodeMachine class Gerbil: __version__ = "0.5.0" def __init__(self, callback, name="mygrbl"): self.name = name self.cmode = None self.cmpos = (0, 0, 0) self.cwpos = (0, 0, 0) self.gps = [ "0", "54", "17", "21", "90", "94", "0", "0", "5", "0", "99", "0", ] self.poll_interval = 0.2 self.settings = { 130: { "val": "1000", "cmt": "width" }, 131: { "val": "1000", "cmt": "height" } } self.settings_hash = { "G54": (-600, -300, 0), "G55": (-400, -300, 0), "G56": (-200, -300, 0), "G57": (-600, -600, 0), "G58": (-400, -600, 0), "G59": (-200, -600, 0), "G28": (0, 0, 0), "G30": (0, 0, 0), "G92": (0, 0, 0), "TLO": 0, "PRB": (0, 0, 0), } self.gcode_parser_state_requested = False self.hash_state_requested = False self.logger = logging.getLogger("gerbil") self.logger.setLevel(5) self.logger.propagate = False self.target = "firmware" self.connected = False self.preprocessor = GcodeMachine() self.preprocessor.callback = self._preprocessor_callback self.travel_dist_buffer = {} self.travel_dist_current = {} self.is_standstill = False self._ifacepath = None self._last_setting_number = 132 self._last_cmode = None self._last_cmpos = (0, 0, 0) self._last_cwpos = (0, 0, 0) self._standstill_watchdog_increment = 0 self._rx_buffer_size = 128 self._rx_buffer_fill = [] self._rx_buffer_backlog = [] self._rx_buffer_backlog_line_number = [] self._rx_buffer_fill_percent = 0 self._current_line = "" self._current_line_sent = True self._streaming_mode = None self._wait_empty_buffer = False self.streaming_complete = True self.job_finished = True self._streaming_src_end_reached = True self._streaming_enabled = True self._error = False self._incremental_streaming = False self._hash_state_sent = False self.buffer = [] self.buffer_size = 0 self._current_line_nr = 0 self.buffer_stash = [] self.buffer_size_stash = 0 self._current_line_nr_stash = 0 self._poll_keep_alive = False self._iface_read_do = False self._thread_polling = None self._thread_read_iface = None self._iface = None self._queue = Queue() self._loghandler = None self._counter = 0 self._callback = callback atexit.register(self.disconnect) self._callback("on_settings_downloaded", self.settings) self._callback("on_hash_stateupdate", self.settings_hash) self.preprocessor.cs_offsets = self.settings_hash self._callback("on_gcode_parser_stateupdate", self.gps) def setup_logging(self, handler=None): if handler: self._loghandler = handler else: lh = CallbackLogHandler() self._loghandler = lh self.logger.addHandler(self._loghandler) self._loghandler.callback = self._callback def cnect(self, path=None, baudrate=115200): if path == None or path.strip() == "": return else: self._ifacepath = path if self._iface == None: self.logger.debug("{}: Setting up interface on {}".format(self.name, self._ifacepath)) self._iface = Interface("iface_" + self.name, self._ifacepath, baudrate) self._iface.start(self._queue) else: self.logger.info("{}: Cannot start another interface. 
There is already an interface {}.".format(self.name, self._iface)) self._iface_read_do = True self._thread_read_iface = threading.Thread(target=self._onread) self._thread_read_iface.start() self.softreset() def disconnect(self): if self.is_connected() == False: return self.poll_stop() self._iface.stop() self._iface = None self.logger.debug("{}: Please wait until reading thread has joined...".format(self.name)) self._iface_read_do = False self._queue.put("dummy_msg_for_joining_thread") self._thread_read_iface.join() self.logger.debug("{}: Reading thread successfully joined.".format(self.name)) self.connected = False self._callback("on_disconnected") def softreset(self): self._iface.write("\x18") self.update_preprocessor_position() def abort(self): if self.is_connected() == False: return self.softreset() def hold(self): if self.is_connected() == False: return self._iface_write("!") def resume(self): if self.is_connected() == False: return self._iface_write("~") def killalarm(self): self._iface_write("$X\n") def homing(self): self._iface_write("$H\n") def poll_start(self): if self.is_connected() == False: return self._poll_keep_alive = True self._last_cmode = None if self._thread_polling == None: self._thread_polling = threading.Thread(target=self._poll_state) self._thread_polling.start() self.logger.debug("{}: Polling thread started".format(self.name)) else: self.logger.debug("{}: Polling thread already running...".format(self.name)) def poll_stop(self): if self.is_connected() == False: return if self._thread_polling != None: self._poll_keep_alive = False self.logger.debug("{}: Please wait until polling thread has joined...".format(self.name)) self._thread_polling.join() self.logger.debug("{}: Polling thread has successfully joined...".format(self.name)) else: self.logger.debug("{}: Cannot start a polling thread. Another one is already running.".format(self.name)) self._thread_polling = None def set_feed_override(self, val): self.preprocessor.do_feed_override = val def request_feed(self, requested_feed): self.preprocessor.request_feed = float(requested_feed) @property def incremental_streaming(self): return self._incremental_streaming @incremental_streaming.setter def incremental_streaming(self, onoff): self._incremental_streaming = onoff if self._incremental_streaming == True: self._wait_empty_buffer = True self.logger.debug("{}: Incremental streaming set to {}".format(self.name, self._incremental_streaming)) def send_immediately(self, line): bytes_in_firmware_buffer = sum(self._rx_buffer_fill) if bytes_in_firmware_buffer > 0: self.logger.error("Firmware buffer has {:d} unprocessed bytes in it. Will not send {}".format(bytes_in_firmware_buffer, line)) return if self.cmode == "Alarm": self.logger.error("Grbl is in ALARM state. Will not send {}.".format(line)) return if self.cmode == "Hold": self.logger.error("Grbl is in HOLD state. Will not send {}.".format(line)) return if "$#" in line: self.hash_state_requested = True return self.preprocessor.set_line(line) self.preprocessor.strip() self.preprocessor.tidy() self.preprocessor.parse_state() self.preprocessor.override_feed() self._iface_write(self.preprocessor.line + "\n")
MIT License
felixgwu/fastfusionnet
qa/general_utils.py
pre_proc_qanet
python
def pre_proc_qanet(text): text = re.sub(u'-|\u2010|\u2011|\u2012|\u2013|\u2014|\u2015|%|\[|\]|:|\(|\)|/', space_extend, text) text = extra_split_chars_re.sub(space_extend, text) text = text.strip(' \n') text = re.sub('\s+', ' ', text) return text
Text preprocessing adapted from the QANet code.
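A hedged illustration of the normalisation, assuming the function and its module-level regexes are importable from qa.general_utils; the input sentence is made up and the shown output is approximate.

from qa.general_utils import pre_proc_qanet   # assumed importable

text = "a well-known (quick) test: 95%"
print(pre_proc_qanet(text))
# roughly: "a well - known ( quick ) test : 95 %"
# splitting characters such as '-', '(', ')', ':' and '%' are padded with spaces,
# then runs of whitespace are collapsed to a single space and the result is stripped.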
https://github.com/felixgwu/fastfusionnet/blob/f55bf739a8111a62a5f605e45c0de39474bcb341/qa/general_utils.py#L61-L68
import re import os import sys import random import string import logging import argparse import unicodedata from shutil import copyfile from datetime import datetime from collections import Counter import torch import msgpack import jsonlines import numpy as np def normalize_text(text): return unicodedata.normalize('NFD', text) def load_glove_vocab(file, wv_dim): vocab = set() with open(file, encoding="utf8") as f: for line in f: elems = line.split() token = normalize_text(''.join(elems[0:-wv_dim])) vocab.add(token) return vocab def pre_proc_sru(text): text = re.sub('\s+', ' ', text) return text def space_extend(matchobj): return ' ' + matchobj.group(0) + ' ' def pre_proc_fusion(text): text = re.sub(u'-|\u2010|\u2011|\u2012|\u2013|\u2014|\u2015|%|\[|\]|:|\(|\)|/', space_extend, text) text = text.strip(' \n') text = re.sub('\s+', ' ', text) return text extra_split_chars = (u'-', u'£', u'€', u'¥', u'¢', u'₹', u'\u2212', u'\u2014', u'\u2013', u'/', u'~', u'"', u"'", u'\ud01C', u'\u2019', u'\u201D', u'\u2018', u'\u00B0') extra_split_tokens = ( u'``', u'(?<=[^_])_(?=[^_])', u"''", u'[' + u''.join(extra_split_chars) + ']') extra_split_chars_re = re.compile(u'(' + u'|'.join(extra_split_tokens) + u')')
MIT License
google/clusterfuzz
src/clusterfuzz/_internal/build_management/revisions.py
_add_components_from_dict
python
def _add_components_from_dict(deps_dict, vars_dict, revisions_dict): if not deps_dict: return for key, value in six.iteritems(deps_dict): url = rev = None if isinstance(value, str): url, _, rev = value.partition('@') elif isinstance(value, dict): if 'revision' in value: url = value['url'] rev = value['revision'] elif 'url' in value and value['url'] is not None: url, _, rev = value['url'].partition('@') if url and rev: url = url.format(**vars_dict) rev = rev.format(**vars_dict) revisions_dict[key] = { 'name': _get_component_display_name(key), 'rev': rev, 'url': url }
Add components from a dict representing a DEPS file.
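A hedged illustration of the DEPS-style structures the helper accepts, using toy component names, URLs, and revisions rather than a real Chromium DEPS file; it assumes the function and its _get_component_display_name helper are importable from the revisions module.

deps = {
    'src/third_party/lib_a': 'https://example.org/lib_a.git@{lib_a_rev}',
    'src/third_party/lib_b': {'url': 'https://example.org/lib_b.git@abcd1234'},
    'src/third_party/lib_c': {'url': 'https://example.org/lib_c.git', 'revision': 'deadbeef'},
}
vars_dict = {'lib_a_rev': '1234abcd'}
revisions = {}
_add_components_from_dict(deps, vars_dict, revisions)
# revisions now maps each component key to {'name': ..., 'rev': ..., 'url': ...},
# with {placeholders} in the url/rev strings expanded from vars_dict.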
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/build_management/revisions.py#L50-L74
import ast import base64 import bisect import os import re import time import urllib.parse import six from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import source_mapper from clusterfuzz._internal.config import local_config from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment CHROMIUM_GIT_ROOT_URL = 'https://chromium.googlesource.com' CRREV_NUMBERING_URL = ( 'https://cr-rev.appspot.com/_ah/api/crrev/v1/get_numbering') CLANK_URL = 'https://chrome-internal.googlesource.com/clank/internal/apps.git' CLANK_REVISION_FILE_COMPONENT_REGEX = re.compile( r'.*["]([^"]+)["]\s*:\s*["]([^"]+)["]') COMPONENT_NAMES_BLACKLIST = [ 'api', 'bin', 'data', 'dist', 'lib', 'pylib', 'source', 'src' ] DISK_CACHE_SIZE = 1000 SOURCE_MAP_EXTENSION = '.srcmap.json' FIND_BRANCHED_FROM = re.compile(r'Cr-Branched-From:.*master@\{#(\d+)\}')
Apache License 2.0
secynic/ipwhois
ipwhois/net.py
Net.get_asn_whois
python
def get_asn_whois(self, retry_count=3): try: conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) conn.settimeout(self.timeout) log.debug('ASN query for {0}'.format(self.address_str)) conn.connect((CYMRU_WHOIS, 43)) conn.send(( ' -r -a -c -p -f {0}{1}'.format( self.address_str, '\r\n') ).encode()) data = '' while True: d = conn.recv(4096).decode() data += d if not d: break conn.close() return str(data) except (socket.timeout, socket.error) as e: log.debug('ASN query socket error: {0}'.format(e)) if retry_count > 0: log.debug('ASN query retrying (count: {0})'.format( str(retry_count))) return self.get_asn_whois(retry_count - 1) else: raise ASNLookupError( 'ASN lookup failed for {0}.'.format(self.address_str) ) except: raise ASNLookupError( 'ASN lookup failed for {0}.'.format(self.address_str) )
The function for retrieving ASN information for an IP address from Cymru via port 43/tcp (WHOIS). Args: retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. Returns: str: The raw ASN data. Raises: ASNLookupError: The ASN lookup failed.
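A hedged usage sketch; it performs a live lookup against whois.cymru.com, so network access is required, and the IP address is only an example.

from ipwhois.net import Net

net = Net('74.125.225.229')            # example public IPv4 address
raw = net.get_asn_whois(retry_count=3)
print(raw)                             # raw pipe-delimited ASN data as returned by the Cymru service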
https://github.com/secynic/ipwhois/blob/a5d5b65ce3b1d4b2c20bba2e981968f54e1b5e9e/ipwhois/net.py#L282-L346
import sys import socket import dns.resolver import json from collections import namedtuple import logging from time import sleep import dns.rdtypes.ANY.TXT from .exceptions import (IPDefinedError, ASNLookupError, BlacklistError, WhoisLookupError, HTTPLookupError, HostLookupError, HTTPRateLimitError, WhoisRateLimitError) from .whois import RIR_WHOIS from .asn import ASN_ORIGIN_WHOIS from .utils import ipv4_is_defined, ipv6_is_defined if sys.version_info >= (3, 3): from ipaddress import (ip_address, IPv4Address, IPv6Address) else: from ipaddr import (IPAddress as ip_address, IPv4Address, IPv6Address) try: from urllib.request import (OpenerDirector, ProxyHandler, build_opener, Request, URLError, HTTPError) from urllib.parse import urlencode except ImportError: from urllib2 import (OpenerDirector, ProxyHandler, build_opener, Request, URLError, HTTPError) from urllib import urlencode log = logging.getLogger(__name__) ARIN = 'http://whois.arin.net/rest/nets;q={0}?showDetails=true&showARIN=true' CYMRU_WHOIS = 'whois.cymru.com' IPV4_DNS_ZONE = '{0}.origin.asn.cymru.com' IPV6_DNS_ZONE = '{0}.origin6.asn.cymru.com' BLACKLIST = [ 'root.rwhois.net' ] ORG_MAP = { 'ARIN': 'arin', 'VR-ARIN': 'arin', 'RIPE': 'ripencc', 'APNIC': 'apnic', 'LACNIC': 'lacnic', 'AFRINIC': 'afrinic', 'DNIC': 'arin' } class Net: def __init__(self, address, timeout=5, proxy_opener=None): if isinstance(address, IPv4Address) or isinstance( address, IPv6Address): self.address = address else: self.address = ip_address(address) self.timeout = timeout self.dns_resolver = dns.resolver.Resolver() self.dns_resolver.timeout = timeout self.dns_resolver.lifetime = timeout if isinstance(proxy_opener, OpenerDirector): self.opener = proxy_opener else: handler = ProxyHandler() self.opener = build_opener(handler) self.address_str = self.address.__str__() self.version = self.address.version if self.version == 4: is_defined = ipv4_is_defined(self.address_str) if is_defined[0]: raise IPDefinedError( 'IPv4 address {0} is already defined as {1} via ' '{2}.'.format( self.address_str, is_defined[1], is_defined[2] ) ) split = self.address_str.split('.') split.reverse() self.reversed = '.'.join(split) self.dns_zone = IPV4_DNS_ZONE.format(self.reversed) else: is_defined = ipv6_is_defined(self.address_str) if is_defined[0]: raise IPDefinedError( 'IPv6 address {0} is already defined as {1} via ' '{2}.'.format( self.address_str, is_defined[1], is_defined[2] ) ) exploded = self.address.exploded groups = exploded.split(':') for index, value in reversed(list(enumerate(groups))): if value == '0000': del groups[index] else: break exploded = ':'.join(groups) val = str(exploded).replace(':', '') val = val[::-1] self.reversed = '.'.join(val) self.dns_zone = IPV6_DNS_ZONE.format(self.reversed) def get_asn_dns(self): try: log.debug('ASN query for {0}'.format(self.dns_zone)) data = self.dns_resolver.query(self.dns_zone, 'TXT') return list(data) except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e: raise ASNLookupError( 'ASN lookup failed (DNS {0}) for {1}.'.format( e.__class__.__name__, self.address_str) ) except: raise ASNLookupError( 'ASN lookup failed for {0}.'.format(self.address_str) ) def get_asn_verbose_dns(self, asn=None): if asn[0:2] != 'AS': asn = 'AS{0}'.format(asn) zone = '{0}.asn.cymru.com'.format(asn) try: log.debug('ASN verbose query for {0}'.format(zone)) data = self.dns_resolver.query(zone, 'TXT') return str(data[0]) except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers, 
dns.resolver.NoAnswer, dns.exception.Timeout) as e: raise ASNLookupError( 'ASN lookup failed (DNS {0}) for {1}.'.format( e.__class__.__name__, asn) ) except: raise ASNLookupError( 'ASN lookup failed for {0}.'.format(asn) )
BSD 2-Clause Simplified License
rhayes777/pyautofit
autofit/non_linear/paths/database.py
DatabasePaths.parent
python
def parent( self, parent: "DatabasePaths" ): if not ( parent is None or isinstance(parent, DatabasePaths) ): raise TypeError( "The parent of search that uses the database must also use the database" ) self._parent = parent
The search performed before this search; for example, a search whose result is then compared against searches during a grid search. For database paths, the parent must also use database paths.
https://github.com/rhayes777/pyautofit/blob/1eb6819cc60df8f6fb7d03bd95eaf074409d9e49/autofit/non_linear/paths/database.py#L35-L52
import shutil from typing import Optional from sqlalchemy.orm.exc import NoResultFound from .abstract import AbstractPaths from ...database.model import Fit class DatabasePaths(AbstractPaths): def __init__( self, session, name: Optional[str] = None, path_prefix: Optional[str] = None, is_identifier_in_paths=True, parent=None, save_all_samples=False, unique_tag: Optional["str"] = None ): super().__init__( name=name, path_prefix=path_prefix, is_identifier_in_paths=is_identifier_in_paths, parent=parent ) self.session = session self._fit = None self.save_all_samples = save_all_samples self.unique_tag = unique_tag parent: "DatabasePaths" @AbstractPaths.parent.setter
MIT License
spotify/luigi
luigi/task_register.py
Register.get_task_cls
python
def get_task_cls(cls, name): task_cls = cls._get_reg().get(name) if not task_cls: raise TaskClassNotFoundException(cls._missing_task_msg(name)) if task_cls == cls.AMBIGUOUS_CLASS: raise TaskClassAmbigiousException('Task %r is ambiguous' % name) return task_cls
Returns an unambiguous class or raises an exception.
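A hedged usage sketch, assuming luigi is installed; ToyTask is a throwaway task defined here purely so there is something registered to look up.

import luigi
from luigi.task_register import Register

class ToyTask(luigi.Task):
    pass

print(Register.get_task_cls('ToyTask'))   # resolves the registered class by its task family name
# An unknown name raises TaskClassNotFoundException; an ambiguous one raises TaskClassAmbigiousException.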
https://github.com/spotify/luigi/blob/ad5ddc9875e54cca8209863a8ec7bcc5d13ece8a/luigi/task_register.py#L172-L182
import abc import logging logger = logging.getLogger('luigi-interface') class TaskClassException(Exception): pass class TaskClassNotFoundException(TaskClassException): pass class TaskClassAmbigiousException(TaskClassException): pass class Register(abc.ABCMeta): __instance_cache = {} _default_namespace_dict = {} _reg = [] AMBIGUOUS_CLASS = object() def __new__(metacls, classname, bases, classdict): cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) cls._namespace_at_class_time = metacls._get_namespace(cls.__module__) metacls._reg.append(cls) return cls def __call__(cls, *args, **kwargs): def instantiate(): return super(Register, cls).__call__(*args, **kwargs) h = cls.__instance_cache if h is None: return instantiate() params = cls.get_params() param_values = cls.get_param_values(params, args, kwargs) k = (cls, tuple(param_values)) try: hash(k) except TypeError: logger.debug("Not all parameter values are hashable so instance isn't coming from the cache") return instantiate() if k not in h: h[k] = instantiate() return h[k] @classmethod def clear_instance_cache(cls): cls.__instance_cache = {} @classmethod def disable_instance_cache(cls): cls.__instance_cache = None @property def task_family(cls): if not cls.get_task_namespace(): return cls.__name__ else: return "{}.{}".format(cls.get_task_namespace(), cls.__name__) @classmethod def _get_reg(cls): reg = dict() for task_cls in cls._reg: if not task_cls._visible_in_registry: continue name = task_cls.get_task_family() if name in reg and (reg[name] == Register.AMBIGUOUS_CLASS or not issubclass(task_cls, reg[name])): reg[name] = Register.AMBIGUOUS_CLASS else: reg[name] = task_cls return reg @classmethod def _set_reg(cls, reg): cls._reg = [task_cls for task_cls in reg.values() if task_cls is not cls.AMBIGUOUS_CLASS] @classmethod def task_names(cls): return sorted(cls._get_reg().keys()) @classmethod def tasks_str(cls): return ','.join(cls.task_names()) @classmethod
Apache License 2.0
qiskit/qiskit-aqua
qiskit/aqua/operators/list_ops/list_op.py
ListOp.to_matrix_op
python
def to_matrix_op(self, massive: bool = False) -> OperatorBase: if self.__class__ == ListOp: return ListOp( [op.to_matrix_op(massive=massive) for op in self.oplist], **self._state() ).reduce() return self.__class__( [op.to_matrix_op(massive=massive) for op in self.oplist], coeff=self.coeff, abelian=self.abelian ).reduce()
Returns an equivalent Operator composed of only NumPy-based primitives, such as ``MatrixOp`` and ``VectorStateFn``.
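A hedged usage sketch; qiskit-aqua is deprecated, so this assumes an old 0.x install where ListOp and the Pauli singletons X and Z are still importable from qiskit.aqua.operators.

from qiskit.aqua.operators import ListOp, X, Z

op = ListOp([X, Z], coeff=2.0)
mat_op = op.to_matrix_op()
print(type(mat_op))   # still a ListOp, but its children are now NumPy/matrix-based primitives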
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/aqua/operators/list_ops/list_op.py#L461-L472
from functools import reduce from typing import List, Union, Optional, Callable, Iterator, Set, Dict, cast from numbers import Number import numpy as np from scipy.sparse import spmatrix from qiskit.circuit import QuantumCircuit, ParameterExpression from ..legacy.base_operator import LegacyBaseOperator from ..operator_base import OperatorBase from ... import AquaError from ...utils import arithmetic class ListOp(OperatorBase): def __init__(self, oplist: List[OperatorBase], combo_fn: Callable = lambda x: x, coeff: Union[int, float, complex, ParameterExpression] = 1.0, abelian: bool = False, grad_combo_fn: Optional[Callable] = None) -> None: super().__init__() self._oplist = oplist self._combo_fn = combo_fn self._coeff = coeff self._abelian = abelian self._grad_combo_fn = grad_combo_fn def _state(self, coeff: Optional[Union[int, float, complex, ParameterExpression]] = None, combo_fn: Optional[Callable] = None, abelian: Optional[bool] = None, grad_combo_fn: Optional[Callable] = None) -> Dict: return { 'coeff': coeff if coeff is not None else self.coeff, 'combo_fn': combo_fn if combo_fn is not None else self.combo_fn, 'abelian': abelian if abelian is not None else self.abelian, 'grad_combo_fn': grad_combo_fn if grad_combo_fn is not None else self.grad_combo_fn } @property def oplist(self) -> List[OperatorBase]: return self._oplist @property def combo_fn(self) -> Callable: return self._combo_fn @property def grad_combo_fn(self) -> Optional[Callable]: return self._grad_combo_fn @property def abelian(self) -> bool: return self._abelian @property def distributive(self) -> bool: return True @property def coeff(self) -> Union[int, float, complex, ParameterExpression]: return self._coeff def primitive_strings(self) -> Set[str]: return reduce(set.union, [op.primitive_strings() for op in self.oplist]) @property def num_qubits(self) -> int: num_qubits0 = self.oplist[0].num_qubits if not all(num_qubits0 == op.num_qubits for op in self.oplist): raise ValueError('Operators in ListOp have differing numbers of qubits.') return num_qubits0 def add(self, other: OperatorBase) -> OperatorBase: if self == other: return self.mul(2.0) from .summed_op import SummedOp return SummedOp([self, other]) def adjoint(self) -> OperatorBase: if self.__class__ == ListOp: return ListOp([op.adjoint() for op in self.oplist], **self._state(coeff=np.conj(self.coeff))) return self.__class__([op.adjoint() for op in self.oplist], coeff=np.conj(self.coeff), abelian=self.abelian) def traverse(self, convert_fn: Callable, coeff: Optional[Union[int, float, complex, ParameterExpression]] = None) -> OperatorBase: if coeff is None: coeff = self.coeff if self.__class__ == ListOp: return ListOp([convert_fn(op) for op in self.oplist], **self._state(coeff=coeff)) return self.__class__([convert_fn(op) for op in self.oplist], coeff=coeff, abelian=self.abelian) def equals(self, other: OperatorBase) -> bool: if not isinstance(other, type(self)) or not len(self.oplist) == len(other.oplist): return False return self.coeff == other.coeff and all( op1 == op2 for op1, op2 in zip(self.oplist, other.oplist)) __array_priority__ = 10000 def mul(self, scalar: Union[int, float, complex, ParameterExpression]) -> OperatorBase: if not isinstance(scalar, (int, float, complex, ParameterExpression)): raise ValueError('Operators can only be scalar multiplied by float or complex, not ' '{} of type {}.'.format(scalar, type(scalar))) if self.__class__ == ListOp: return ListOp(self.oplist, **self._state(coeff=scalar * self.coeff)) return self.__class__(self.oplist, 
coeff=scalar * self.coeff, abelian=self.abelian) def tensor(self, other: OperatorBase) -> OperatorBase: from .tensored_op import TensoredOp return TensoredOp([self, other]) def tensorpower(self, other: int) -> Union[OperatorBase, int]: if other == 0: return 1 if not isinstance(other, int) or other <= 0: raise TypeError('Tensorpower can only take positive int arguments') from .tensored_op import TensoredOp return TensoredOp([self] * other) def _expand_dim(self, num_qubits: int) -> 'ListOp': oplist = [op._expand_dim(num_qubits + self.num_qubits - op.num_qubits) for op in self.oplist] return ListOp(oplist, **self._state()) def permute(self, permutation: List[int]) -> 'ListOp': new_self = self circuit_size = max(permutation) + 1 try: if self.num_qubits != len(permutation): raise AquaError("New index must be defined for each qubit of the operator.") except ValueError: raise AquaError("Permute is only possible if all operators in the ListOp have the " "same number of qubits.") from ValueError if self.num_qubits < circuit_size: new_self = self._expand_dim(circuit_size - self.num_qubits) qc = QuantumCircuit(circuit_size) permutation = list(filter(lambda x: x not in permutation, range(circuit_size))) + permutation transpositions = arithmetic.transpositions(permutation) for trans in transpositions: qc.swap(trans[0], trans[1]) from qiskit.aqua.operators import CircuitOp return CircuitOp(qc.reverse_ops()) @ new_self @ CircuitOp(qc) def compose(self, other: OperatorBase, permutation: Optional[List[int]] = None, front: bool = False) -> OperatorBase: new_self, other = self._expand_shorter_operator_and_permute(other, permutation) new_self = cast(ListOp, new_self) if front: return other.compose(new_self) from .composed_op import ComposedOp return ComposedOp([new_self, other]) def power(self, exponent: int) -> OperatorBase: if not isinstance(exponent, int) or exponent <= 0: raise TypeError('power can only take positive int arguments') from .composed_op import ComposedOp return ComposedOp([self] * exponent) def to_matrix(self, massive: bool = False) -> np.ndarray: OperatorBase._check_massive('to_matrix', True, self.num_qubits, massive) mat = self.combo_fn( np.asarray([op.to_matrix(massive=massive) * self.coeff for op in self.oplist], dtype=object)) if isinstance(mat, Number): mat = [mat] return np.asarray(mat, dtype=complex) def to_spmatrix(self) -> Union[spmatrix, List[spmatrix]]: return self.combo_fn( [op.to_spmatrix() for op in self.oplist]) * self.coeff def eval(self, front: Optional[Union[str, Dict[str, complex], OperatorBase]] = None ) -> Union[OperatorBase, float, complex, list]: from ..state_fns.dict_state_fn import DictStateFn from ..state_fns.vector_state_fn import VectorStateFn if not self.distributive: raise NotImplementedError("ListOp's eval function is only defined for distributive " "ListOps.") evals = [(self.coeff * op).eval(front) for op in self.oplist] if self._combo_fn != ListOp([])._combo_fn: if all(isinstance(op, DictStateFn) for op in evals) or all(isinstance(op, VectorStateFn) for op in evals): if not all(isinstance(op, type(evals[0])) for op in evals): raise NotImplementedError("Combo_fn not yet supported for mixed " "VectorStateFn primitives") if not all(op.is_measurement == evals[0].is_measurement for op in evals): raise NotImplementedError("Combo_fn not yet supported for mixed measurement " "and non-measurement StateFns") return self.combo_fn(evals) if all(isinstance(op, OperatorBase) for op in evals): return self.__class__(evals) elif any(isinstance(op, OperatorBase) for op in 
evals): raise TypeError('Cannot handle mixed scalar and Operator eval results.') else: return self.combo_fn(evals) def exp_i(self) -> OperatorBase: if type(self) == ListOp: return ListOp([op.exp_i() for op in self.oplist], **self._state(abelian=False)) from qiskit.aqua.operators import EvolvedOp return EvolvedOp(self) def log_i(self, massive: bool = False) -> OperatorBase: if self.__class__.__name__ == ListOp.__name__: return ListOp([op.log_i(massive=massive) for op in self.oplist], **self._state(abelian=False)) return self.to_matrix_op(massive=massive).log_i(massive=massive) def __str__(self) -> str: content_string = ',\n'.join([str(op) for op in self.oplist]) main_string = "{}([\n{}\n])".format( self.__class__.__name__, self._indent(content_string, indentation=self.INDENTATION)) if self.abelian: main_string = 'Abelian' + main_string if self.coeff != 1.0: main_string = '{} * '.format(self.coeff) + main_string return main_string def __repr__(self) -> str: return "{}({}, coeff={}, abelian={})".format(self.__class__.__name__, repr(self.oplist), self.coeff, self.abelian) @property def parameters(self): params = set() for op in self.oplist: params.update(op.parameters) if isinstance(self.coeff, ParameterExpression): params.update(self.coeff.parameters) return params def assign_parameters(self, param_dict: dict) -> OperatorBase: param_value = self.coeff if isinstance(self.coeff, ParameterExpression): unrolled_dict = self._unroll_param_dict(param_dict) if isinstance(unrolled_dict, list): return ListOp([self.assign_parameters(param_dict) for param_dict in unrolled_dict]) if self.coeff.parameters <= set(unrolled_dict.keys()): binds = {param: unrolled_dict[param] for param in self.coeff.parameters} param_value = float(self.coeff.bind(binds)) return self.traverse(lambda x: x.assign_parameters(param_dict), coeff=param_value) def reduce(self) -> OperatorBase: reduced_ops = [op.reduce() for op in self.oplist] if self.__class__ == ListOp: return ListOp(reduced_ops, **self._state()) return self.__class__(reduced_ops, coeff=self.coeff, abelian=self.abelian)
Apache License 2.0
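A brief usage sketch for the ListOp record above (illustrative only, not part of the original record); it assumes Qiskit Aqua's Pauli primitives X and Z are importable alongside ListOp.

from qiskit.aqua.operators import ListOp, X, Z

# Build a list of two single-qubit operators with an overall coefficient.
op = ListOp([X, Z], coeff=2.0)
print(op.num_qubits)   # 1: both members act on a single qubit
print(op.adjoint())    # ListOp of the adjoints, with the conjugated coefficient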
nervanasystems/ngraph-neon
examples/walk_through/gendata.py
MixtureGenerator.multinomial
python
def multinomial(self, ys):
    i = 0
    for i in range(ys.size):
        ys[i] = self.multichoose()
Initialize y with multinomial values distributed per pvals.

Arguments:
    ys: 1-d tensor.
https://github.com/nervanasystems/ngraph-neon/blob/8988ab90ee81c8b219ea5c374702e56d7f383302/examples/walk_through/gendata.py#L47-L56
import numpy as np
import numbers


class MixtureGenerator(object):
    def __init__(self, pvals, shape, seed=0):
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        self.__rng = np.random.RandomState(seed)
        self.nclasses = len(pvals)
        self.shape = shape
        self.size = 1
        for s in shape:
            self.size = self.size * s
        self.As = self.__rng.uniform(-1, 1, (self.size, self.size, self.nclasses,))
        self.bs = self.__rng.uniform(-1, 1, (self.size, self.nclasses,))
        self.accum = []
        s = 0
        for pval in pvals:
            s = s + pval
            self.accum.append(s)
        self.accum[-1] = 2

    def multichoose(self):
        x = self.__rng.uniform(0, 1)
        for i, aval in enumerate(self.accum):
            if x < aval:
                return i
Apache License 2.0
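An illustrative usage sketch for MixtureGenerator.multinomial above (not part of the original record); it assumes the gendata module shown in the context is importable.

import numpy as np

gen = MixtureGenerator(pvals=[0.2, 0.3, 0.5], shape=(4,), seed=0)
ys = np.zeros(gen.size, dtype=int)
gen.multinomial(ys)   # fills ys in place with class indices drawn per pvals
print(ys)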
giuliorossetti/cdlib
cdlib/algorithms/internal/CONGA.py
check_for_split
python
def check_for_split(G, edge):
    if edge[0] == edge[1]:
        return False
    return not G.edge_disjoint_paths(source=edge[0], target=edge[1])
Given an edge in tuple form, check if it splits the graph into two disjoint clusters. If so, it returns True. Otherwise, False.
https://github.com/giuliorossetti/cdlib/blob/313b8380cf6c7783e9ce518ba7573b159fd885b0/cdlib/algorithms/internal/CONGA.py#L369-L378
import numpy as np import collections as co try: import igraph as ig except ModuleNotFoundError: ig = None import operator import itertools import argparse def nepusz_modularity(G, cover): raise NotImplementedError("See the CONGA 2010 paper") def zhang_modularity(G, cover): raise NotImplementedError( """See 'Identification of overlapping algorithms structure in complex networks using fuzzy C-means clustering'""" ) def nicosia_modularity(G, cover): raise NotImplementedError( """See 'Extending the definition of modularity to directed graphs with overlapping communities'""" ) def count_communities(G, cover): counts = {i.index: 0 for i in G.vs} for community in cover: for v in community: counts[v] += 1 return counts def get_weights(G): try: weights = G.es["weight"] except KeyError: weights = [1 for e in G.es] return weights def get_single_lazar_modularity(G, community, weights, counts): totalInternalWeight = sum(weights[G.es[e].index] for e in community) numVerticesInCommunity = len(community) numPossibleInternalEdges = numVerticesInCommunity * (numVerticesInCommunity - 1) / 2 if numPossibleInternalEdges == 0: return 0 edgeDensity = ( totalInternalWeight / numPossibleInternalEdges / numVerticesInCommunity ) interVsIntra = 0 comm = set(community) for v in community: interVsIntraInternal = 0 neighbors = G.neighbors(v) degree = len(neighbors) numCommunitiesWithin = counts[v] for n in neighbors: weight = weights[G.get_eid(v, n)] if n in comm: interVsIntraInternal += weight else: interVsIntraInternal -= weight interVsIntraInternal /= degree * numCommunitiesWithin interVsIntra += interVsIntraInternal return edgeDensity * interVsIntra def lazar_modularity(G, cover): numCommunities = len(cover) totalModularity = 0 weights = get_weights(G) counts = count_communities(G, cover) for c in cover: totalModularity += get_single_lazar_modularity(G, c, weights, counts) averageModularity = 1 / numCommunities * totalModularity return averageModularity class CrispOverlap(object): def __init__( self, graph, covers, modularities=None, optimal_count=None, modularity_measure="lazar", ): self._measureDict = {"lazar": lazar_modularity} self._covers = covers self._graph = graph self._optimal_count = optimal_count self._modularities = modularities if modularity_measure in self._measureDict: self._modularity_measure = modularity_measure else: raise KeyError("Modularity measure not found.") def __getitem__(self, numClusters): if not numClusters: raise KeyError("Number of clusters must be a positive integer.") return self._covers[numClusters] def __iter__(self): return (v for v in list(self._covers.values())) def __len__(self): return len(self._covers) def __bool__(self): return bool(self._covers) def __str__(self): return "{0} vertices in {1} possible covers.".format( len(self._graph.vs), len(self._covers) ) def as_cover(self): return self._covers[self.optimal_count] def recalculate_modularities(self): modDict = {} for cover in self._covers.values(): modDict[len(cover)] = self._measureDict[self._modularity_measure]( self._graph, cover ) self._modularities = modDict self._optimal_count = max( iter(self._modularities.items()), key=operator.itemgetter(1) )[0] return self._modularities @property def modularities(self): if self._modularities: return self._modularities self._modularities = self.recalculate_modularities() return self._modularities @property def optimal_count(self): if self._optimal_count is not None: return self._optimal_count else: modularities = self.modularities self._optimal_count = max( 
list(modularities.items()), key=operator.itemgetter(1) )[0] return self._optimal_count def pretty_print_cover(self, numClusters, label="CONGA_index"): cover = self._covers[numClusters] pp = [self._graph.vs[num] for num in [cluster for cluster in cover]] for count, comm in enumerate(pp): print("Community {0}:".format(count)) for v in comm: print("\t {0}".format(v.index if label == "CONGA_index" else v[label])) print() def make_fuzzy(self): pass def conga(OG, calculate_modularities=None, optimal_count=None): G = OG.copy() comm = G.components() nClusters = len(comm) G.vs["CONGA_orig"] = [i.index for i in OG.vs] allCovers = {nClusters: ig.VertexCover(OG)} while G.es: split = remove_edge_or_split_vertex(G) if split: comm = G.components().membership cover = get_cover(G, OG, comm) nClusters += 1 allCovers[nClusters] = cover if calculate_modularities is None: calculate_modularities = "lazar" return CrispOverlap( OG, allCovers, modularity_measure=calculate_modularities, optimal_count=optimal_count, ) def remove_edge_or_split_vertex(G): split = False eb = G.edge_betweenness() maxIndex, maxEb = max(enumerate(eb), key=operator.itemgetter(1)) vb = G.betweenness() vi = [i for i, b in enumerate(vb) if b > maxEb] edge = G.es[maxIndex].tuple if not vi: split = delete_edge(G, edge) else: pb = pair_betweenness(G, vi) maxSplit, vNum, splitInstructions = max_split_betweenness(G, pb) if maxSplit > maxEb: split = split_vertex(G, vNum, splitInstructions[0]) else: split = delete_edge(G, edge) return split def get_cover(G, OG, comm): coverDict = co.defaultdict(list) for i, community in enumerate(comm): coverDict[community].append(int(G.vs[i]["CONGA_orig"])) return ig.clustering.VertexCover(OG, clusters=list(coverDict.values())) def delete_edge(G, edge): G.delete_edges(edge) return check_for_split(G, edge)
BSD 2-Clause Simplified License
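An illustrative usage sketch for check_for_split above (not part of the original record); it assumes python-igraph is installed, and mirrors how delete_edge in the context calls it after removing an edge.

import igraph as ig

G = ig.Graph([(0, 1), (1, 2)])      # a 3-vertex path graph
edge = (1, 2)
G.delete_edges(G.get_eid(*edge))    # removing (1, 2) isolates vertex 2
print(check_for_split(G, edge))     # True: no edge-disjoint path remains between 1 and 2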
pyansys/pyaedt
pyaedt/edb_core/siwave.py
SiwaveDCSetupTemplate.pos_term_to_ground
python
def pos_term_to_ground(self):
    return self._pos_term_to_ground
Set positive terminals to ground.

Parameters
----------
terms : list, str
    List of terminals with positive nodes to ground.
https://github.com/pyansys/pyaedt/blob/817c7d706a2d10942470ccac959645e16e9ea971/pyaedt/edb_core/siwave.py#L66-L74
import os import time import warnings from pyaedt import is_ironpython from pyaedt.generic.general_methods import aedt_exception_handler, generate_unique_name, retry_ntimes try: from System import String from System.Collections.Generic import Dictionary except ImportError: warnings.warn("This module requires pythonnet.") class SiwaveDCSetupTemplate(object): def __init__(self): self.name = "DC IR 1" self.dcreport_show_active_devices = True self.export_dcthermal_data = False self.full_dcreport_path = "" self.use_loopres_forperpin = True self.via_report_path = "" self.compute_inductance = True self.accuracy_level = 1 self.plotjv = True self.min_passes = 1 self.max_passes = 5 self.percent_localrefinement = 20 self.energy_error = 2 self.refine_bondwires = False self.refine_vias = False self.num_bondwire_sides = 8 self.num_via_sides = 8 self.mesh_bondwires = False self.mesh_vias = False self.perform_adaptive_refinement = False self.use_dc_custom_settings = False self._source_terms_to_ground = None self._pos_term_to_ground = [] self._neg_term_to_ground = [] @property
MIT License
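A minimal illustrative sketch for the pos_term_to_ground property above (not part of the original record); it assumes PyAEDT is installed so the siwave module imports cleanly.

settings = SiwaveDCSetupTemplate()
print(settings.pos_term_to_ground)   # [] until terminals are assigned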
robinhood/aiokafka
aiokafka/producer/sender.py
Sender._sender_routine
python
async def _sender_routine(self):
    tasks = set()
    txn_task = None
    try:
        while True:
            await self._maybe_wait_for_pid()

            waiters = set()
            txn_manager = self._txn_manager
            muted_partitions = self._muted_partitions
            if txn_manager is not None and txn_manager.transactional_id is not None:
                if txn_task is None or txn_task.done():
                    txn_task = self._maybe_do_transactional_request()
                    if txn_task is not None:
                        tasks.add(txn_task)
                    else:
                        waiters.add(txn_manager.make_task_waiter())
                muted_partitions = (
                    muted_partitions | txn_manager.partitions_to_add()
                )
            batches, unknown_leaders_exist = self._message_accumulator.drain_by_nodes(
                ignore_nodes=self._in_flight,
                muted_partitions=muted_partitions)

            for node_id, batches in batches.items():
                task = ensure_future(
                    self._send_produce_req(node_id, batches),
                    loop=self._loop)
                self._in_flight.add(node_id)
                for tp in batches:
                    self._muted_partitions.add(tp)
                tasks.add(task)

            if unknown_leaders_exist:
                fut = self.client.force_metadata_update()
                waiters |= tasks.union([fut])
            else:
                fut = self._message_accumulator.data_waiter()
                waiters |= tasks.union([fut])

            done, _ = await asyncio.wait(
                waiters, return_when=asyncio.FIRST_COMPLETED, loop=self._loop)

            for task in done:
                task.result()
            tasks -= done
    except asyncio.CancelledError:
        for task in tasks:
            await task
    except (ProducerFenced, OutOfOrderSequenceNumber,
            TransactionalIdAuthorizationFailed):
        raise
    except Exception:
        log.error("Unexpected error in sender routine", exc_info=True)
        raise KafkaError("Unexpected error during batch delivery")
Background task that sends pending batches to the leader nodes for each batch's partition. This encapsulates the same logic as Java's `Sender` background thread. Because we use asyncio, this is a more event-based loop, rather than counting the timeout till the next possible event as in Java.
https://github.com/robinhood/aiokafka/blob/10e4119104ee32b726ab750b3e7120a85d2c7f25/aiokafka/producer/sender.py#L78-L166
import asyncio import collections import logging import aiokafka.errors as Errors from aiokafka.client import ConnectionGroup, CoordinationType from aiokafka.errors import ( KafkaError, UnknownTopicOrPartitionError, CoordinatorNotAvailableError, NotCoordinatorError, CoordinatorLoadInProgressError, InvalidProducerEpoch, ProducerFenced, InvalidProducerIdMapping, InvalidTxnState, ConcurrentTransactions, DuplicateSequenceNumber, RequestTimedOutError, OutOfOrderSequenceNumber, TopicAuthorizationFailedError, GroupAuthorizationFailedError, TransactionalIdAuthorizationFailed, OperationNotAttempted) from aiokafka.protocol.produce import ProduceRequest from aiokafka.protocol.transaction import ( InitProducerIdRequest, AddPartitionsToTxnRequest, EndTxnRequest, AddOffsetsToTxnRequest, TxnOffsetCommitRequest ) from aiokafka.structs import TopicPartition from aiokafka.util import ensure_future log = logging.getLogger(__name__) BACKOFF_OVERRIDE = 0.02 class Sender: def __init__( self, client, *, acks, txn_manager, message_accumulator, retry_backoff_ms, linger_ms, request_timeout_ms, loop): self.client = client self._txn_manager = txn_manager self._acks = acks self._message_accumulator = message_accumulator self._sender_task = None self._in_flight = set() self._muted_partitions = set() self._coordinators = {} self._loop = loop self._retry_backoff = retry_backoff_ms / 1000 self._request_timeout_ms = request_timeout_ms self._linger_time = linger_ms / 1000 async def start(self): await self._maybe_wait_for_pid() self._sender_task = ensure_future( self._sender_routine(), loop=self._loop) self._sender_task.add_done_callback(self._fail_all) def _fail_all(self, task): if task.exception() is not None: self._message_accumulator.fail_all(task.exception()) if self._txn_manager is not None: self._txn_manager.fatal_error(task.exception()) @property def sender_task(self): return self._sender_task async def close(self): if self._sender_task is not None: if not self._sender_task.done(): self._sender_task.cancel() await self._sender_task
Apache License 2.0
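A standalone sketch of the event-driven wait pattern the _sender_routine docstring describes (this is not aiokafka code; all names here are hypothetical): wake on whichever waiter completes first instead of polling on a fixed timeout.

import asyncio

async def sender_like_loop(data_ready: asyncio.Event, stop: asyncio.Event):
    while not stop.is_set():
        # Wait on several conditions at once; resume as soon as any fires.
        waiters = {
            asyncio.ensure_future(data_ready.wait()),
            asyncio.ensure_future(stop.wait()),
        }
        done, pending = await asyncio.wait(
            waiters, return_when=asyncio.FIRST_COMPLETED)
        for task in pending:
            task.cancel()
        if data_ready.is_set():
            data_ready.clear()
            # drain accumulated batches and dispatch send requests here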
winedarksea/autots
autots/models/ensemble.py
mosaic_classifier
python
def mosaic_classifier(df_train, known):
    known.index.name = "forecast_period"
    upload = pd.melt(
        known,
        var_name="series_id",
        value_name="model_id",
        ignore_index=False,
    ).reset_index(drop=False)
    upload['forecast_period'] = upload['forecast_period'].astype(int)
    missing_cols = df_train.columns[
        ~df_train.columns.isin(upload['series_id'].unique())
    ]
    if not missing_cols.empty:
        forecast_p = np.arange(upload['forecast_period'].max() + 1)
        p_full = np.tile(forecast_p, len(missing_cols))
        missing_rows = pd.DataFrame(
            {
                'forecast_period': p_full,
                'series_id': np.repeat(missing_cols.values, len(forecast_p)),
                'model_id': np.nan,
            },
            index=None if len(p_full) > 1 else [0],
        )
        upload = pd.concat([upload, missing_rows])
    X = fill_median(
        (summarize_series(df_train).transpose()).merge(
            upload, left_index=True, right_on="series_id"
        )
    )
    X.set_index("series_id", inplace=True)
    to_predict = X[X['model_id'].isna()].drop(columns=['model_id'])
    X = X[~X['model_id'].isna()]
    Y = X['model_id']
    Xf = X.drop(columns=['model_id'])
    from sklearn.ensemble import RandomForestClassifier

    clf = RandomForestClassifier()
    clf.fit(Xf, Y)
    predicted = clf.predict(to_predict)
    result = pd.concat(
        [to_predict.reset_index(drop=False), pd.Series(predicted, name="model_id")],
        axis=1,
    )
    cols_needed = ['model_id', 'series_id', 'forecast_period']
    final = pd.concat(
        [X.reset_index(drop=False)[cols_needed], result[cols_needed]],
        sort=True, axis=0
    )
    final['forecast_period'] = final['forecast_period'].astype(str)
    final = final.pivot(values="model_id", columns="series_id", index="forecast_period")
    try:
        final = final[df_train.columns]
        if final.isna().to_numpy().sum() > 0:
            raise KeyError("NaN in mosaic generalization")
    except KeyError as e:
        raise ValueError(
            f"mosaic_classifier failed to generalize for all columns: {repr(e)}"
        )
    return final
Classify unknown series with the appropriate model for mosaic ensembles.
https://github.com/winedarksea/autots/blob/97176c3ab7cc9c930b1a76dc877f8a8138abae27/autots/models/ensemble.py#L215-L275
import datetime import numpy as np import pandas as pd import json from autots.models.base import PredictionObject from autots.models.model_list import no_shared from autots.tools.impute import fill_median horizontal_aliases = ['horizontal', 'probabilistic'] def summarize_series(df): df_sum = df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]) return df_sum def mosaic_or_horizontal(all_series: dict): first_value = all_series[next(iter(all_series))] if isinstance(first_value, dict): return "mosaic" else: return "horizontal" def parse_horizontal(all_series: dict, model_id: str = None, series_id: str = None): if model_id is None and series_id is None: raise ValueError( "either series_id or model_id must be specified in parse_horizontal." ) if mosaic_or_horizontal(all_series) == 'mosaic': if model_id is not None: return [ser for ser, mod in all_series.items() if model_id in mod.values()] else: return list(set(all_series[series_id].values())) else: if model_id is not None: return [ser for ser, mod in all_series.items() if mod == model_id] else: return [all_series[series_id]] def BestNEnsemble( ensemble_params, forecasts_list, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime, prediction_interval, ): startTime = datetime.datetime.now() model_count = len(forecasts.keys()) if model_count < 1: raise ValueError("BestN failed, no component models available.") sample_df = next(iter(forecasts.values())) columnz = sample_df.columns indices = sample_df.index ens_df = pd.DataFrame(0, index=indices, columns=columnz) for idx, x in forecasts.items(): ens_df = ens_df + x ens_df = ens_df / model_count ens_df_lower = pd.DataFrame(0, index=indices, columns=columnz) for idx, x in lower_forecasts.items(): ens_df_lower = ens_df_lower + x ens_df_lower = ens_df_lower / model_count ens_df_upper = pd.DataFrame(0, index=indices, columns=columnz) for idx, x in upper_forecasts.items(): ens_df_upper = ens_df_upper + x ens_df_upper = ens_df_upper / model_count ens_runtime = datetime.timedelta(0) for x in forecasts_runtime.values(): ens_runtime = ens_runtime + x ens_result = PredictionObject( model_name="Ensemble", forecast_length=len(ens_df.index), forecast_index=ens_df.index, forecast_columns=ens_df.columns, lower_forecast=ens_df_lower, forecast=ens_df, upper_forecast=ens_df_upper, prediction_interval=prediction_interval, predict_runtime=datetime.datetime.now() - startTime, fit_runtime=ens_runtime, model_parameters=ensemble_params, ) return ens_result def DistEnsemble( ensemble_params, forecasts_list, forecasts, lower_forecasts, upper_forecasts, forecasts_runtime, prediction_interval, ): forecasts = list(forecasts.values()) lower_forecasts = list(lower_forecasts.values()) upper_forecasts = list(upper_forecasts.values()) forecasts_runtime = list(forecasts_runtime.values()) first_model_index = forecasts_list.index(ensemble_params['FirstModel']) second_model_index = forecasts_list.index(ensemble_params['SecondModel']) forecast_length = forecasts[0].shape[0] dis_frac = ensemble_params['dis_frac'] first_bit = int(np.ceil(forecast_length * dis_frac)) second_bit = int(np.floor(forecast_length * (1 - dis_frac))) ens_df = ( forecasts[first_model_index] .head(first_bit) .append(forecasts[second_model_index].tail(second_bit)) ) ens_df_lower = ( lower_forecasts[first_model_index] .head(first_bit) .append(lower_forecasts[second_model_index].tail(second_bit)) ) ens_df_upper = ( upper_forecasts[first_model_index] .head(first_bit) .append(upper_forecasts[second_model_index].tail(second_bit)) ) id_list = 
list(ensemble_params['models'].keys()) model_indexes = [idx for idx, x in enumerate(forecasts_list) if x in id_list] ens_runtime = datetime.timedelta(0) for idx, x in enumerate(forecasts_runtime): if idx in model_indexes: ens_runtime = ens_runtime + forecasts_runtime[idx] ens_result_obj = PredictionObject( model_name="Ensemble", forecast_length=len(ens_df.index), forecast_index=ens_df.index, forecast_columns=ens_df.columns, lower_forecast=ens_df_lower, forecast=ens_df, upper_forecast=ens_df_upper, prediction_interval=prediction_interval, predict_runtime=datetime.timedelta(0), fit_runtime=ens_runtime, model_parameters=ensemble_params, ) return ens_result_obj def horizontal_classifier(df_train, known: dict, method: str = "whatever"): columnz = df_train.columns.tolist() X = summarize_series(df_train).transpose() X = fill_median(X) known_l = list(known.keys()) unknown = list(set(columnz) - set(known_l)) Xt = X.loc[known_l] Xf = X.loc[unknown] Y = np.array(list(known.values())) from sklearn.naive_bayes import GaussianNB clf = GaussianNB() clf.fit(Xt, Y) result = clf.predict(Xf) result_d = dict(zip(Xf.index.tolist(), result)) final = {**result_d, **known} return final
MIT License
google/clusterfuzz
docker/oss-fuzz/host/start_host.py
start_bot_instance
python
def start_bot_instance(instance_num):
    env = os.environ.copy()

    host_name = os.getenv('HOSTNAME', socket.gethostname())
    bot_name = '%s-%d' % (host_name, instance_num)

    env['BOT_NAME'] = bot_name
    env['HOST_INSTANCE_NAME'] = host_name
    env['HOST_INSTANCE_NUM'] = str(instance_num)

    bot_directory = os.path.join(BOT_BASEDIR, bot_name)
    bot_root_directory = os.path.join(bot_directory, 'clusterfuzz')
    tmp_directory = os.path.join(bot_directory, 'tmp')

    if not os.path.exists(bot_directory):
        os.mkdir(bot_directory)
        os.mkdir(tmp_directory)

    env['ROOT_DIR'] = bot_root_directory
    env['BOT_TMPDIR'] = tmp_directory
    env['PYTHONPATH'] = os.path.join(bot_root_directory, 'src')

    if os.path.exists(bot_root_directory):
        shutil.rmtree(bot_root_directory)
    shutil.copytree(SRC_DIR, bot_root_directory)

    while True:
        bot_proc = subprocess.Popen(
            sys.executable + ' src/python/bot/startup/run.py 2>&1 > console.txt',
            shell=True,
            env=env,
            cwd=bot_root_directory)
        bot_proc.wait()
        print('Instance %i exited.' % instance_num, file=sys.stderr)
Set up bot directory.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/docker/oss-fuzz/host/start_host.py#L41-L74
import os
import shutil
import socket
import subprocess
import sys
import threading
import time

MNT_DIR = '/mnt/scratch0'
SRC_DIR = os.path.join(MNT_DIR, 'clusterfuzz')
BOT_BASEDIR = os.path.join(MNT_DIR, 'bots')
NUM_WORKERS_PER_HOST = int(os.environ['NUM_WORKERS_PER_HOST'])


def setup_environment():
    os.environ['QUEUE_OVERRIDE'] = 'LINUX_UNTRUSTED'
    os.environ['WORKER_ROOT_DIR'] = os.path.join(MNT_DIR, 'clusterfuzz')
    os.environ['WORKER_BOT_TMPDIR'] = os.path.join(MNT_DIR, 'tmp')

    if not os.path.exists(BOT_BASEDIR):
        os.mkdir(BOT_BASEDIR)
Apache License 2.0
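A hypothetical driver sketch for start_bot_instance above (not part of the original record): since every call blocks in its restart loop, a host would typically launch one instance per configured worker in its own thread.

import threading

threads = [
    threading.Thread(target=start_bot_instance, args=(i,))
    for i in range(NUM_WORKERS_PER_HOST)
]
for thread in threads:
    thread.start()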
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v2beta2_metric_spec.py
V2beta2MetricSpec.type
python
def type(self, type):
    if self.local_vars_configuration.client_side_validation and type is None:
        raise ValueError("Invalid value for `type`, must not be `None`")

    self._type = type
Sets the type of this V2beta2MetricSpec.

type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. # noqa: E501

:param type: The type of this V2beta2MetricSpec. # noqa: E501
:type: str
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v2beta2_metric_spec.py#L170-L181
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V2beta2MetricSpec(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'external': 'V2beta2ExternalMetricSource', 'object': 'V2beta2ObjectMetricSource', 'pods': 'V2beta2PodsMetricSource', 'resource': 'V2beta2ResourceMetricSource', 'type': 'str' } attribute_map = { 'external': 'external', 'object': 'object', 'pods': 'pods', 'resource': 'resource', 'type': 'type' } def __init__(self, external=None, object=None, pods=None, resource=None, type=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._external = None self._object = None self._pods = None self._resource = None self._type = None self.discriminator = None if external is not None: self.external = external if object is not None: self.object = object if pods is not None: self.pods = pods if resource is not None: self.resource = resource self.type = type @property def external(self): return self._external @external.setter def external(self, external): self._external = external @property def object(self): return self._object @object.setter def object(self, object): self._object = object @property def pods(self): return self._pods @pods.setter def pods(self, pods): self._pods = pods @property def resource(self): return self._resource @resource.setter def resource(self, resource): self._resource = resource @property def type(self): return self._type @type.setter
Apache License 2.0
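An illustrative sketch for the validating type setter above (not part of the original record); with the default Configuration, client-side validation is enabled, so assigning None raises.

spec = V2beta2MetricSpec(type="Resource")
spec.type = "Pods"          # accepted
try:
    spec.type = None        # rejected by the setter
except ValueError as err:
    print(err)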
matteoferla/fragmenstein
fragmenstein/monster/_collapse_ring.py
_MonsterRing._place_ring_atoms
python
def _place_ring_atoms(self, mol, rings: List[Dict[str, Union[Chem.Atom, List[Any]]]]):
    conf = mol.GetConformer()
    for ring in rings:
        ringcore = ring['atom']
        indices = []
        for i in range(len(ring['elements'])):
            collapsed_atom_data = self._get_expansion_for_atom(ring, i)
            if self._is_present(mol, collapsed_atom_data['ori_i']):
                natom = self._get_new_index(mol, collapsed_atom_data['ori_i'],
                                            search_collapsed=False)
                self.journal.debug(f"{natom} (formerly {collapsed_atom_data['ori_i']} existed already." +
                                   "Fused ring or similar.")
            else:
                n = mol.AddAtom(Chem.Atom(collapsed_atom_data['element']))
                natom = mol.GetAtomWithIdx(n)
                conf.SetAtomPosition(n, Point3D(collapsed_atom_data['x'],
                                                collapsed_atom_data['y'],
                                                collapsed_atom_data['z']))
                natom.SetIntProp('_ori_i', collapsed_atom_data['ori_i'])
                natom.SetDoubleProp('_x', collapsed_atom_data['x'])
                natom.SetDoubleProp('_y', collapsed_atom_data['y'])
                natom.SetDoubleProp('_z', collapsed_atom_data['z'])
                natom.SetProp('_ori_name', collapsed_atom_data['ori_name'])
                natom.SetIntProp('_ring_i', ringcore.GetIdx())
                indices.append(n)
        ringcore.SetIntProp('_ring_i', ringcore.GetIdx())
        ringcore.SetProp('_current_is', json.dumps(indices))
        ring['current_is'] = indices
Place all atoms stored in rings.

:param mol:
:param rings:
:return:
https://github.com/matteoferla/fragmenstein/blob/151bde01f4ebd930880cb7ad234bab68ac4a3e76/fragmenstein/monster/_collapse_ring.py#L289-L323
__doc__ = """ This is the ring collapsing code. In Pymol, a ring diameter is 2.7894375324249268 Å. fab F print cmd.distance('/obj01///PHE`1/CE1','/obj01///PHE`1/CD2') """ import itertools import json from collections import defaultdict from functools import partial from typing import Optional, Dict, List, Any, Tuple, Union, Callable import numpy as np from rdkit import Chem from rdkit.Geometry.rdGeometry import Point3D from ._join_neighboring import _MonsterJoinNeigh from .bond_provenance import BondProvenance class _MonsterRing( _MonsterJoinNeigh): def collapse_mols(self, mols: List[Chem.Mol]): mols = [self.collapse_ring(mol) for mol in mols] [self.offset(mol) for mol in mols] return mols def collapse_ring(self, mol: Chem.Mol) -> Chem.Mol: self.store_positions(mol) mol = Chem.RWMol(mol) conf = mol.GetConformer() center_idxs = [] morituri = [] old2center = defaultdict(list) for atomset in mol.GetRingInfo().AtomRings(): morituri.extend(atomset) neighs = [] neighbonds = [] bonds = [] xs = [] ys = [] zs = [] elements = [] c = mol.AddAtom(Chem.Atom('C')) center_idxs.append(c) central = mol.GetAtomWithIdx(c) name = mol.GetProp('_Name') if mol.HasProp('_Name') else '???' central.SetProp('_ori_name', name), for i in atomset: old2center[i].append(c) atom = mol.GetAtomWithIdx(i) neigh_i = [a.GetIdx() for a in atom.GetNeighbors()] neighs.append(neigh_i) bond = [mol.GetBondBetweenAtoms(i, j).GetBondType().name for j in neigh_i] bonds.append(bond) pos = conf.GetAtomPosition(i) xs.append(pos.x) ys.append(pos.y) zs.append(pos.z) elements.append(atom.GetSymbol()) central.SetIntProp('_ori_i', -1) central.SetProp('_ori_is', json.dumps(atomset)) central.SetProp('_neighbors', json.dumps(neighs)) central.SetProp('_xs', json.dumps(xs)) central.SetProp('_ys', json.dumps(ys)) central.SetProp('_zs', json.dumps(zs)) central.SetProp('_elements', json.dumps(elements)) central.SetProp('_bonds', json.dumps(bonds)) conf.SetAtomPosition(c, Point3D(*[sum(axis) / len(axis) for axis in (xs, ys, zs)])) for atomset, center_i in zip(mol.GetRingInfo().AtomRings(), center_idxs): central = mol.GetAtomWithIdx(center_i) neighss = json.loads(central.GetProp('_neighbors')) bondss = json.loads(central.GetProp('_bonds')) for neighs, bonds in zip(neighss, bondss): for neigh, bond in zip(neighs, bonds): if neigh not in atomset: bt = getattr(Chem.BondType, bond) if neigh not in morituri: mol.AddBond(center_i, neigh, bt) new_bond = mol.GetBondBetweenAtoms(center_i, neigh) BondProvenance.set_bond(new_bond, 'original') else: for other_center_i in old2center[neigh]: if center_i != other_center_i: if not mol.GetBondBetweenAtoms(center_i, other_center_i): mol.AddBond(center_i, other_center_i, bt) new_bond = mol.GetBondBetweenAtoms(center_i, other_center_i) BondProvenance.set_bond(new_bond, 'original') break else: raise ValueError(f'Cannot find what {neigh} became') for i in sorted(set(morituri), reverse=True): mol.RemoveAtom(self._get_new_index(mol, i)) return mol.GetMol() def expand_ring(self, mol: Chem.Mol) -> Chem.Mol: self.journal.debug('Starting ring expansion') mol = Chem.RWMol(mol) rings = self._get_expansion_data(mol) self._place_ring_atoms(mol, rings) self._restore_original_bonding(mol, rings) self.keep_copy(mol, 'Rings expanded and original bonding restored.') self._add_novel_bonding(mol, rings) self._delete_collapsed(mol) self._detriangulate(mol) try: mol = self._emergency_joining(mol) except ConnectionError as error: if self.throw_on_discard: raise error else: self.journal.info('Disconnect ignored due to keep_all=False') mol = 
self.get_largest_fragment(mol) if mol is None: raise ValueError('(Impossible) Failed at some point...') elif isinstance(mol, Chem.RWMol): return mol.GetMol() else: return mol def offset(self, mol: Chem.Mol): self._collapsed_ring_offset += 100 old2new = {} for atom in mol.GetAtoms(): if atom.GetIntProp('_ori_i') != -1: o = atom.GetIntProp('_ori_i') n = o + self._collapsed_ring_offset atom.SetIntProp('_ori_i', n) old2new[o] = n else: pass for atom in self._get_collapsed_atoms(mol): old = json.loads(atom.GetProp('_ori_is')) new = [i + self._collapsed_ring_offset for i in old] atom.SetProp('_ori_is', json.dumps(new)) old2new = {**old2new, **dict(zip(old, new))} for atom in self._get_collapsed_atoms(mol): old_neighss = json.loads(atom.GetProp('_neighbors')) new_neighss = [[old2new[i] for i in old_neighs if i in old2new] for old_neighs in old_neighss] atom.SetProp('_neighbors', json.dumps(new_neighss)) pass def _renumber_original_indices(self, mol: Chem.Mol, mapping: Dict[int, int], name_restriction: Optional[str] = None): for atom in mol.GetAtoms(): if name_restriction is not None and atom.HasProp('_ori_name') and atom.GetProp('_ori_name') != name_restriction: continue i = atom.GetIntProp('_ori_i') if i == -1: ori = json.loads(atom.GetProp('_ori_is')) alt = [dd if dd not in mapping else mapping[dd] for dd in ori] atom.SetProp('_ori_is', json.dumps(alt)) ori = json.loads(atom.GetProp('_neighbors')) alt = [[dd if dd not in mapping else mapping[dd] for dd in inner] for inner in ori] atom.SetProp('_neighbors', json.dumps(alt)) elif i in mapping: atom.SetIntProp('_ori_i', mapping[i]) else: pass def _get_expansion_data(self, mol: Chem.Mol) -> List[Dict[str, List[Any]]]: return [ dict(atom=atom, ori_name=atom.GetProp('_ori_name'), elements=json.loads(atom.GetProp('_elements')), neighbors=json.loads(atom.GetProp('_neighbors')), ori_is=json.loads(atom.GetProp('_ori_is')), xs=json.loads(atom.GetProp('_xs')), ys=json.loads(atom.GetProp('_ys')), zs=json.loads(atom.GetProp('_zs')), bonds=json.loads(atom.GetProp('_bonds'))) for atom in self._get_collapsed_atoms(mol)] def _get_expansion_for_atom(self, ring: Dict[str, List[Any]], i: int) -> Dict[str, Any]: try: return {k.replace('s', ''): ring[k][i] if isinstance(ring[k], list) else ring[k] for k in ring} except IndexError: troublesome = [k for k in ring if isinstance(ring[k], list) and len(ring[k]) <= i] if len(troublesome) == 0: raise IndexError(f'There is a major issue with ring data for index {i}: {ring}') elif troublesome[0] == 'current_is': self.journal.warning(f'One atom lacks a current index!' + 'This is a fallback that should not happen') mol = ring['atom'].GetOwningMol() ring['current_is'] = [self._get_new_index(mol, old_i, search_collapsed=False) for old_i in ring['ori_is']] return self._get_expansion_for_atom(ring, i) else: raise IndexError(f'The indices of the collapsed atom do not extend to {i} for {troublesome}')
MIT License
deepsphere/deepsphere-weather
modules/utils_zarr.py
get_nbytes_stored_zarr_variables
python
def get_nbytes_stored_zarr_variables(fpath):
    ds = xr.open_zarr(fpath)
    source_group = zarr.open(fpath)
    variables = list(ds.data_vars.keys())
    nbytes_stored = {}
    for variable in variables:
        nbytes_stored[variable] = _get_zarr_nbytes_stored(source_group[variable])
    return nbytes_stored
Return nbytes stored for each variable.
https://github.com/deepsphere/deepsphere-weather/blob/a9c75de9c9852a2832883cd998efd16d6542b083/modules/utils_zarr.py#L458-L466
import os import shutil import time import itertools import dask import zarr import numcodecs import numpy as np import xarray as xr from rechunker import rechunk from dask.diagnostics import ProgressBar def is_numcodecs(compressor): if type(compressor).__module__.find("numcodecs") == -1: return False else: return True def check_compressor(compressor, variable_names, default_compressor = None): if not isinstance(variable_names, (list, str)): raise TypeError("'variable_names' must be a string or a list") if isinstance(variable_names, str): variable_names = [variable_names] if not all([isinstance(s, str) for s in variable_names]): raise TypeError("Specify all variable names as string within the 'variable_names' list.") if not (isinstance(compressor, (str, dict, type(None))) or is_numcodecs(compressor)): raise TypeError("'compressor' must be a dictionary, numcodecs compressor, 'auto' string or None.") if isinstance(compressor, str): if not compressor == "auto": raise ValueError("If 'compressor' is specified as string, must be 'auto'.") if isinstance(compressor, dict): if not np.all(np.isin(list(compressor.keys()), variable_names)): raise ValueError("The 'compressor' dictionary must contain the keys {}".format(variable_names)) if not (isinstance(default_compressor, (dict, type(None))) or is_numcodecs(default_compressor)): raise TypeError("'default_compressor' must be a numcodecs compressor or None.") if isinstance(default_compressor, dict): if not np.all(np.isin(list(default_compressor.keys()), variable_names)): raise ValueError("The 'default_compressor' dictionary must contain the keys {}".format(variable_names)) if isinstance(compressor, str): if compressor == "auto": compressor = default_compressor if isinstance(compressor, dict): if not all([is_numcodecs(cmp) or isinstance(cmp, type(None)) for cmp in compressor.values()]): raise ValueError("The compressors specified in the 'compressor' dictionary must be numcodecs (or None).") if is_numcodecs(compressor) or isinstance(compressor, type(None)): compressor = {var: compressor for var in variable_names} return compressor def _all_valid_chunks_values(values): bool_list = [] for x in values: if isinstance(x, str): if x == "auto": bool_list.append(True) else: bool_list.append(False) elif isinstance(x, int): bool_list.append(True) elif isinstance(x, type(None)): bool_list.append(True) else: bool_list.append(False) return all(bool_list) def get_ds_chunks(ds): variable_names = list(ds.data_vars.keys()) chunks = {} for var in variable_names: if ds[var].chunks is not None: chunks[var] = {dim: v[0] for dim,v in zip(ds[var].dims, ds[var].chunks)} else: chunks[var] = None return chunks def check_chunks(ds, chunks, default_chunks = None): if not isinstance(chunks, (str, dict, type(None))): raise TypeError("'chunks' must be a dictionary, 'auto' or None.") if isinstance(chunks, str): if not chunks == "auto": raise ValueError("If 'chunks' is specified as string, must be 'auto'.") if not isinstance(default_chunks, (dict, type(None))): raise TypeError("'default_chunks' must be either a dictionary or None.") if not isinstance(ds, xr.Dataset): raise TypeError("'ds' must be an xarray Dataset.") variable_names = list(ds.data_vars.keys()) dim_names = list(ds.dims) if isinstance(chunks, dict): CHUNKS_PER_VARIABLE = np.all(np.isin(list(chunks.keys()), variable_names)) CHUNKS_DIMS = np.all(np.isin(list(chunks.keys()), dim_names)) if not CHUNKS_PER_VARIABLE and not CHUNKS_DIMS: if np.any(np.isin(list(chunks.keys()), dim_names)): print("The 'chunks' dictionary must 
contain the keys {}".format(dim_names)) raise ValueError("Please specify specific chunks for each Dataset dimension.") if np.any(np.isin(list(chunks.keys()), variable_names)): print("The 'chunks' dictionary must contain the keys {}".format(variable_names)) raise ValueError("Please specify specific chunks for each Dataset variable.") if isinstance(default_chunks, dict): DEFAULT_CHUNKS_PER_VARIABLE = np.all(np.isin(list(default_chunks.keys()), variable_names)) DEFAULT_CHUNKS_DIMS = np.all(np.isin(list(default_chunks.keys()), dim_names)) if not DEFAULT_CHUNKS_PER_VARIABLE and not DEFAULT_CHUNKS_DIMS: if np.any(np.isin(list(default_chunks.keys()), dim_names)): raise ValueError("Please specify specific default_chunks for each Dataset dimension.") if np.any(np.isin(list(default_chunks.keys()), variable_names)): raise ValueError("Please specify specific default_chunks for each Dataset variable.") if isinstance(chunks, str): if isinstance(default_chunks, dict): chunks = default_chunks CHUNKS_PER_VARIABLE = DEFAULT_CHUNKS_PER_VARIABLE CHUNKS_DIMS = DEFAULT_CHUNKS_DIMS else: chunks = {dim: "auto" for dim in dim_names} CHUNKS_PER_VARIABLE = False CHUNKS_DIMS = True if isinstance(chunks, type(None)): chunks = get_ds_chunks(ds) CHUNKS_PER_VARIABLE = True CHUNKS_DIMS = False if isinstance(chunks, dict): if CHUNKS_PER_VARIABLE: for var in variable_names: if not np.all(np.isin(list(chunks[var].keys()), list(ds[var].dims))): raise ValueError("The 'chunks' dictionary of {} must contain the keys {}".format(var, list(ds[var].dims))) if not _all_valid_chunks_values(list(chunks[var].values())): raise ValueError("Unvalid 'chunks' values for {}.".format(var)) elif not CHUNKS_PER_VARIABLE: if not _all_valid_chunks_values(list(chunks.values())): raise ValueError("Unvalid 'chunks' values") new_chunks = {} for var in variable_names: new_chunks[var] = {dim: chunks[dim] for dim in ds[var].dims} chunks = new_chunks else: raise ValueError("This chunks option has not been implemented.") return chunks def check_rounding(rounding, variable_names): if not isinstance(variable_names, (list, str)): raise TypeError("'variable_names' must be a string or a list.") if isinstance(variable_names, str): variable_names = [variable_names] if not all([isinstance(s,str) for s in variable_names]): raise ValueError("Specify all variable names as string within the 'variable_names' list.") if not isinstance(rounding, (int, dict, type(None))): raise TypeError("'rounding' must be a dictionary, integer or None.") if isinstance(rounding, dict): if not np.all(np.isin(list(rounding.keys()), variable_names)): raise ValueError("The 'rounding' dictionary must contain the keys {}.".format(variable_names)) if not all([isinstance(v, (int, type(None))) for v in rounding.values()]): raise ValueError("The rounding decimals specified in the 'rounding' dictionary must be integers (or None).") if any([v < 0 for v in rounding.values() if v is not None]): raise ValueError("The rounding decimals specified in the 'rounding' dictionary must be positive integers (or None).") if isinstance(rounding, int): if rounding < 0: raise ValueError("'rounding' decimal value must be larger than 0.") return rounding def write_zarr(zarr_fpath, ds, chunks="auto", default_chunks = None, compressor="auto", default_compressor = None, rounding = None, consolidated=True, append=False, append_dim = None, show_progress=True): if not zarr_fpath.endswith(".zarr"): zarr_fpath = zarr_fpath + ".zarr" ZARR_EXIST = os.path.exists(zarr_fpath) if not isinstance(append, bool): raise 
TypeError("'append' must be either True or False'.") if not append and ZARR_EXIST: raise ValueError(zarr_fpath + " already exists!") if append and not ZARR_EXIST: append = False if append: if not isinstance(append_dim, str): raise TypeError("Please specify the 'append_dim' (as a string).") else: append_dim = None chunks = check_chunks(ds, chunks = chunks, default_chunks = default_chunks) compressor = check_compressor(compressor = compressor, default_compressor = default_compressor, variable_names = list(ds.data_vars.keys())) rounding = check_rounding(rounding = rounding, variable_names = list(ds.data_vars.keys())) if rounding is not None: if isinstance(rounding, int): ds = ds.round(decimals=rounding) elif isinstance(rounding, dict): for var, decimal in rounding.items(): if decimal is not None: ds[var] = ds[var].round(decimal) else: raise NotImplementedError("'rounding' should be int, dict or None.") for dim in list(ds.dims.keys()): ds[dim].encoding['filters'] = None for var, chunk in chunks.items(): if chunk is not None: ds[var] = ds[var].chunk(chunk) for var, comp in compressor.items(): ds[var].encoding['compressor'] = comp compute = not show_progress if not append: zarr_store = zarr.DirectoryStore(zarr_fpath) r = ds.to_zarr(store=zarr_store, mode='w', synchronizer=None, group=None, consolidated = consolidated, compute=compute) if show_progress: with ProgressBar(): r.compute() else: r = ds.to_zarr(store = zarr_fpath, mode="a", append_dim = append_dim, synchronizer = None, group = None, consolidated = consolidated, compute=compute) if show_progress: with ProgressBar(): r.compute() def rechunk_Dataset(ds, chunks, target_store, temp_store, max_mem = '1GB', force=False): if os.path.exists(target_store): if force: shutil.rmtree(target_store) else: raise ValueError("A zarr store already exists at {}. If you want to overwrite, specify force=True".format(target_store)) if os.path.exists(temp_store): shutil.rmtree(temp_store) target_chunks = check_chunks(ds=ds, chunks=chunks, default_chunks=None) dict_dims = dict(ds.dims) for var in target_chunks.keys(): if target_chunks[var] is not None: for k, v in target_chunks[var].items(): if v == -1: target_chunks[var][k] = dict_dims[k] r = rechunk(ds, target_chunks=target_chunks, max_mem=max_mem, target_store=target_store, temp_store=temp_store) with ProgressBar(): r.execute() shutil.rmtree(temp_store) def _get_zarr_array_info_dict(arr): return {k:v for k,v in arr.info_items()} def _get_zarr_array_storage_ratio(arr): return float(_get_zarr_array_info_dict(arr)['Storage ratio']) def _get_zarr_nbytes_stored(arr): return round(arr.info.obj.nbytes_stored/1024/1024,3) def _get_zarr_nbytes(arr): return round(arr.info.obj.nbytes/1024/1024,3)
MIT License
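An illustrative usage sketch for get_nbytes_stored_zarr_variables above (not part of the original record); it writes a tiny Dataset to a local store first, and the store path is just an example.

import numpy as np
import xarray as xr

ds = xr.Dataset({"t2m": ("time", np.random.rand(100))})
ds.to_zarr("example.zarr", mode="w")
print(get_nbytes_stored_zarr_variables("example.zarr"))  # e.g. {'t2m': <MB stored>}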
schemaorg/sdopythonapp
lib/rdflib/plugins/parsers/ntriples.py
NTriplesParser.readline
python
def readline(self):
    if not self.buffer:
        buffer = self.file.read(bufsiz)
        if not buffer:
            return None
        self.buffer = buffer

    while True:
        m = r_line.match(self.buffer)
        if m:
            self.buffer = self.buffer[m.end():]
            return m.group(1)
        else:
            buffer = self.file.read(bufsiz)
            if not buffer and not self.buffer.isspace():
                buffer += "\n"
            elif not buffer:
                return None
            self.buffer += buffer
Read an N-Triples line from buffered input.
https://github.com/schemaorg/sdopythonapp/blob/128be97d359178b26e5211a3e758933ff3a7b3df/lib/rdflib/plugins/parsers/ntriples.py#L168-L190
from __future__ import absolute_import from __future__ import division from __future__ import print_function __doc__ = """ N-Triples Parser License: GPL 2, W3C, BSD, or MIT Author: Sean B. Palmer, inamidst.com """ import re import codecs from rdflib.term import URIRef as URI from rdflib.term import BNode as bNode from rdflib.term import Literal from rdflib.compat import cast_bytes from rdflib.compat import decodeUnicodeEscape from rdflib.compat import ascii from six import BytesIO from six import string_types from six import text_type from six import unichr __all__ = ['unquote', 'uriquote', 'Sink', 'NTriplesParser'] uriref = r'<([^:]+:[^\s"<>]*)>' literal = r'"([^"\\]*(?:\\.[^"\\]*)*)"' litinfo = r'(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\^\^' + uriref + r')?' r_line = re.compile(r'([^\r\n]*)(?:\r\n|\r|\n)') r_wspace = re.compile(r'[ \t]*') r_wspaces = re.compile(r'[ \t]+') r_tail = re.compile(r'[ \t]*\.[ \t]*(#.*)?') r_uriref = re.compile(uriref) r_nodeid = re.compile(r'_:([A-Za-z0-9_:]([-A-Za-z0-9_:\.]*[-A-Za-z0-9_:])?)') r_literal = re.compile(literal + litinfo) bufsiz = 2048 validate = False class Node(text_type): pass class ParseError(Exception): pass class Sink(object): def __init__(self): self.length = 0 def triple(self, s, p, o): self.length += 1 print(s, p, o) quot = {'t': u'\t', 'n': u'\n', 'r': u'\r', '"': u'"', '\\': u'\\'} r_safe = re.compile(r'([\x20\x21\x23-\x5B\x5D-\x7E]+)') r_quot = re.compile(r'\\(t|n|r|"|\\)') r_uniquot = re.compile(r'\\u([0-9A-F]{4})|\\U([0-9A-F]{8})') def unquote(s): if not validate: if isinstance(s, text_type): s = decodeUnicodeEscape(s) else: s = s.decode('unicode-escape') return s else: result = [] while s: m = r_safe.match(s) if m: s = s[m.end():] result.append(m.group(1)) continue m = r_quot.match(s) if m: s = s[2:] result.append(quot[m.group(1)]) continue m = r_uniquot.match(s) if m: s = s[m.end():] u, U = m.groups() codepoint = int(u or U, 16) if codepoint > 0x10FFFF: raise ParseError("Disallowed codepoint: %08X" % codepoint) result.append(unichr(codepoint)) elif s.startswith('\\'): raise ParseError("Illegal escape at: %s..." % s[:10]) else: raise ParseError("Illegal literal character: %r" % s[0]) return u''.join(result) r_hibyte = re.compile(r'([\x80-\xFF])') def uriquote(uri): if not validate: return uri else: return r_hibyte.sub( lambda m: '%%%02X' % ord(m.group(1)), uri) class NTriplesParser(object): _bnode_ids = {} def __init__(self, sink=None): if sink is not None: self.sink = sink else: self.sink = Sink() def parse(self, f): if not hasattr(f, 'read'): raise ParseError("Item to parse must be a file-like object.") f = codecs.getreader('utf-8')(f) self.file = f self.buffer = '' while True: self.line = self.readline() if self.line is None: break try: self.parseline() except ParseError: raise ParseError("Invalid line: %r" % self.line) return self.sink def parsestring(self, s): if not isinstance(s, string_types): raise ParseError("Item to parse must be a string instance.") f = BytesIO() f.write(cast_bytes(s)) f.seek(0) self.parse(f)
Apache License 2.0
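An illustrative usage sketch for the parser above (not part of the original record): parsestring feeds a small N-Triples document through readline/parseline, and the default Sink prints each triple it receives.

data = '<http://example.org/a> <http://example.org/b> "c" .\n'
parser = NTriplesParser()
parser.parsestring(data)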
alan-turing-institute/sktime
extension_templates/annotation.py
MySeriesAnnotator._fit
python
def _fit(self, X, Y=None):
Fit to training data.

core logic

Parameters
----------
X : pd.DataFrame
    training data to fit model to, time series
Y : pd.Series, optional
    ground truth annotations for training if annotator is supervised

Returns
-------
self : returns a reference to self

State change
------------
creates fitted model (attributes ending in "_")
https://github.com/alan-turing-institute/sktime/blob/b09d2db201f5380907088c6ffce036eab7083327/extension_templates/annotation.py#L120-L141
from sktime.annotation.base import BaseSeriesAnnotator


class MySeriesAnnotator(BaseSeriesAnnotator):
    def __init__(
        self,
        est,
        parama,
        est2=None,
        paramb="default",
        paramc=None,
        fmt="dense",
        labels="indicator",
    ):
        self.est = est
        self.parama = parama
        self.paramb = paramb
        self.paramc = paramc

        super(MySeriesAnnotator, self).__init__(fmt=fmt, labels=labels)
BSD 3-Clause New or Revised License
nuagenetworks/vspk-python
vspk/v6/nuzfbrequest.py
NUZFBRequest.original_gateway_name
python
def original_gateway_name(self, value):
    self._original_gateway_name = value
Set original_gateway_name value.

Notes:
    For an NSG that is self-rebootstrapping following a quarantine action, this field represents the original name the gateway had before revoking.

    This attribute is named `originalGatewayName` in VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v6/nuzfbrequest.py#L818-L828
from .fetchers import NUPermissionsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUJobsFetcher from bambou import NURESTObject class NUZFBRequest(NURESTObject): __rest_name__ = "zfbrequest" __resource_name__ = "zfbrequests" CONST_ZFB_APPROVAL_STATUS_DENIED = "DENIED" CONST_REQUEST_TYPE_SELF_REBOOTSTRAP = "SELF_REBOOTSTRAP" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_REQUEST_TYPE_ZFB = "ZFB" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" CONST_ZFB_APPROVAL_STATUS_UNASSIGNED = "UNASSIGNED" CONST_ZFB_APPROVAL_STATUS_APPROVED = "APPROVED" CONST_REQUEST_TYPE_MIGRATING = "MIGRATING" CONST_ZFB_APPROVAL_STATUS_ASSIGNED = "ASSIGNED" CONST_ASSOCIATED_ENTITY_TYPE_GATEWAY = "GATEWAY" CONST_ASSOCIATED_ENTITY_TYPE_NSGATEWAY = "NSGATEWAY" def __init__(self, **kwargs): super(NUZFBRequest, self).__init__() self._mac_address = None self._zfb_approval_status = None self._zfb_bootstrap_enabled = None self._zfb_info = None self._zfb_request_retry_timer = None self._sku = None self._ip_address = None self._cpu_type = None self._nsg_version = None self._uuid = None self._family = None self._last_connected_time = None self._last_updated_by = None self._last_updated_date = None self._registration_url = None self._request_type = None self._serial_number = None self._embedded_metadata = None self._entity_scope = None self._hostname = None self._creation_date = None self._original_enterprise_name = None self._original_gateway_datapath_id = None self._original_gateway_name = None self._original_uplink_connection_info = None self._associated_enterprise_id = None self._associated_enterprise_name = None self._associated_entity_type = None self._associated_gateway_id = None self._associated_gateway_name = None self._associated_ns_gateway_id = None self._associated_ns_gateway_name = None self._status_string = None self._owner = None self._external_id = None self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="zfb_approval_status", remote_name="ZFBApprovalStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'APPROVED', u'ASSIGNED', u'DENIED', u'UNASSIGNED']) self.expose_attribute(local_name="zfb_bootstrap_enabled", remote_name="ZFBBootstrapEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="zfb_info", remote_name="ZFBInfo", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="zfb_request_retry_timer", remote_name="ZFBRequestRetryTimer", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="sku", remote_name="SKU", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="ip_address", remote_name="IPAddress", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="nsg_version", remote_name="NSGVersion", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_connected_time", remote_name="lastConnectedTime", attribute_type=float, is_required=False, 
is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="registration_url", remote_name="registrationURL", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="request_type", remote_name="requestType", attribute_type=str, is_required=False, is_unique=False, choices=[u'MIGRATING', u'SELF_REBOOTSTRAP', u'ZFB']) self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="hostname", remote_name="hostname", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="original_enterprise_name", remote_name="originalEnterpriseName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="original_gateway_datapath_id", remote_name="originalGatewayDatapathID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="original_gateway_name", remote_name="originalGatewayName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="original_uplink_connection_info", remote_name="originalUplinkConnectionInfo", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_enterprise_id", remote_name="associatedEnterpriseID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_enterprise_name", remote_name="associatedEnterpriseName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False, choices=[u'GATEWAY', u'NSGATEWAY']) self.expose_attribute(local_name="associated_gateway_id", remote_name="associatedGatewayID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_gateway_name", remote_name="associatedGatewayName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_ns_gateway_id", remote_name="associatedNSGatewayID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_ns_gateway_name", remote_name="associatedNSGatewayName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="status_string", remote_name="statusString", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, 
relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) @property def mac_address(self): return self._mac_address @mac_address.setter def mac_address(self, value): self._mac_address = value @property def zfb_approval_status(self): return self._zfb_approval_status @zfb_approval_status.setter def zfb_approval_status(self, value): self._zfb_approval_status = value @property def zfb_bootstrap_enabled(self): return self._zfb_bootstrap_enabled @zfb_bootstrap_enabled.setter def zfb_bootstrap_enabled(self, value): self._zfb_bootstrap_enabled = value @property def zfb_info(self): return self._zfb_info @zfb_info.setter def zfb_info(self, value): self._zfb_info = value @property def zfb_request_retry_timer(self): return self._zfb_request_retry_timer @zfb_request_retry_timer.setter def zfb_request_retry_timer(self, value): self._zfb_request_retry_timer = value @property def sku(self): return self._sku @sku.setter def sku(self, value): self._sku = value @property def ip_address(self): return self._ip_address @ip_address.setter def ip_address(self, value): self._ip_address = value @property def cpu_type(self): return self._cpu_type @cpu_type.setter def cpu_type(self, value): self._cpu_type = value @property def nsg_version(self): return self._nsg_version @nsg_version.setter def nsg_version(self, value): self._nsg_version = value @property def uuid(self): return self._uuid @uuid.setter def uuid(self, value): self._uuid = value @property def family(self): return self._family @family.setter def family(self, value): self._family = value @property def last_connected_time(self): return self._last_connected_time @last_connected_time.setter def last_connected_time(self, value): self._last_connected_time = value @property def last_updated_by(self): return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): self._last_updated_by = value @property def last_updated_date(self): return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): self._last_updated_date = value @property def registration_url(self): return self._registration_url @registration_url.setter def registration_url(self, value): self._registration_url = value @property def request_type(self): return self._request_type @request_type.setter def request_type(self, value): self._request_type = value @property def serial_number(self): return self._serial_number @serial_number.setter def serial_number(self, value): self._serial_number = value @property def embedded_metadata(self): return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): self._embedded_metadata = value @property def entity_scope(self): return self._entity_scope @entity_scope.setter def entity_scope(self, value): self._entity_scope = value @property def hostname(self): return self._hostname @hostname.setter def hostname(self, value): self._hostname = value @property def creation_date(self): return self._creation_date @creation_date.setter def creation_date(self, value): self._creation_date = value @property def original_enterprise_name(self): return self._original_enterprise_name @original_enterprise_name.setter def original_enterprise_name(self, value): self._original_enterprise_name = value @property def 
original_gateway_datapath_id(self): return self._original_gateway_datapath_id @original_gateway_datapath_id.setter def original_gateway_datapath_id(self, value): self._original_gateway_datapath_id = value @property def original_gateway_name(self): return self._original_gateway_name @original_gateway_name.setter
BSD 3-Clause New or Revised License
autodesk/nanodesign
nanodesign/visualizer/model.py
VisModel.show_structure_geometry
python
def show_structure_geometry(self, show):
    if show and len(self.structure_geometry) == 0:
        self._create_structure_geometry()
    for geom in self.structure_geometry:
        geom.visible = show
    self.graphics.display()
Show the structure geometry.
https://github.com/autodesk/nanodesign/blob/54c470a2c10011182b297dec969fadadfc0ba2b7/nanodesign/visualizer/model.py#L317-L323
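A minimal usage sketch of the toggle above; it assumes a fully constructed VisModel instance named vis_model (its file names, commands and DNA structure come from the surrounding application), so it is illustrative rather than runnable on its own:

# Hypothetical usage; `vis_model` stands in for an initialized VisModel.
vis_model.show_structure_geometry(True)   # lazily builds the geometry on first call, then displays it
vis_model.show_structure_geometry(False)  # hides the geometry again without discarding it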
import os import numpy as np from .atomic_struct import VisAtomicStructure from .cmd import VisCommand from .extent import VisExtent from .geometry import VisGeometryBox, VisGeometryCircle, VisGeometryPath, VisGeometryNumber, VisGeometryPolygon, VisGeometryLines, VisGeometryCylinder from .graphics import VisGraphics from .helix import VisHelix from .menu import VisMenu,VisMenuItem from .strand import VisStrand try: from OpenGL.GL import * from OpenGL.GLU import * from OpenGL.GLUT import * except ImportError as e: print "Could not import PyOpenGL." raise e class VisModelRepType: UNKNOWN = 'unknown' BOUNDING_BOX = 'Bounding box' GEOMETRY = 'Geometry' HELIX_NUMBERS = 'Virtual helix numbers' HELIX_PROJECTION = 'Virtual helix projection' class VisModel(object): def __init__(self, file_name, cmd_file_name, commands, dna_structure, atomic_structure): self.name = os.path.basename(file_name) self.file_name = file_name self.cmd_file_name = cmd_file_name self.commands = commands self.dna_structure = dna_structure self.extent = VisExtent() self.command = VisCommand(self, cmd_file_name, commands) self.graphics = VisGraphics(self.name, self.command) self.menu = None self.helices = {} self.helix_names = [] self.strands = {} self.strand_names = [] self.atomic_structure = atomic_structure self.atomic_structures = {} self.atomic_structure_names = [] self.bounding_box_geometry = [] self.helix_numbers_geometry = [] self.helix_projection_geometry = [] self.structure_geometry = [] self.domains_temperature_range = None self._logger = logging.getLogger(__name__) self._set_extent() self.dna_structure.compute_aux_data() self._create_helices() self._create_strands() self._create_atomic_structures() def _create_helices(self): dna_structure = self.dna_structure helix_list = sorted(list(dna_structure.structure_helices_map.values()), key=lambda x: x.lattice_num) self._logger.info("Number of virtual helices %d" % (len(helix_list))) self.helix_names.append(VisMenuItem.ALL) self.helix_names.append(VisMenuItem.NONE) for helix in helix_list: vis_helix = VisHelix(self, self.graphics, dna_structure, helix) self.helices[vis_helix.name] = vis_helix self.helix_names.append(vis_helix.name) def _create_strands(self): dna_structure = self.dna_structure strand_list = [] for strand in dna_structure.strands: vis_strand = VisStrand(self, self.graphics, dna_structure, strand) self.strands[vis_strand.name] = vis_strand strand_list.append(vis_strand) strand_list.sort(VisStrand.compare) self._logger.info("Number of strands %d" % (len(strand_list))) self.strand_names.append(VisMenuItem.ALL) self.strand_names.append(VisMenuItem.NONE) for strand in strand_list: self.strand_names.append(strand.name) def _create_atomic_structures(self): if not self.atomic_structure: return dna_structure = self.dna_structure self.atomic_structure_names.append(VisMenuItem.ALL) self.atomic_structure_names.append(VisMenuItem.NONE) molecules = self.atomic_structure.generate_structure_ss() self._logger.info("Number of molecules %d" % (len(molecules))) id = 1 atomic_struct_list = [] for molecule in molecules: atomic_struct = VisAtomicStructure(id, self, molecule, self.graphics) self.atomic_structures[atomic_struct.strand_name] = atomic_struct atomic_struct_list.append(atomic_struct) id += 1 atomic_struct_list.sort(VisAtomicStructure.compare) for atomic_struct in atomic_struct_list: self.atomic_structure_names.append(atomic_struct.strand_name) def start_interactive(self): self.graphics.set_extent(self.extent) cell_width = 0.1 self.graphics.initialize_graphics() 
self._create_menu() self.command.execute_file_cmds() self.command.execute_cmds() self.command.generate_model_cmd(VisModelRepType.BOUNDING_BOX, "true") self.command.generate_model_cmd(VisModelRepType.HELIX_NUMBERS, "true") self.graphics.start_interactive() def _set_extent(self): for helix in self.dna_structure.structure_helices_map.itervalues(): for coord in helix.helix_axis_coords: self.extent.update(coord[0], coord[1], coord[2]) xmin,xmax,ymin,ymax,zmin,zmax = self.extent.get_bounds() self._logger.info("Extent xmin %f ymin %f zmin %f" % (xmin, ymin, zmin)) self._logger.info(" xmax %f ymax %f zmax %f" % (xmax, ymax, zmax)) def _create_menu(self): self.menu = VisMenu(self.command, self.helix_names, self.strand_names, self.atomic_structure_names) self.graphics.menu = self.menu def show_helix(self, name, rep, attributes): show = None color = None for key,value in attributes: if key == 'show': show = value elif key == 'color': color = value if name not in self.helix_names: self._logger.error("Unknown helix named \'%s\' " % name) return if name == VisMenuItem.ALL: self._logger.info("Show all ") display = False for i,helix in enumerate(self.helices.values()): helix.show(rep,show,display) else: helix = self.helices[name] if color != None: helix.set_color(rep, color) if show != None: helix.show(rep,show) self.graphics.display() def show_atomic_struct(self, name, rep, show): if name not in self.atomic_structure_names: self._logger.error("Unknown atomic structure named \'%s\' " % name) return if name == VisMenuItem.ALL: display = False for atom_struct in self.atomic_structures.values(): atom_struct.show(rep,show,display) self.graphics.display() else: atomic_struct = self.atomic_structures[name] atomic_struct.show(rep,show) def show_strand(self, name, rep, attributes): show = None color = None line_width = None for key,value in attributes: if key == 'show': show = value elif key == 'color': color = value elif key == 'line_width': line_width = value if name not in self.strand_names: self._logger.error("Unknown strand named \'%s\' " % name) return if name == VisMenuItem.ALL: self._logger.info("Show all ") display = False for strand in self.strands.values(): strand.show(rep,show,display) self.graphics.display() else: strand = self.strands[name] if color != None: strand.set_color(rep, color) if line_width != None: strand.set_line_width(rep, line_width) if show != None: strand.show(rep,show) self.graphics.display() def show_bounding_box(self, show): self._logger.info("Show bounding box %s " % str(show)) if show and len(self.bounding_box_geometry) == 0: self._create_bounding_box() for geom in self.bounding_box_geometry: geom.visible = show self.graphics.display()
Apache License 2.0
wil3/gymfc
examples/gymfc_nf/envs/base.py
BaseEnv.step
python
def step(self, action):
    self.action = action.copy()
    self.y = self.action_to_control_signal(self.action, 0, 1)
    self.obs = self.step_sim(self.y)
    self.angular_rate = (self.imu_angular_velocity_rpy.copy() + self.sample_noise(self))
    self.true_error = self.angular_rate_sp - self.imu_angular_velocity_rpy
    self.measured_error = self.angular_rate_sp - self.angular_rate
    done = self.sim_time >= self.max_sim_time
    reward = self.compute_reward()
    self.update_setpoint()
    state = self.state_fn(self)
    self.last_measured_error = self.measured_error.copy()
    self.last_y = self.y.copy()
    self.step_counter += 1
    if self.step_callback:
        self.step_callback(self, state, reward, done)
    return state, reward, done, {}
Step the simulator and apply the provided action.

Args:
    action: numpy array where each value in the array is the action
        indexed by the actuator index defined in the model's SDF.
https://github.com/wil3/gymfc/blob/08df94b06a5d7f8fb6d2cb2c155751f5336720e7/examples/gymfc_nf/envs/base.py#L67-L105
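A sketch of the usual gym-style control loop around step(); BaseEnv is effectively abstract here (step_sim, update_setpoint and the reward live in concrete subclasses), so env stands in for such a subclass instance and reset()/action_space follow the standard gym API:

# Hypothetical loop; `env` is a concrete BaseEnv subclass with state_fn supplied.
state = env.reset()
done = False
while not done:
    action = env.action_space.sample()            # four motor commands in [-1, 1]
    state, reward, done, info = env.step(action)  # advances the simulator one step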
import math import numpy as np import gym from gym import spaces from gym.utils import seeding from gymfc.envs.fc_env import FlightControlEnv import time class BaseEnv(FlightControlEnv, gym.Env): def __init__(self, max_sim_time = 30, state_fn = None): self.max_sim_time = max_sim_time self.state_fn = state_fn self.np_random = None self._init() self.action_space = spaces.Box(-np.ones(4), np.ones(4), dtype=np.float32) self.action = self.action_space.low num_inputs = len(self.state_fn(self)) self.observation_space = spaces.Box(-np.inf, np.inf, shape=(num_inputs,), dtype=np.float32) self.sample_noise = lambda _: 0 self.step_callback = None def set_aircraft_model(self, model): super().__init__(aircraft_config=model)
MIT License
boschresearch/bcai_kaggle_champs
src/pipeline_pre.py
_create_embedding
python
def _create_embedding(series):
    types = sorted(series.unique().tolist())
    assert "<None>" not in types
    emb_index = dict(zip(["<None>"] + types, range(len(types) + 1)))
    return emb_index
Create a one-hot encoding embedding.

Args:
    series: A DataFrame series (column).

Returns:
    dict: Mapping of the entries (or "<None>") to the index number.
https://github.com/boschresearch/bcai_kaggle_champs/blob/4a42e18b5b88043fb40ec15289216a1d88789698/src/pipeline_pre.py#L501-L514
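A small worked example of the mapping this helper returns, assuming _create_embedding from above is in scope:

import pandas as pd

atoms = pd.Series(["C", "H", "O", "H", "C"])
emb_index = _create_embedding(atoms)
# sorted unique values get indices 1..n, with "<None>" reserved for index 0:
# {'<None>': 0, 'C': 1, 'H': 2, 'O': 3}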
import collections import gzip import itertools import json import os import pickle import sys import numpy as np import pandas as pd import rdkit import xyz2mol as x2m bond_order_dict = { rdkit.Chem.rdchem.BondType.SINGLE: 1, rdkit.Chem.rdchem.BondType.AROMATIC: 1.5, rdkit.Chem.rdchem.BondType.DOUBLE: 2, rdkit.Chem.rdchem.BondType.TRIPLE: 3} root = '../' with open(os.path.join(root,'SETTINGS.json')) as f: settings = json.load(f) with open(os.path.join(root,settings['CONFIG_DIR'],'manual_bond_order_fix.json')) as f: manual_bond_order_dict = json.load(f) atomic_num_dict = { 'H':1, 'C':6, 'N':7, 'O':8, 'F':9 } classification_corrections = { '1JHN_2_2_1_1':'1JHN_3_2_2_1', '3JHN_4.5_3_1.5_1.5':'3JHN_4_3_1.5_1.5', '2JHC_3_3_1_1':'2JHC_4_3_2_1', '3JHC_3_3_1_1':'3JHC_4_3_2_1', '3JHC_4_2_2_2':'3JHC_4_2_3_1'} small_longtypes = {'2JHN_4.5_2_3_1.5', '3JHN_4_2_3_1', '2JHN_4_2_3_1', '2JHN_4.5_3_1.5_1.5', '2JHN_4_3_2_1', '3JHN_4_4_1_1', '3JHN_4_3_2_1', '2JHN_4_4_1_1', '3JHN_4.5_2_3_1.5', '2JHN_4_2_2_2', '3JHN_4_2_2_2', '1JHN_4_3_2_1', '1JHN_4_4_1_1', '2JHN_3_1_3_0'} (MAX_ATOM_COUNT,MAX_BOND_COUNT,MAX_TRIPLET_COUNT,MAX_QUAD_COUNT) = (29, 406, 54, 117) def make_structure_dict(atoms_dataframe): atoms = atoms_dataframe.sort_values(["molecule_name", "atom_index"]) structure_dict = collections.defaultdict(lambda: {"symbols":[],"positions":[]}) for index,row in atoms.iterrows(): structure_dict[row["molecule_name"]]["symbols"].append(row["atom"]) structure_dict[row["molecule_name"]]["positions"].append([row["x"],row["y"],row["z"]]) return structure_dict def enhance_structure_dict(structure_dict): import pybel for molecule_name in structure_dict: molecule = structure_dict[molecule_name] positions = np.array(molecule['positions']) n_atom = positions.shape[0] molecule['positions'] = positions pos1 = np.tile(positions, (n_atom,1,1) ) pos2 = np.transpose(pos1, (1,0,2) ) dist = np.linalg.norm(pos1 - pos2, axis=-1) molecule['distances'] = dist sorted_j = np.argsort(dist, axis=-1) relpos1 = positions[sorted_j[:,1],:] - positions[sorted_j[:,0],:] relpos2 = positions[sorted_j[:,2],:] - positions[sorted_j[:,0],:] cos = np.sum(relpos1*relpos2,axis=1) / (np.linalg.norm(relpos1,axis=1) * np.linalg.norm(relpos2,axis=1)) angle = np.arccos( np.clip(cos,-1.0,1.0) ).reshape((n_atom,1)) / np.pi molecule['angle'] = angle[:,0] molecule['bond_orders'] = np.zeros((n_atom,n_atom)) atomicNumList = [atomic_num_dict[symbol] for symbol in molecule['symbols']] if molecule_name in manual_bond_order_dict: molecule['bond_orders'] = np.array(manual_bond_order_dict[molecule_name],dtype=float) else: mol = x2m.xyz2mol(atomicNumList,0,positions,True,True) for bond in mol.GetBonds(): atom0, atom1 = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx() bond_order = bond.GetBondType() molecule['bond_orders'][atom0,atom1] = bond_order_dict[bond_order] molecule['bond_orders'][atom1,atom0] = bond_order_dict[bond_order] molecule['top_bonds'] = np.sort(molecule['bond_orders'],axis=-1)[:,-1:-5:-1] molecule['bond_ids'] = np.hstack((molecule['top_bonds'].sum(axis=-1)[:,np.newaxis], np.sum(molecule['top_bonds']>1e-3,axis=-1)[:,np.newaxis], molecule['top_bonds'][:,:2])) molecule['long_symbols'] = ['_'.join([ molecule['symbols'][i]]+[str(x) for x in molecule['bond_ids'][i]]) for i in range(n_atom)] chem_bond_atoms = [sorted([molecule['symbols'][i] for i in molecule['bond_orders'][atom_index].nonzero()[0]]) for atom_index in range(n_atom)] molecule['sublabel_atom'] = ['-'.join([molecule['long_symbols'][atom_index]]+chem_bond_atoms[atom_index]) for atom_index in 
range(n_atom)] xyz = str(n_atom)+'\n\n' + '\n'.join([ ' '.join( [ str(molecule['symbols'][i]), str(molecule['positions'][i,0]), str(molecule['positions'][i,1]), str(molecule['positions'][i,2])] ) for i in range(n_atom)]) mol = pybel.readstring('xyz',xyz) molecule['charges'] = [mol.atoms[i].partialcharge for i in range(n_atom)] molecule['spins'] = [mol.atoms[i].spin for i in range(n_atom)] molecule['heavyvalences'] = [mol.atoms[i].heavyvalence for i in range(n_atom)] molecule['heterovalences'] = [mol.atoms[i].heterovalence for i in range(n_atom)] molecule['valences'] = [mol.atoms[i].valence for i in range(n_atom)] molecule['hyb_types'] = [mol.atoms[i].type for i in range(n_atom)] return structure_dict def enhance_atoms(atoms_dataframe,structure_dict): assert int(atoms_dataframe.groupby("molecule_name").count().max()[0]) <= MAX_ATOM_COUNT for key in ['distances','angle', 'bond_orders', 'top_bonds', 'bond_ids', 'long_symbols','sublabel_atom', 'charges', 'spins', 'heavyvalences', 'heterovalences', 'valences', 'hyb_types']: newkey = key if key[-1]!='s' else key[:-1] atoms_dataframe[newkey] = atoms_dataframe.apply(lambda x: structure_dict[x['molecule_name']][key][x['atom_index']], axis=1) atoms_dataframe.rename(columns={'long_symbol':'labeled_atom'},inplace=True) return atoms_dataframe def enhance_bonds(bond_dataframe,structure_dict): bond_dataframe.sort_values(['molecule_name','atom_index_0','atom_index_1'],inplace=True) assert int(bond_dataframe.groupby("molecule_name").count().max()[0]) <= MAX_BOND_COUNT new_columns = collections.defaultdict(list) for index,row in bond_dataframe.iterrows(): molecule_name, iatom0, iatom1 = row['molecule_name'],row['atom_index_0'],row['atom_index_1'] if 'predict' not in structure_dict[molecule_name]: structure_dict[molecule_name]['predict'] = structure_dict[molecule_name]['bond_orders'] * 0 structure_dict[molecule_name]['predict'][iatom0,iatom1] = 1 structure_dict[molecule_name]['predict'][iatom1,iatom0] = 1 long_symbols = [structure_dict[molecule_name]['long_symbols'][x] for x in [iatom0,iatom1]] if all([x[0]=='H' for x in long_symbols]): lt = row['type'] elif not any([x[0]=='H' for x in long_symbols]): raise ValueError("No hydrogen found in {}".format(row)) else: ls = [x for x in long_symbols if x[0]!='H'][0] lt = row["type"] + ls[1:].replace('.0','') if lt in classification_corrections: lt = classification_corrections[lt] if lt in small_longtypes: lt = lt.split('_')[0] new_columns["labeled_type"].append(lt) new_columns["sublabel_type"].append(row['type'] + '-'+ '-'.join(sorted(long_symbols))) new_columns["bond_order"].append(structure_dict[molecule_name]['bond_orders'][iatom0,iatom1]) new_columns["predict"].append(1) for key in new_columns: bond_dataframe[key] = new_columns[key] return bond_dataframe def add_all_pairs(bond_dataframe,structure_dict): iadd = -1 new_data = collections.defaultdict(list) for molecule_name in bond_dataframe["molecule_name"].unique(): n_atom = len(structure_dict[molecule_name]["symbols"]) for iatom1,iatom0 in itertools.combinations(range(n_atom),r=2): if 'predict' not in structure_dict[molecule_name]: raise KeyError('{} has no "predict" value'.format(molecule_name)) if structure_dict[molecule_name]['predict'][iatom0,iatom1]: continue symbols = [structure_dict[molecule_name]['symbols'][i] for i in [iatom0,iatom1]] bond_order = structure_dict[molecule_name]['bond_orders'][iatom0,iatom1] nottype = '-'.join(sorted(symbols)) + '_' + str(bond_order) row = 
{'id':iadd,'molecule_name':molecule_name,'atom_index_0':iatom0,'atom_index_1':iatom1, 'type':nottype,'labeled_type':nottype,'sublabel_type':nottype, 'bond_order': bond_order, 'predict':0} if 'scalar_coupling_constant' in bond_dataframe: row['scalar_coupling_constant'] = 0. for k,v in row.items(): new_data[k].append(v) iadd -= 1 new_data = pd.DataFrame(new_data) if bond_dataframe.index.name!='id': bond_dataframe = bond_dataframe.set_index('id') new_data.set_index('id',inplace=True) all_data = bond_dataframe.append(new_data,verify_integrity=True,sort=False) return all_data def make_triplets(molecule_list,structure_dict): new_data = collections.defaultdict(list) for molecule_name in molecule_list: molecule = structure_dict[molecule_name] bond_orders = molecule['bond_orders'] short = molecule['symbols'] long = molecule['long_symbols'] for i, atom_bond_order in enumerate(bond_orders): connection_indices = atom_bond_order.nonzero()[0] pairs = itertools.combinations(connection_indices,2) for pair in pairs: j, k = pair[0], pair[1] atom0_short = short[i] + long[i].split('_')[2] atom1_short = short[j] + long[j].split('_')[2] atom2_short = short[k] + long[k].split('_')[2] atom0_long = long[i] atom1_long = long[j] atom2_long = long[k] labels = [atom1_short,atom2_short] labels.sort() label = '-'.join([atom0_short]+labels) sublabels = [atom1_long,atom2_long] sublabels.sort() sublabel = '-'.join([atom0_long]+sublabels) r10 = molecule['positions'][j] - molecule['positions'][i] r20 = molecule['positions'][k] - molecule['positions'][i] angle = np.sum(r10*r20) / (np.linalg.norm(r10)*np.linalg.norm(r20)) angle = np.arccos( np.clip(angle,-1.0,1.0) ) row = {'molecule_name':molecule_name,'atom_index_0':i,'atom_index_1':j,'atom_index_2':k, 'label':label,'sublabel':sublabel,'angle':angle} for k,v in row.items(): new_data[k].append(v) ans = pd.DataFrame(new_data) ans.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2']) assert int(ans.groupby("molecule_name").count().max()[0]) <= MAX_TRIPLET_COUNT return ans def make_quadruplets(molecule_list,structure_dict): new_data = collections.defaultdict(list) icount = 0 for molecule_name in molecule_list: molecule = structure_dict[molecule_name] bond_orders = molecule['bond_orders'] short = molecule['symbols'] long = molecule['long_symbols'] pos = molecule['positions'] for i,j in zip(*bond_orders.nonzero()): if i > j: continue for i_nei,j_nei in itertools.product( bond_orders[i].nonzero()[0],bond_orders[j].nonzero()[0]): if j_nei==i or i_nei==j: continue mode = 'fast' assert ['test','fast','slow'].count(mode),'Mode must be one of: test, fast, slow' if ['test','slow'].count(mode): plane_1 = np.cross( pos[i_nei]-pos[i], pos[j]-pos[i]) plane_2 = np.cross( pos[i]-pos[j],pos[j_nei]-pos[j]) if np.allclose(plane_1,0.) 
or np.allclose(plane_2,0.): continue costheta = np.dot(plane_1,plane_2) / ( np.linalg.norm(plane_1)*np.linalg.norm(plane_2)) costheta1 = costheta if ['test','fast'].count(mode): ijpos = np.array([ pos[i_nei] - pos[i], pos[j] - pos[i], pos[j_nei] - pos[j], ]) dots = np.dot(ijpos,ijpos.T) numerator = dots[1,1]*dots[0,2] - dots[0,1]*dots[1,2] denominator = np.sqrt( ( dots[0,0]*dots[1,1]-dots[0,1]**2) * ( dots[2,2]*dots[1,1]-dots[2,1]**2 )) if abs(denominator) < 1e-7: continue costheta = numerator / denominator if mode=='test': assert abs(costheta-costheta1)<1e-4,"Fancy algebra failed" icount += 1 if icount > 50000: raise Exception("50K counts confirmed.") assert abs(costheta)<1.0001,'Cos theta too large' dihedral = np.arccos( np.clip(costheta,-1.0,1.0) ) label = '_'.join(sorted([ '_'.join([short[i],short[i_nei]]), '_'.join([short[j],short[j_nei]]), ])) sublabel4 = '_'.join(sorted([ '_'.join([short[i],short[i_nei],str(bond_orders[i,i_nei].round(1))]), '_'.join([short[j],short[j_nei],str(bond_orders[j,j_nei].round(1))]), ]) + [str(bond_orders[i,j].round(1))] ).replace('.0','') sublabel = '_'.join(sorted([ '_'.join([short[i],short[i_nei]]), '_'.join([short[j],short[j_nei]]), ]) + [str(bond_orders[i,j].round(1))] ).replace('.0','') sublabel2 = '_'.join(sorted([ '_'.join([short[i]+long[i].split('_')[1],short[i_nei]]), '_'.join([short[j]+long[j].split('_')[1],short[j_nei]]), ]) + [str(bond_orders[i,j].round(1))] ).replace('.0','') sublabel3 = '_'.join(sorted([ '_'.join([short[i]+long[i].split('_')[1],short[i_nei]]), '_'.join([short[j]+long[j].split('_')[1],short[j_nei]]), ])).replace('.0','') row = {'molecule_name':molecule_name, 'atom_index_0':i,'atom_index_1':j,'atom_index_2':i_nei,'atom_index_3':j_nei, 'label':label,'sublabel':sublabel,'sublabel2':sublabel2,'sublabel3':sublabel3, 'sublabel4':sublabel4,'angle':dihedral} for k,v in row.items(): new_data[k].append(v) ans = pd.DataFrame(new_data) ans.sort_values(['molecule_name','atom_index_0','atom_index_1','atom_index_2','atom_index_3']) assert int(ans.groupby("molecule_name").count().max()[0]) <= MAX_QUAD_COUNT return ans def write_csv(directory,label,atoms,bonds,triplets,quadruplets): filename = os.path.join(directory,'new_big_{}.csv.bz2') if atoms is not None and len(atoms): atoms = atoms.sort_values(["molecule_name",'atom_index']) for i in range(4): atoms["top_bond_{}".format(i)] = [x[i] if len(x)>i else 0.0 for x in atoms["top_bond"].values] for i in ["x","y","z"]: atoms[i] = atoms[i].values.round(10) renames = {k:k[:-1] for k in atoms.columns if k[-1]=='s'} renames.update({'long_symbols':'labeled_atom'}) atoms = atoms.rename(columns=renames) atoms.to_csv(filename.format('structures'),index=False,columns= 'molecule_name,atom_index,atom,x,y,z,labeled_atom,angle,top_bond_0,top_bond_1,top_bond_2,top_bond_3,sublabel_atom,charge,spin,heavyvalence,heterovalence,valence,hyb_type'.split(',')) if bonds is not None and len(bonds): bonds = bonds.reset_index() bond_columns = 'id,molecule_name,atom_index_0,atom_index_1,type,scalar_coupling_constant,labeled_type,sublabel_type,bond_order,predict'.split(',') if 'scalar_coupling_constant' not in bonds.columns: bond_columns = [x for x in bond_columns if x!='scalar_coupling_constant'] bonds = bonds.sort_values(["predict","molecule_name",'atom_index_0','atom_index_1'], ascending=[False,True,True,True]) bonds.to_csv(filename.format(label),index=False,columns=bond_columns) if triplets is not None and len(triplets): triplets = triplets.sort_values(["molecule_name",'atom_index_0','atom_index_1','atom_index_2']) 
triplets.to_csv(filename.format(label+'_triplets'),index=False,columns= 'molecule_name,atom_index_0,atom_index_1,atom_index_2,label,sublabel,angle'.split(',')) if quadruplets is not None and len(quadruplets): quadruplets = quadruplets.sort_values(["molecule_name",'atom_index_0','atom_index_1', 'atom_index_2','atom_index_3']) quadruplets.to_csv(filename.format(label+'_quadruplets'),index=False,columns= 'molecule_name,atom_index_0,atom_index_1,atom_index_2,atom_index_3,label,sublabel,sublabel2,sublabel3,sublabel4,angle'.split(','))
MIT License
jorgemf/kaggle_redefining_cancer_treatment
src/preprocess_data.py
load_or_parse_numbers_dataset
python
def load_or_parse_numbers_dataset(filename, dataset, saving_fn=save_csv_dataset,
                                  loading_fn=load_csv_dataset):
    if not os.path.exists(os.path.join(DIR_GENERATED_DATA, filename)):
        for datasample in dataset:
            words = datasample.text.split()
            parsed_words = []
            for word in words:
                try:
                    number = float(word)
                    parsed_words.append(encode_number(number))
                except ValueError:
                    parsed_words.append(word)
            datasample.text = ' '.join(parsed_words)
        saving_fn(filename, dataset)
    return loading_fn(filename)
Loads the parsed dataset from a file, or parses a dataset of DataSample or WikipediaGene
to transform all the numbers into symbols and saves it into the file.

:param filename: name of the file
:param List[DataSample|WikipediaGene] dataset: the dataset of DataSample or WikipediaGene
:param saving_fn: The function used to save the dataset
:param loading_fn: The function used to load the dataset
:return List[DataSample|WikipediaGene]: the dataset
https://github.com/jorgemf/kaggle_redefining_cancer_treatment/blob/d9e6b37231c5b3706f94de4d71bd9d9358e147a0/src/preprocess_data.py#L425-L449
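A hedged sketch of where this sits in the preprocessing pipeline; the cache filename is hypothetical, and it assumes the raw Kaggle files have already been extracted into DIR_DATA so that load_raw_dataset and encode_number from this module work:

# Hypothetical pipeline step; 'train_set_numbers' is a made-up cache filename.
train_set = load_raw_dataset('training_text', 'training_variants')
train_set = load_or_parse_numbers_dataset('train_set_numbers', train_set)
# numbers in each sample's text are now replaced by encode_number() symbols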
import zipfile import re import os import unicodecsv as csv import sys if sys.version_info >= (3,0): import urllib.request as urllib else: import urllib from bs4 import BeautifulSoup import unicodedata import copy import nltk import pandas as pd from .configuration import * csv.field_size_limit(sys.maxsize) def extract_zip_file(filepath, directory): zip_ref = zipfile.ZipFile(filepath, 'r') zip_ref.extractall(directory) zip_ref.close() def extract_zip_files(): files = ['training_text', 'training_variants', 'test_text', 'test_variants'] for file in files: filepath = os.path.join(DIR_DATA, file) if not os.path.exists(filepath): extract_zip_file('{}.zip'.format(file), DIR_DATA) def load_csv_dataset(filename): dataset = [] with open(os.path.join(DIR_GENERATED_DATA, filename), 'rb') as file: reader = csv.reader(file, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL, errors='ignore') for row in reader: id = int(row[0]) text = row[1] gene = row[2] variation = row[3] try: real_class = int(row[4]) except: real_class = None dataset.append(DataSample(id, text, gene, variation, real_class)) return dataset def save_csv_dataset(filename, dataset): with open(os.path.join(DIR_GENERATED_DATA, filename), 'wb') as file: writer = csv.writer(file, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL) for i, d in enumerate(dataset): writer.writerow([str(d.id), d.text, d.gene, d.variation, str(d.real_class)]) def load_csv_wikipedia_gen(filename): dataset = [] with open(os.path.join(DIR_GENERATED_DATA, filename)) as file: reader = csv.reader(file, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL) for row in reader: dataset.append(WikipediaGene(row[0], row[1])) return dataset def save_csv_wikipedia_gen(filename, wikipedia_genes): with open(os.path.join(DIR_GENERATED_DATA, filename), 'wb') as file: writer = csv.writer(file, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL) for d in wikipedia_genes: writer.writerow([str(d.gene), d.text]) class DataSample(object): def __init__(self, id, text, gene, variation, real_class=None): self.id = id self.text = text self.gene = gene self.variation = variation self.real_class = real_class def __copy__(self): text_copy = copy.deepcopy(self.text) return DataSample(self.id, text_copy, self.gene, self.variation, self.real_class) def load_raw_dataset(text_file, variants_file, ignore_empty=False): dp_txt = pd.read_csv(os.path.join(DIR_DATA, text_file), sep='\|\|', header=None, skiprows=1, names=["ID", "Text"], encoding = 'utf8') dp_var = pd.read_csv(os.path.join(DIR_DATA, variants_file), encoding = 'utf8') dp = pd.merge(dp_var, dp_txt, how='left', on='ID') data = [] for i in range(len(dp)): id = int(dp['ID'][i]) text = dp['Text'][i].strip() if ignore_empty and len(text) < 10: continue gene = dp['Gene'][i].strip() variation = dp['Variation'][i].strip() if 'Class' in dp: real_class = int(dp['Class'][i]) else: real_class = None data.append(DataSample(id, text, gene, variation, real_class)) return data def load_or_clean_text_dataset(filename, dataset, saving_fn=save_csv_dataset, loading_fn=load_csv_dataset): if not os.path.exists(os.path.join(DIR_GENERATED_DATA, filename)): for datasample in dataset: datasample.text = clean_text(datasample.text) saving_fn(filename, dataset) return loading_fn(filename) RE_BIBLIOGRAPHIC_REFERENCE_1 = re.compile(r"\s*\[[\d\s,]+\]\s*") RE_BIBLIOGRAPHIC_REFERENCE_2 = re.compile(r"\s*\(([a-zA-Z\s\.,]+\d{2,4}\s*;?)+\s*\)\s*") RE_BIBLIOGRAPHIC_REFERENCE_3 = re.compile(r"\s*\[([a-zA-Z\s\.,]+\d{2,4}\s*;?)+\s*\]\s*") 
RE_BIBLIOGRAPHIC_REFERENCE_4 = re.compile(r"\s*\([\d,\s]+\)\s*") RE_BIBLIOGRAPHIC_REFERENCE_5 = re.compile(r"\s*(\w+ et al\.,?)+") RE_FIGURES = re.compile(r"\s*(Fig(ure)?\.? [\w,]+)\s*") RE_TABLES = re.compile(r"\s*(Table\.? [\w,]+)\s*") RE_WHITE_SPACES = re.compile(r"\s+") RE_EMTPY_PARENTHESES = re.compile(r"\(\s*(and)?\s*\)") RE_URLS = re.compile(r"((http|ftp)s?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|(www|ftp)\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|(http|ftp)s?:\/\/(?:www\.|(?!www)|(ftp))[a-zA-Z0-9]\.[^\s]{2,}|(www|ftp)\.[a-zA-Z0-9]\.[^\s]{2,})") def clean_text(text): text = re.sub(RE_BIBLIOGRAPHIC_REFERENCE_1, ' ', text) text = re.sub(RE_BIBLIOGRAPHIC_REFERENCE_2, ' ', text) text = re.sub(RE_BIBLIOGRAPHIC_REFERENCE_3, ' ', text) text = re.sub(RE_BIBLIOGRAPHIC_REFERENCE_4, ' ', text) text = re.sub(RE_BIBLIOGRAPHIC_REFERENCE_5, ' ', text) text = re.sub(RE_FIGURES, '', text) text = re.sub(RE_TABLES, '', text) text = re.sub(RE_URLS, ' ', text) text = re.sub(RE_EMTPY_PARENTHESES, '', text) text = text.replace('...', '.') for symbol in ['(', ')', '/', '-', '\xe2', '\'', '\"', '%', ':', '?', ', ', '. ', '<', '>', '=', '-', ';', '!', '°C', '*', '[', ']' ]: text = text.replace(symbol, ' {} '.format(symbol)) text = re.sub(RE_WHITE_SPACES, ' ', text) return text def group_count(elements, group=None): if group is None: group = {} for e in elements: if isinstance(e, list): group = group_count(e, group) elif e in group: group[e] += 1 else: group[e] = 1 return group def show_stats(train_set, test_set): print("{} samples in the training set".format(len(train_set))) print("{} samples in the test set".format(len(test_set))) classes = [d.real_class for d in train_set] classes_group = group_count(classes) classes_string = ", ".join( ["{}:{}".format(k, classes_group[k]) for k in sorted(classes_group.keys())]) print("{} different classes: {}".format(len(set(classes)), classes_string)) train_genes = [d.gene for d in train_set] test_genes = [d.gene for d in test_set] print("{} genes in training set".format(len(set(train_genes)))) print("{} genes in test set".format(len(set(test_genes)))) print("{} genes in test and train set".format(len(set(test_genes + train_genes)))) class WikipediaGene(object): def __init__(self, gene, text): self.gene = gene self.text = text def get_genes_articles_from_wikipedia(genes): data = [] for gen in genes: filename = os.path.join(DIR_WIKIPEDIA_GENES, 'wikipedia_gen_{}'.format(gen)) if not os.path.exists(filename): url = 'https://en.wikipedia.org/wiki/{}'.format(gen) try: html = BeautifulSoup(urllib.urlopen(url).read(), 'lxml') html_data = html.find(id='mw-content-text').div.find_all('p') text_data = [h.get_text().strip() for h in html_data] text_data = [t for t in text_data if len(t) > 30 and len(t.split()) > 10] text_data = [unicodedata.normalize('NFKD', l) .encode('ascii', 'ignore') for l in text_data] except: text_data = [''] with open(filename, 'wb') as f: f.writelines(text_data) with open(filename, 'r') as f: text_lines = f.readlines() text = '\n'.join(text_lines) data.append(WikipediaGene(gen, text)) return data def load_or_parse_mutations_dataset(filename, dataset, genes, saving_fn=save_csv_dataset, loading_fn=load_csv_dataset): if not os.path.exists(os.path.join(DIR_GENERATED_DATA, filename)): for datasample in dataset: words = datasample.text parsed_words = [] for word in words: if is_mutation(word, genes): parsed_words.extend(split_mutation(word)) else: parsed_words.append(word) datasample.text = ' '.join(parsed_words) 
parsed_variation = [] if isinstance(datasample, DataSample): for word in datasample.variation.split(): if is_mutation(word, genes): parsed_variation.extend(split_mutation(word)) else: parsed_variation.append(word) datasample.variation = ' '.join(parsed_variation) saving_fn(filename, dataset) return loading_fn(filename) def is_mutation(word, genes): word = word.strip() if len(word) >= 3 and word not in genes: has_hyphen_minus = '_' in word has_hyphen = '-' in word has_digits = any(ch.isdigit() for ch in word) has_three_digits = sum(1 for ch in word if ch.isdigit()) > 2 has_upper_case = any(ch.isupper() for ch in word) has_two_upper_case = sum(1 for ch in word if ch.isupper()) > 1 has_lower_case = any(ch.islower() for ch in word) has_symbols = any(not ch.isalnum() for ch in word) return has_hyphen_minus or (has_digits and has_two_upper_case) or (has_three_digits and has_upper_case) or (has_digits and has_upper_case and has_symbols) or (has_digits and has_lower_case) or (has_hyphen and has_two_upper_case) or (has_lower_case and has_two_upper_case) return False def split_mutation(word): word = word.strip() for symbol in ['del', 'ins', 'dup', 'trunc', 'splice', 'fs', 'null', 'Fusion', '#', '+']: word = word.replace(symbol, ' >{} '.format(symbol)) i = 0 new_words = [] while i < len(word): if word[i] == '>': j = i + 1 while j < len(word) and word[j] != ' ': j += 1 new_words.append(u'{}'.format(word[i:j])) i = j elif word[i] != ' ': new_words.append(u'>{}'.format(word[i])) i += 1 else: i += 1 return new_words
MIT License
hertogp/imagine
pandoc_imagine.py
Flydraw.image
python
def image(self):
    self.im_fmt = 'gif'
    self.outfile = self.basename + '.%s' % self.im_fmt
    self.msg(4, "im_fmt", self.im_fmt)
    self.msg(4, "im_opt", self.im_opt)
    self.im_out = [x for x in self.im_out if x not in ['stdout']]
    args = self.im_opt
    if self.cmd(self.im_prg, stdin=self.code, *args):
        if self.stdout:
            self.write('wb', self.stdout, self.outfile)
        return self.result()
flydraw {im_opt} < code-text
https://github.com/hertogp/imagine/blob/fe3b783d34d387642180e2bdc27c1fa7e1ca5cf5/pandoc_imagine.py#L693-L706
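A rough sketch of how Imagine could reach this handler for a pandoc code block of class .flydraw; the codec tuple mimics pandoc's CodeBlock AST node, the code text is a made-up flydraw command, and the external flydraw binary must be on PATH for the conversion to succeed:

# Hypothetical dispatch; requires the external `flydraw` program to be installed.
codec = (['', ['flydraw'], []], 'line 0,0 100,100\n')
dispatcher = Handler(None, 'html', {})      # bare dispatcher instance
worker = dispatcher(codec, 'html', {})      # resolves to a Flydraw worker by class name
result = worker.image()                     # pipes the code to flydraw, writes a .gif, returns the Image element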
from __future__ import print_function import os import sys import stat from textwrap import wrap from subprocess import Popen, CalledProcessError, PIPE from six import with_metaclass import pandocfilters as pf __version__ = '0.1.7' def to_str(s, enc='ascii'): err = 'replace' if isinstance(s, str): return s if isinstance(s, bytes): return s.decode(enc, err) try: return s.encode(enc, err) except AttributeError: return to_str(str(s)) def to_bytes(s, enc='ascii'): err = 'replace' if isinstance(s, bytes): try: return s.decode(enc, err).encode(enc, err) except AttributeError: return s.encode(enc, err) if isinstance(s, str): try: return s.encode(enc, err) except UnicodeDecodeError: return s.decode(enc, err).encode(enc, err) try: return to_bytes(str(s), sys.getdefaultencoding()) except UnicodeEncodeError: return s.encode(enc, err) class HandlerMeta(type): def __init__(cls, name, bases, dct): super(HandlerMeta, cls).__init__(name, bases, dct) for klass in dct.get('cmdmap', {}): cls.workers[klass.lower()] = cls class Handler(with_metaclass(HandlerMeta, object)): severity = 'error warn note info debug'.split() workers = {} klass = None meta = {} cmdmap = {} output = 'img' im_dir = 'pd' im_fmt = 'png' im_log = 0 im_opt = '' im_out = 'img' im_prg = None def __call__(self, codec, fmt, meta): self.msg(4, 'dispatch request for', codec[0]) try: _, klasses, keyvals = codec[0] except Exception as e: self.msg(0, 'Fatal: invalid codeblock passed in', codec) raise e for klass in klasses: worker = self.workers.get(klass.lower(), None) if worker is not None: worker.klass = klass.lower() self.msg(4, '- dispatched by class to', worker) return worker(codec, fmt, meta) if keyvals: prog, _ = pf.get_value(keyvals, 'im_prg', '') worker = self.workers.get(prog.lower(), None) if worker is not None: self.msg(4, codec[0], 'dispatched by prog to', worker) return worker(codec, fmt, meta) self.msg(4, codec[0], 'dispatched by default to', self) return self def __init__(self, codec, fmt, meta): self.codec = codec self.fmt = fmt self.stdout = '' self.stderr = '' if codec is None: return cb = self.get_cb_opts(codec) kd = self.get_md_opts(meta).get(self.klass, {}) md = self.md_opts opts = [x for x in dir(self) if x.startswith('im_')] for opt in opts: val = cb.get(opt, kd.get(opt, md.get(opt, getattr(self, opt)))) setattr(self, opt, val) self.im_opt = self.im_opt.split() self.im_out = self.im_out.lower().replace(',', ' ').split() self.im_log = int(self.im_log) self.im_fmt = pf.get_extension(fmt, self.im_fmt) if not self.im_prg: self.im_prg = self.cmdmap.get(self.klass, None) if self.im_prg is None: self.msg(0, self.klass, 'not listed in', self.cmdmap) raise Exception('no worker found for %s' % self.klass) self.basename = pf.get_filename4code(self.im_dir, str(codec)) self.outfile = self.basename + '.%s' % self.im_fmt self.inpfile = self.basename + '.%s' % self.klass if not os.path.isfile(self.inpfile): self.write('w', self.code, self.inpfile) def get_md_opts(self, meta): dct = {} try: sep = "." 
for k,v in meta.items(): if not k.lower().startswith("imagine."): continue if k.count(sep) == 1: _, opt = k.split(sep) dct[opt] = pf.stringify(v) elif k.count(sep) == 2: _, klass, opt = k.split(sep) if not dct.get(klass): dct[klass] = {} dct[klass][opt] = pf.stringify(v) except AttributeError: pass self.msg(4, "meta-data:", dct) self.md_opts = dct return dct def get_cb_opts(self, codec): dct = {} opts = [x for x in dir(self) if x.startswith('im_')] (self.id_, self.classes, self.keyvals), self.code = codec self.caption, self.typef, self.keyvals = pf.get_caption(self.keyvals) self.classes = [k for k in self.classes if k not in self.workers] for opt in opts: val, self.keyvals = pf.get_value(self.keyvals, opt, None) if val: dct[opt] = val self.cb_opts = dct self.msg(4, "codeblock:", dct) return dct def read(self, mode, src): try: with open(src, mode) as f: return f.read() except (OSError, IOError) as e: self.msg(0, 'fail: could not read %si (%s)' % (src, repr(e))) return '' return '' def write(self, mode, dta, dst): if not dta: self.msg(3, 'skipped writing 0 bytes to', dst) return False try: with open(dst, mode) as f: f.write(dta) self.msg(3, 'wrote:', len(dta), 'bytes to', dst) except (OSError, IOError) as e: self.msg(0, 'fail: could not write', len(dta), 'bytes to', dst) self.msg(0, 'exception:', e) return False return True def msg(self, level, *a): if level > self.im_log: return level %= len(self.severity) msg = '%s[%-9s:%5s] %s' % ('Imagine', self.__class__.__name__, self.severity[level], ' '.join(to_str(s) for s in a)) print(msg, file=sys.stderr) sys.stderr.flush() def url(self): return pf.Image([self.id_, self.classes, self.keyvals], self.caption, [self.outfile, self.typef]) def anon_codeblock(self): (id_, klasses, keyvals), code = self.codec id_ = '#' + id_ if id_ else id_ klasses = ' '.join('.%s' % c for c in klasses) keyvals = ' '.join('%s="%s"' % (k, v) for k, v in keyvals) attr = '{%s}' % ' '.join(a for a in [id_, klasses, keyvals] if a) attr = attr if attr.find(' ') > -1 else attr[2:-1] codeblock = '```%s\n%s\n```' % (attr, code) return pf.CodeBlock(['', [], []], codeblock) def result(self): rv = [] enc = sys.getdefaultencoding() for output_elm in self.im_out: if output_elm == 'img': if os.path.isfile(self.outfile): rv.append(pf.Para([self.url()])) else: msg = '?? 
missing %s' % self.outfile self.msg(1, msg) rv.append(pf.Para([pf.Str(msg)])) elif output_elm == 'fcb': rv.append(self.anon_codeblock()) elif output_elm == 'ocb': attr = ['', self.classes, self.keyvals] rv.append(pf.CodeBlock(attr, self.codec[1])) elif output_elm == 'stdout': if self.stdout: attr = ['', self.classes, self.keyvals] rv.append(pf.CodeBlock(attr, to_str(self.stdout, enc))) else: self.msg(1, 'stdout requested, but saw nothing') elif output_elm == 'stderr': if self.stderr: attr = ['', self.classes, self.keyvals] rv.append(pf.CodeBlock(attr, to_str(self.stderr, enc))) else: self.msg(1, 'stderr requested, but saw nothing') if not rv: return None if len(rv) > 1: return rv return rv[0] def cmd(self, *args, **kwargs): forced = kwargs.get('forced', False) stdin = kwargs.get('stdin', None) if os.path.isfile(self.outfile) and forced is False: self.msg(4, 're-use: {!r}'.format(self.outfile)) return True try: self.msg(4, 'exec: ', *args) pipes = {'stdin': None if stdin is None else PIPE, 'stdout': PIPE, 'stderr': PIPE} p = Popen(args, **pipes) out, err = p.communicate(to_bytes(stdin)) self.stdout = out self.stderr = err for line in self.stderr.splitlines(): self.msg(4, 'stderr>', line) self.msg(2, 'stderr>', 'saw {} bytes'.format(len(self.stderr))) for line in self.stdout.splitlines(): self.msg(4, 'stdout>', line) self.msg(2, 'stdout>', 'saw {} bytes'.format(len(self.stdout))) if os.path.isfile(self.outfile): self.msg(4, 'created: {!r}'.format(self.outfile)) return p.returncode == 0 except (OSError, CalledProcessError) as e: try: os.remove(self.outfile) except OSError: pass self.msg(1, 'fail:', *args) self.msg(1, 'msg:', self.im_prg, str(e)) return False def image(self): self.msg(2, 'CodeBlock ignored, keeping as-is') return None class Asy(Handler): cmdmap = {'asy': 'asy', 'asymptote': 'asy'} im_fmt = 'png' def image(self): args = ['-o', self.outfile] + self.im_opt + [self.inpfile] if self.cmd(self.im_prg, *args): return self.result() class Boxes(Handler): cmdmap = {'boxes': 'boxes'} im_fmt = 'boxed' output = 'stdout' def image(self): self.im_out = [x for x in self.im_out if x not in ['img']] args = self.im_opt + [self.inpfile] if self.cmd(self.im_prg, *args): if self.stdout: self.write('w', to_str(self.stdout), self.outfile) else: self.stdout = self.read('r', self.outfile) return self.result() class BlockDiag(Handler): progs = 'blockdiag seqdiag rackdiag nwdiag packetdiag actdiag'.split() cmdmap = dict(zip(progs, progs)) def image(self): args = self.im_opt + ['-T', self.im_fmt, self.inpfile, '-o', self.outfile] if self.cmd(self.im_prg, *args): return self.result() class Ctioga2(Handler): cmdmap = {'ctioga2': 'ctioga2'} im_fmt = 'pdf' def image(self): args = self.im_opt + ['-f', self.inpfile] if self.cmd(self.im_prg, *args): return self.result() class Ditaa(Handler): cmdmap = {'ditaa': 'ditaa'} def image(self): args = [self.inpfile, self.outfile] + self.im_opt if self.cmd(self.im_prg, *args): return self.result() class Figlet(Handler): cmdmap = {'figlet': 'figlet'} im_fmt = 'figled' def image(self): self.im_out = [x for x in self.im_out if x not in ['img']] args = self.im_opt if self.cmd(self.im_prg, stdin=self.code, *args): if self.stdout: self.write('w', to_str(self.stdout), self.outfile) else: self.stdout = self.read('r', self.outfile) return self.result() class Flydraw(Handler): cmdmap = {'flydraw': 'flydraw'}
MIT License
botfront/rasa-for-botfront
rasa/cli/arguments/train.py
set_train_core_arguments
python
def set_train_core_arguments(parser: argparse.ArgumentParser) -> None:
    add_stories_param(parser)
    add_domain_param(parser)
    _add_core_config_param(parser)
    add_out_param(parser, help_text="Directory where your models should be stored.")
    add_augmentation_param(parser)
    add_debug_plots_param(parser)
    add_force_param(parser)
    _add_model_name_param(parser)

    compare_arguments = parser.add_argument_group("Comparison Arguments")
    _add_compare_params(compare_arguments)

    add_finetune_params(parser)
Specifies CLI arguments for `rasa train core`.
https://github.com/botfront/rasa-for-botfront/blob/6e0e48d0059e197b5f686df1e27935769c3641b7/rasa/cli/arguments/train.py#L35-L51
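A quick way to inspect the resulting CLI surface is to attach these arguments to a bare parser; the individual flag names come from the default_arguments helpers, so the generated help text is the authoritative list:

import argparse

parser = argparse.ArgumentParser(prog="rasa train core")
set_train_core_arguments(parser)
print(parser.format_help())   # lists the core training flags plus the "Comparison Arguments" group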
import argparse from typing import Union from rasa.cli.arguments.default_arguments import ( add_config_param, add_stories_param, add_nlu_data_param, add_out_param, add_domain_param, ) from rasa.shared.constants import DEFAULT_CONFIG_PATH, DEFAULT_DATA_PATH USE_LATEST_MODEL_FOR_FINE_TUNING = True def set_train_arguments(parser: argparse.ArgumentParser) -> None: add_data_param(parser) add_config_param(parser) add_domain_param(parser) add_out_param(parser, help_text="Directory where your models should be stored.") add_dry_run_param(parser) add_augmentation_param(parser) add_debug_plots_param(parser) _add_num_threads_param(parser) _add_model_name_param(parser) add_persist_nlu_data_param(parser) add_force_param(parser) add_finetune_params(parser)
Apache License 2.0
shudeng/point-gnn.pytorch
dataset/kitti_dataset.py
box3d_to_normals
python
def box3d_to_normals(label, expend_factor=(1.0, 1.0, 1.0)):
    box3d_points = box3d_to_cam_points(label, expend_factor)
    box3d_points_xyz = box3d_points.xyz
    wx = box3d_points_xyz[[0], :] - box3d_points_xyz[[4], :]
    lx = np.matmul(wx, box3d_points_xyz[4, :])
    ux = np.matmul(wx, box3d_points_xyz[0, :])
    wy = box3d_points_xyz[[0], :] - box3d_points_xyz[[1], :]
    ly = np.matmul(wy, box3d_points_xyz[1, :])
    uy = np.matmul(wy, box3d_points_xyz[0, :])
    wz = box3d_points_xyz[[0], :] - box3d_points_xyz[[3], :]
    lz = np.matmul(wz, box3d_points_xyz[3, :])
    uz = np.matmul(wz, box3d_points_xyz[0, :])
    return (np.concatenate([wx, wy, wz], axis=0),
            np.concatenate([lx, ly, lz]),
            np.concatenate([ux, uy, uz]))
Project a 3D box into camera coordinates, compute the center of the box and normals.

Args:
    label: a dictionary containing "x3d", "y3d", "z3d", "yaw", "height",
        "width", "length".

Returns:
    a numpy array [3, 3] containing [wx, wy, wz]^T, a [3] lower bound and
    a [3] upper bound.
https://github.com/shudeng/point-gnn.pytorch/blob/674e488fc468b85203b6df0ce45d84a5a77c88d3/dataset/kitti_dataset.py#L118-L141
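A hedged example of the expected input; the numbers are made up, and box3d_to_cam_points (shown in the context below) must be importable alongside numpy:

# Hypothetical KITTI-style label; values are illustrative only.
label = {'x3d': 1.0, 'y3d': 1.5, 'z3d': 8.0, 'yaw': 0.1,
         'height': 1.5, 'width': 1.6, 'length': 3.9}
normals, lower, upper = box3d_to_normals(label)
# normals has shape (3, 3); a camera point p lies inside the box when
# lower <= normals.dot(p) <= upper holds component-wise.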
import os import time from os.path import isfile, join import random from collections import namedtuple, defaultdict import numpy as np import open3d import cv2 Points = namedtuple('Points', ['xyz', 'attr']) def downsample_by_average_voxel(points, voxel_size): xmax, ymax, zmax = np.amax(points.xyz, axis=0) xmin, ymin, zmin = np.amin(points.xyz, axis=0) xyz_offset = np.asarray([[xmin, ymin, zmin]]) xyz_zeros = np.asarray([0, 0, 0], dtype=np.float32) xyz_idx = (points.xyz - xyz_offset) // voxel_size xyz_idx = xyz_idx.astype(np.int32) dim_x, dim_y, dim_z = np.amax(xyz_idx, axis=0) + 1 keys = xyz_idx[:, 0] + xyz_idx[:, 1]*dim_x + xyz_idx[:, 2]*dim_y*dim_x order = np.argsort(keys) keys = keys[order] points_xyz = points.xyz[order] unique_keys, lens = np.unique(keys, return_counts=True) indices = np.hstack([[0], lens[:-1]]).cumsum() downsampled_xyz = np.add.reduceat( points_xyz, indices, axis=0)/lens[:,np.newaxis] include_attr = points.attr is not None if include_attr: attr = points.attr[order] downsampled_attr = np.add.reduceat( attr, indices, axis=0)/lens[:,np.newaxis] if include_attr: return Points(xyz=downsampled_xyz, attr=downsampled_attr) else: return Points(xyz=downsampled_xyz, attr=None) def downsample_by_random_voxel(points, voxel_size, add_rnd3d=False): xmax, ymax, zmax = np.amax(points.xyz, axis=0) xmin, ymin, zmin = np.amin(points.xyz, axis=0) xyz_offset = np.asarray([[xmin, ymin, zmin]]) xyz_zeros = np.asarray([0, 0, 0], dtype=np.float32) if not add_rnd3d: xyz_idx = (points.xyz - xyz_offset) // voxel_size else: xyz_idx = (points.xyz - xyz_offset + voxel_size*np.random.random((1,3))) // voxel_size dim_x, dim_y, dim_z = np.amax(xyz_idx, axis=0) + 1 keys = xyz_idx[:, 0] + xyz_idx[:, 1]*dim_x + xyz_idx[:, 2]*dim_y*dim_x num_points = xyz_idx.shape[0] voxels_idx = {} for pidx in range(len(points.xyz)): key = keys[pidx] if key in voxels_idx: voxels_idx[key].append(pidx) else: voxels_idx[key] = [pidx] downsampled_xyz = [] downsampled_attr = [] for key in voxels_idx: center_idx = random.choice(voxels_idx[key]) downsampled_xyz.append(points.xyz[center_idx]) downsampled_attr.append(points.attr[center_idx]) return Points(xyz=np.array(downsampled_xyz), attr=np.array(downsampled_attr)) def box3d_to_cam_points(label, expend_factor=(1.0, 1.0, 1.0)): yaw = label['yaw'] R = np.array([[np.cos(yaw), 0, np.sin(yaw)], [0, 1, 0 ], [-np.sin(yaw), 0, np.cos(yaw)]]); h = label['height'] delta_h = h*(expend_factor[0]-1) w = label['width']*expend_factor[1] l = label['length']*expend_factor[2] corners = np.array([[ l/2, delta_h/2, w/2], [ l/2, delta_h/2, -w/2], [-l/2, delta_h/2, -w/2], [-l/2, delta_h/2, w/2], [ l/2, -h-delta_h/2, w/2], [ l/2, -h-delta_h/2, -w/2], [-l/2, -h-delta_h/2, -w/2], [-l/2, -h-delta_h/2, w/2]]) r_corners = corners.dot(np.transpose(R)) tx = label['x3d'] ty = label['y3d'] tz = label['z3d'] cam_points_xyz = r_corners+np.array([tx, ty, tz]) return Points(xyz = cam_points_xyz, attr = None)
MIT License
armmbed/yotta
yotta/lib/settings.py
_JSONConfigParser.set
python
def set(self, path, value=None, filename=None):
    if filename is None:
        config = self._firstConfig()[1]
    else:
        config = self.configs[filename]
    path = _splitPath(path)
    for el in path[:-1]:
        if el in config:
            config = config[el]
        else:
            config[el] = OrderedDict()
            config = config[el]
    config[path[-1]] = value
Set a configuration value. If no filename is specified, the property is set in the
first configuration file. Note that if a filename is specified and the property path
is present in an earlier file, then the newly set property will be hidden.

usage:
    set('section.property', value='somevalue')

Note that currently array indexes are not supported. You must set the whole array.
https://github.com/armmbed/yotta/blob/82d854b43d391abb5a006b05e7beffe7d0d6ffbf/yotta/lib/settings.py#L100-L124
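A small sketch of the read/set/get round trip; the file path and property names are made up, and per read() above a missing file simply becomes an empty in-memory config:

# Hypothetical usage; 'example.json' and 'github.authtoken' are illustrative.
p = _JSONConfigParser()
p.read(['example.json'])                  # a missing file just yields an empty config
p.set('github.authtoken', value='tok', filename='example.json')
p.get('github.authtoken')                 # -> 'tok'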
import logging import os import threading from collections import OrderedDict from yotta.lib import ordered_json from yotta.lib import folders user_config_file = os.path.join(folders.userSettingsDirectory(), 'config.json') dir_config_file = os.path.join('.','.yotta.json') config_files = [ dir_config_file, user_config_file, ] if os.name == 'nt': config_files += [ os.path.expanduser(os.path.join(folders.prefix(),'yotta.json')) ] else: config_files += [ os.path.expanduser(os.path.join(folders.prefix(),'etc','yotta.json')), os.path.join('etc','yotta.json') ] parser = None parser_lock = threading.Lock() class _JSONConfigParser(object): def __init__(self): self.configs = OrderedDict() def read(self, filenames): for fn in filenames: try: self.configs[fn] = ordered_json.load(fn) except IOError: self.configs[fn] = OrderedDict() except Exception as e: self.configs[fn] = OrderedDict() logging.warning( "Failed to read settings file %s, it will be ignored. The error was: %s", fn, e ) def get(self, path): path = _splitPath(path) for config in self.configs.values(): cur = config for el in path: if el in cur: cur = cur[el] else: cur = None break if cur is not None: return cur return None
Apache License 2.0
golemhq/golem
golem/core/tags_manager.py
get_project_unique_tags
python
def get_project_unique_tags(project):
    tests_tags = get_all_project_tests_tags(project)
    unique_tags = []
    for test, tags in tests_tags.items():
        for tag in tags:
            if tag not in unique_tags:
                unique_tags.append(tag)
    return unique_tags
Get a list of the unique tags used by all the tests of a project
https://github.com/golemhq/golem/blob/ab6a08ee54d2c5d27ab6af15b833ce3d2575d3e3/golem/core/tags_manager.py#L157-L165
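A hedged sketch, since the result depends entirely on the tests stored on disk; it assumes Golem's session.testdir already points at an initialized test directory that contains a project named 'my_project':

# Hypothetical call; 'my_project' must exist under session.testdir.
unique = get_project_unique_tags('my_project')
print(unique)   # e.g. ['smoke', 'regression'], depending on each test's `tags` list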
import ast import json import os from golem.core import file_manager from golem.core import session from golem.core.test import Test class InvalidTagExpression(Exception): pass class TagExpressionConstructor: def __init__(self, expression, tags): self.parsed = ast.parse(expression) self.tags = tags def run(self): return self._evaluate(self.parsed.body[0]) def _evaluate(self, expr): if isinstance(expr, ast.Expr): return self._evaluate(expr.value) elif isinstance(expr, ast.BoolOp): if isinstance(expr.op, ast.Or): evaluated = ['({})'.format(self._evaluate(v)) for v in expr.values] return ' or '.join(evaluated) elif isinstance(expr.op, ast.And): evaluated = ['({})'.format(self._evaluate(v)) for v in expr.values] return ' and '.join(evaluated) elif isinstance(expr, ast.UnaryOp): if isinstance(expr.op, ast.Not): return 'not {}'.format(self._evaluate(expr.operand)) elif isinstance(expr, ast.Num): return '"{}" in {}'.format(str(expr.n), self.tags) elif isinstance(expr, ast.Str): return '"{}" in {}'.format(expr.s, self.tags) elif isinstance(expr, ast.Name): return '"{}" in {}'.format(expr.id, self.tags) else: msg = ('unknown expression {}, the only valid operators for tag expressions ' 'are: \'and\', \'or\' & \'not\''.format(type(expr))) raise InvalidTagExpression(msg) def filter_tests_by_tags(project, tests, tags): def _construct_tag_expr(tags): cleaned = [] for tag in tags: try: ast.parse(tag) cleaned.append(tag) except SyntaxError: cleaned.append(f'"{tag}"') return ' and '.join(cleaned) def _test_matches_tag_query(query, tags): result = TagExpressionConstructor(query, tags).run() return eval(result) result = [] tag_expr = _construct_tag_expr(tags) tests_tags = get_tests_tags(project, tests) if tag_expr: for test in tests: if _test_matches_tag_query(tag_expr, tests_tags[test]): result.append(test) return result def get_test_tags(project, full_test_case_name): result = [] test_module = Test(project, full_test_case_name).module if hasattr(test_module, 'tags'): result = getattr(test_module, 'tags') return result def get_tests_tags(project, tests): cache_file_path = os.path.join(session.testdir, 'projects', project, '.tags') cache_tags = {} if os.path.isfile(cache_file_path): with open(cache_file_path, encoding='utf-8') as f: cache_tags_file_content = f.read() try: cache_tags = json.loads(cache_tags_file_content) except json.JSONDecodeError: os.remove(cache_file_path) return get_tests_tags(project, tests) for test in tests: t = Test(project, test) last_modified_time = os.path.getmtime(t.path) if test in cache_tags: cache_timestamp = cache_tags[test]['timestamp'] if last_modified_time != cache_timestamp: cache_tags[test] = { 'tags': get_test_tags(project, test), 'timestamp': last_modified_time } else: cache_tags[test] = { 'tags': get_test_tags(project, test), 'timestamp': last_modified_time } with open(cache_file_path, 'w', encoding='utf-8') as f: json.dump(cache_tags, f, indent=2, ensure_ascii=False) tags = {test: cache_tags[test]['tags'] for test in cache_tags} return tags def get_all_project_tests_tags(project): tests_folder_path = os.path.join(session.testdir, 'projects', project, 'tests') tests = file_manager.get_files_dot_path(tests_folder_path, extension='.py') return get_tests_tags(project, tests)
MIT License
facebookresearch/mtenv
local_dm_control_suite/swimmer.py
Swimmer.get_reward
python
def get_reward(self, physics):
    target_size = physics.named.model.geom_size["target", 0]
    return rewards.tolerance(
        physics.nose_to_target_dist(),
        bounds=(0, target_size),
        margin=5 * target_size,
        sigmoid="long_tail",
    )
Returns a smooth reward.
https://github.com/facebookresearch/mtenv/blob/4a6d9d6fdfb321f1b51f890ef36b5161359e972d/local_dm_control_suite/swimmer.py#L217-L225
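The reward is normally consumed through dm_control's environment API rather than called directly; a minimal sketch using the swimmer6 constructor from this module:

import numpy as np

env = swimmer6()                               # 6-link swimmer built by this module
timestep = env.reset()
action = np.zeros(env.action_spec().shape)
timestep = env.step(action)
print(timestep.reward)                         # value produced by Swimmer.get_reward(physics)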
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from dm_control import mujoco from dm_control.rl import control from . import base from . import common from dm_control.suite.utils import randomizers from dm_control.utils import containers from dm_control.utils import rewards from lxml import etree import numpy as np from six.moves import range _DEFAULT_TIME_LIMIT = 30 _CONTROL_TIMESTEP = 0.03 SUITE = containers.TaggedTasks() def get_model_and_assets(n_joints): return _make_model(n_joints), common.ASSETS @SUITE.add("benchmarking") def swimmer6(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): return _make_swimmer( 6, time_limit, random=random, environment_kwargs=environment_kwargs ) @SUITE.add("benchmarking") def swimmer15(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): return _make_swimmer( 15, time_limit, random=random, environment_kwargs=environment_kwargs ) def swimmer( n_links=3, time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None ): return _make_swimmer( n_links, time_limit, random=random, environment_kwargs=environment_kwargs ) def _make_swimmer( n_joints, time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None ): model_string, assets = get_model_and_assets(n_joints) physics = Physics.from_xml_string(model_string, assets=assets) task = Swimmer(random=random) environment_kwargs = environment_kwargs or {} return control.Environment( physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP, **environment_kwargs ) def _make_model(n_bodies): if n_bodies < 3: raise ValueError("At least 3 bodies required. Received {}".format(n_bodies)) mjcf = etree.fromstring(common.read_model("swimmer.xml")) head_body = mjcf.find("./worldbody/body") actuator = etree.SubElement(mjcf, "actuator") sensor = etree.SubElement(mjcf, "sensor") parent = head_body for body_index in range(n_bodies - 1): site_name = "site_{}".format(body_index) child = _make_body(body_index=body_index) child.append(etree.Element("site", name=site_name)) joint_name = "joint_{}".format(body_index) joint_limit = 360.0 / n_bodies joint_range = "{} {}".format(-joint_limit, joint_limit) child.append(etree.Element("joint", {"name": joint_name, "range": joint_range})) motor_name = "motor_{}".format(body_index) actuator.append(etree.Element("motor", name=motor_name, joint=joint_name)) velocimeter_name = "velocimeter_{}".format(body_index) sensor.append( etree.Element("velocimeter", name=velocimeter_name, site=site_name) ) gyro_name = "gyro_{}".format(body_index) sensor.append(etree.Element("gyro", name=gyro_name, site=site_name)) parent.append(child) parent = child cameras = mjcf.findall("./worldbody/body/camera") scale = n_bodies / 6.0 for cam in cameras: if cam.get("mode") == "trackcom": old_pos = cam.get("pos").split(" ") new_pos = " ".join([str(float(dim) * scale) for dim in old_pos]) cam.set("pos", new_pos) return etree.tostring(mjcf, pretty_print=True) def _make_body(body_index): body_name = "segment_{}".format(body_index) visual_name = "visual_{}".format(body_index) inertial_name = "inertial_{}".format(body_index) body = etree.Element("body", name=body_name) body.set("pos", "0 .1 0") etree.SubElement(body, "geom", {"class": "visual", "name": visual_name}) etree.SubElement(body, "geom", {"class": "inertial", "name": inertial_name}) return body class Physics(mujoco.Physics): def nose_to_target(self): nose_to_target = ( self.named.data.geom_xpos["target"] - 
self.named.data.geom_xpos["nose"] ) head_orientation = self.named.data.xmat["head"].reshape(3, 3) return nose_to_target.dot(head_orientation)[:2] def nose_to_target_dist(self): return np.linalg.norm(self.nose_to_target()) def body_velocities(self): xvel_local = self.data.sensordata[12:].reshape((-1, 6)) vx_vy_wz = [0, 1, 5] return xvel_local[:, vx_vy_wz].ravel() def joints(self): return self.data.qpos[3:].copy() class Swimmer(base.Task): def __init__(self, random=None): super(Swimmer, self).__init__(random=random) def initialize_episode(self, physics): randomizers.randomize_limited_and_rotational_joints(physics, self.random) close_target = self.random.rand() < 0.2 target_box = 0.3 if close_target else 2 xpos, ypos = self.random.uniform(-target_box, target_box, size=2) physics.named.model.geom_pos["target", "x"] = xpos physics.named.model.geom_pos["target", "y"] = ypos physics.named.model.light_pos["target_light", "x"] = xpos physics.named.model.light_pos["target_light", "y"] = ypos super(Swimmer, self).initialize_episode(physics) def get_observation(self, physics): obs = collections.OrderedDict() obs["joints"] = physics.joints() obs["to_target"] = physics.nose_to_target() obs["body_velocities"] = physics.body_velocities() return obs
MIT License
robotika/osgar
osgar/drivers/imu.py
parse_line
python
def parse_line(line):
    assert line.startswith(b'$VNYMR'), line
    assert b'*' in line, line
    s = line.split(b'*')[0].split(b',')
    assert len(s) == 13, s
    arr = [float(x) for x in s[1:]]
    return [arr[:3], arr[3:6], arr[6:9], arr[9:]]
Parse $VNYMR message:
    Yaw     float  deg    Calculated attitude heading angle in degrees.
    Pitch   float  deg    Calculated attitude pitch angle in degrees.
    Roll    float  deg    Calculated attitude roll angle in degrees.
    MagX    float  Gauss  Compensated magnetometer measurement in x-axis.
    MagY    float  Gauss  Compensated magnetometer measurement in y-axis.
    MagZ    float  Gauss  Compensated magnetometer measurement in z-axis.
    AccelX  float  m/s^2  Compensated accelerometer measurement in x-axis.
    AccelY  float  m/s^2  Compensated accelerometer measurement in y-axis.
    AccelZ  float  m/s^2  Compensated accelerometer measurement in z-axis.
    GyroX   float  rad/s  Compensated angular rate in x-axis.
    GyroY   float  rad/s  Compensated angular rate in y-axis.
    GyroZ   float  rad/s  Compensated angular rate in z-axis.
https://github.com/robotika/osgar/blob/6f4f584d5553ab62c08a1c7bb493fefdc9033173/osgar/drivers/imu.py#L10-L31
from threading import Thread from osgar.bus import BusShutdownException
MIT License
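A quick usage sketch for the parse_line function above; the $VNYMR sentence below is invented for illustration and only needs to satisfy the 13-field layout the parser asserts on.

# Hypothetical $VNYMR sentence; values and checksum are made up.
raw = b'$VNYMR,+010.0,-002.5,+000.1,+0.2,-0.1,+0.4,-0.2,+0.1,-9.8,+0.001,-0.002,+0.000*65'
attitude, mag, accel, gyro = parse_line(raw)
print(attitude)  # [10.0, -2.5, 0.1] -> yaw, pitch, roll in degrees
print(gyro)      # [0.001, -0.002, 0.0] -> angular rates in rad/s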
tl-system/plato
examples/rhythm/rhythm_server.py
Server.wrap_up_an_episode
python
async def wrap_up_an_episode(self):
    if hasattr(Config(), 'results'):
        new_row = []
        for item in self.rl_recorded_items:
            item_value = {
                'episode': self.rl_episode,
                'cumulative_reward': self.cumulative_reward,
                'rl_training_time': time.perf_counter() - self.rl_episode_start_time
            }[item]
            new_row.append(item_value)
        result_csv_file = Config().result_dir + 'result_rl.csv'
        csv_processor.write_csv(result_csv_file, new_row)

    self.wrapped_previous_episode.set()

    if self.rl_episode >= Config().algorithm.rl_episodes:
        logging.info('RL Agent: Target number of training episodes reached.')
        await self.close_connections()
        sys.exit()
    else:
        self.new_episode_begin.clear()
        await self.new_episode_begin.wait()
        self.new_episode_begin.clear()
Wrapping up when one RL episode (the FL training) is done.
https://github.com/tl-system/plato/blob/cbc5ddc04b554b4b05679a85c6ed6e5fb7f70bef/examples/rhythm/rhythm_server.py#L188-L215
import asyncio import logging import sys import time from plato.config import Config from plato.utils import csv_processor, rl_env from plato.servers import fedavg_cs class Server(fedavg_cs.Server): def __init__(self): super().__init__() if Config().is_central_server(): self.rl_env = rl_env.RLEnv(self) self.rl_episode = 0 self.rl_tuned_para_value = None self.rl_state = None self.is_rl_tuned_para_got = False self.is_rl_episode_done = False self.rl_episode_start_time = None self.cumulative_reward = 0 self.rl_tuned_para_got = asyncio.Event() self.generated_server_response = False self.new_episode_begin = asyncio.Event() self.wrapped_previous_episode = asyncio.Event() self.wrapped_previous_episode.set() if hasattr(Config(), 'results'): self.rl_recorded_items = [ 'episode', 'cumulative_reward', 'rl_training_time' ] def configure(self): if Config().is_central_server(): logging.info("Configuring a RL agent and a %s server...", Config().algorithm.type) logging.info( "This RL agent will tune the number of aggregations on edge servers." ) total_episodes = Config().algorithm.rl_episodes target_reward = Config().algorithm.rl_target_reward if target_reward is not None: logging.info("RL Training: %s episodes or %s%% reward\n", total_episodes, 100 * target_reward) else: logging.info("RL Training: %s episodes\n", total_episodes) if hasattr(Config(), 'results'): result_csv_file = Config().result_dir + 'result_rl.csv' csv_processor.initialize_csv(result_csv_file, self.rl_recorded_items, Config().result_dir) else: super().configure() def start_clients(self, client=None, as_server=False): super().start_clients(client, as_server) if not as_server: loop = asyncio.get_event_loop() loop.run_until_complete(asyncio.gather(self.start_rl())) def start_rl(self): Server.try_a_random_agent(self.rl_env) def reset_rl_env(self): current_loop = asyncio.get_event_loop() task = current_loop.create_task(self.wrapped_previous_episode.wait()) current_loop.run_until_complete(task) self.wrapped_previous_episode.clear() self.current_round = 0 self.is_rl_episode_done = False self.cumulative_reward = 0 self.rl_episode_start_time = time.perf_counter() self.rl_episode += 1 logging.info('\nRL Agent: Starting episode %s...', self.rl_episode) super().configure() self.round_start_time = 0 async def wrap_up(self): if Config().is_central_server(): self.generated_server_response = False self.rl_state = self.accuracy target_accuracy = Config().trainer.target_accuracy if target_accuracy and self.accuracy >= target_accuracy: logging.info("Target accuracy of FL reached.") self.is_rl_episode_done = True if self.current_round >= Config().trainer.rounds: logging.info("Target number of FL training rounds reached.") self.is_rl_episode_done = True self.rl_env.get_state(self.rl_state, self.is_rl_episode_done) await self.rl_env.step_done.wait() self.rl_env.step_done.clear() if self.is_rl_episode_done: await self.wrap_up_an_episode() async def customize_server_response(self, server_response): if Config().is_central_server(): if not self.generated_server_response: await self.update_rl_tuned_parameter() self.generated_server_response = True server_response['local_agg_rounds'] = Config( ).algorithm.local_rounds server_response = await super().customize_server_response( server_response) print("CURRENT GLOBAL ROUND", self.current_round) return server_response async def update_rl_tuned_parameter(self): await self.rl_tuned_para_got.wait() self.rl_tuned_para_got.clear() Config().algorithm = Config().algorithm._replace( local_rounds=self.rl_tuned_para_value) def 
get_tuned_para(self, rl_tuned_para_value, time_step): assert time_step == self.current_round + 1 self.rl_tuned_para_value = rl_tuned_para_value self.rl_tuned_para_got.set() print("RL Agent: Get tuned para of time step", time_step)
Apache License 2.0
sobhe/hazm
hazm/DegarbayanReader.py
DegarbayanReader.__init__
python
def __init__(self, root, corpus_file='CorpusPair.xml', judge_type='three_class', version=1.0):
    self._root = root
    self._corpus_file = corpus_file
    self._judge_type = judge_type
    if judge_type != 'three_class' and judge_type != 'two_class':
        self._judge_type = 'three_class'
:param root: Path to corpus folder.
:param corpus_file: Name of corpus pair file. Defaults to 'CorpusPair.xml'.
:param judge_type: How to return the judge value. Can be either 'two_class' or 'three_class' for two- or three-class return values. Defaults to 'three_class'.
:param version: Corpus version. Defaults to version 1.0.
:type root: str
:type corpus_file: str
:type judge_type: str
:type version: float
https://github.com/sobhe/hazm/blob/bd874199bfd4c18b363774e5c879a24500dc2a40/hazm/DegarbayanReader.py#L17-L33
from __future__ import unicode_literals, print_function import os from xml.dom import minidom class DegarbayanReader():
MIT License
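A minimal construction sketch for the reader above; the corpus path is a placeholder, and the second call illustrates the fallback to 'three_class' when an unknown judge_type is passed.

reader = DegarbayanReader(root='corpora/degarbayan', judge_type='two_class')
bad = DegarbayanReader(root='corpora/degarbayan', judge_type='2class')
print(bad._judge_type)  # 'three_class' -- unknown values fall back to the default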
zwicker-group/py-pde
pde/grids/cylindrical.py
CylindricalSymGrid.get_line_data
python
def get_line_data(self, data: np.ndarray, extract: str = "auto") -> Dict[str, Any]:
    if extract == "auto":
        extract = "cut_axial"

    if extract == "cut_z" or extract == "cut_axial":
        axis = 1
        data_y = data[..., 0, :]
        label_y = "Cut along z"

    elif extract == "project_z" or extract == "project_axial":
        axis = 1
        data_y = (data.mean(axis=-2),)
        label_y = "Projection onto z"

    elif extract == "project_r" or extract == "project_radial":
        axis = 0
        data_y = (data.mean(axis=-1),)
        label_y = "Projection onto r"

    else:
        raise ValueError(f"Unknown extraction method {extract}")

    return {
        "data_x": self.axes_coords[axis],
        "data_y": data_y,
        "extent_x": self.axes_bounds[axis],
        "label_x": self.axes[axis],
        "label_y": label_y,
    }
return a line cut for the cylindrical grid

Args:
    data (:class:`~numpy.ndarray`): The values at the grid points
    extract (str): Determines which cut is done through the grid. Possible choices are (default is `cut_axial`):

        * `cut_z` or `cut_axial`: values along the axial coordinate for :math:`r=0`.
        * `project_z` or `project_axial`: average values for each axial position (radial average).
        * `project_r` or `project_radial`: average values for each radial position (axial average)

Returns:
    A dictionary with information about the line cut, which is convenient for plotting.
https://github.com/zwicker-group/py-pde/blob/0549f7c74a52705e1d29e62d27b5578251c2054c/pde/grids/cylindrical.py#L203-L253
import warnings from typing import TYPE_CHECKING, Any, Dict, Generator, Sequence, Tuple, Union import numpy as np from ..tools.cache import cached_property from ..tools.docstrings import fill_in_docstring from .base import DimensionError, GridBase, _check_shape, discretize_interval from .cartesian import CartesianGrid if TYPE_CHECKING: from .boundaries.axes import Boundaries, BoundariesData from .spherical import PolarSymGrid class CylindricalSymGrid(GridBase): dim = 3 num_axes = 2 axes = ["r", "z"] axes_symmetric = ["phi"] coordinate_constraints = [0, 1] def __init__( self, radius: float, bounds_z: Tuple[float, float], shape: Tuple[int, int], periodic_z: bool = False, ): super().__init__() shape_list = _check_shape(shape) if len(shape_list) == 1: self._shape: Tuple[int, int] = (shape_list[0], shape_list[0]) elif len(shape_list) == 2: self._shape = tuple(shape_list) else: raise DimensionError("`shape` must be two integers") if len(bounds_z) != 2: raise ValueError( "Lower and upper value of the axial coordinate must be specified" ) self._periodic_z: bool = bool(periodic_z) self.periodic = [False, self._periodic_z] dr = radius / self.shape[0] rs = (np.arange(self.shape[0]) + 0.5) * dr assert np.isclose(rs[-1] + dr / 2, radius) zs, dz = discretize_interval(*bounds_z, self.shape[1]) assert np.isclose(zs[-1] + dz / 2, bounds_z[1]) self._axes_coords = (rs, zs) self._axes_bounds = ((0.0, radius), tuple(bounds_z)) self._discretization = np.array((dr, dz)) @property def state(self) -> Dict[str, Any]: radius = self.axes_bounds[0][1] return { "radius": radius, "bounds_z": self.axes_bounds[1], "shape": self.shape, "periodic_z": self._periodic_z, } @classmethod def from_state(cls, state: Dict[str, Any]) -> "CylindricalSymGrid": state_copy = state.copy() obj = cls( radius=state_copy.pop("radius"), bounds_z=state_copy.pop("bounds_z"), shape=state_copy.pop("shape"), periodic_z=state_copy.pop("periodic_z"), ) if state_copy: raise ValueError(f"State items {state_copy.keys()} were not used") return obj @property def radius(self) -> float: return self.axes_bounds[0][1] @property def length(self) -> float: return self.axes_bounds[1][1] - self.axes_bounds[1][0] @property def volume(self) -> float: return float(np.pi * self.radius ** 2 * self.length) def get_random_point( self, boundary_distance: float = 0, cartesian: bool = True, avoid_center: bool = False, rng: np.random.Generator = None, ) -> np.ndarray: if rng is None: rng = np.random.default_rng() r_min = boundary_distance if avoid_center else 0 r_mag = self.radius - boundary_distance - r_min z_min, z_max = self.axes_bounds[1] if boundary_distance != 0: z_min += boundary_distance z_max -= boundary_distance if r_mag <= 0 or z_max <= z_min: raise RuntimeError("Random points would be too close to boundary") r = r_mag * rng.random() + r_min z = z_min + (z_max - z_min) * rng.random() point = np.array([r, z]) if cartesian: return self.point_to_cartesian(point) else: return point
MIT License
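A small usage sketch for get_line_data, assuming the grid class can be instantiated as shown in the context above; the field values are random placeholders.

import numpy as np

grid = CylindricalSymGrid(radius=2.0, bounds_z=(0.0, 5.0), shape=(8, 16))
values = np.random.random(grid.shape)            # one value per (r, z) grid point

line = grid.get_line_data(values, extract="project_radial")
print(line["label_x"], "|", line["label_y"])     # r | Projection onto r
print(len(line["data_x"]))                       # 8 radial coordinates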
tanghaibao/jcvi
jcvi/graphics/chromosome.py
Chromosome.__init__
python
def __init__(
    self,
    ax,
    x,
    y1,
    y2,
    width=0.015,
    ec="k",
    patch=None,
    patchcolor="lightgrey",
    lw=1,
    zorder=2,
):
    y1, y2 = sorted((y1, y2))
    super(Chromosome, self).__init__(ax)
    pts, r = self.get_pts(x, y1, y2, width)
    self.append(Polygon(pts, fill=False, lw=lw, ec=ec, zorder=zorder))
    if patch:
        rr = r * 0.9
        for i in range(0, len(patch), 2):
            if i + 1 > len(patch) - 1:
                continue
            p1, p2 = patch[i], patch[i + 1]
            self.append(
                Rectangle((x - rr, p1), 2 * rr, p2 - p1, lw=0, fc=patchcolor)
            )

    self.add_patches()
Chromosome with positions given in (x, y1) => (x, y2) The chromosome can also be patched, e.g. to show scaffold composition in alternating shades. Use a list of starting locations to segment.
https://github.com/tanghaibao/jcvi/blob/3b161796234670ce1c4894974eaeb590d35cf2a2/jcvi/graphics/chromosome.py#L34-L67
import logging import sys from itertools import groupby from math import ceil import numpy as np from jcvi.apps.base import OptionGroup, OptionParser, datafile, sample_N from jcvi.formats.base import DictFile, get_number from jcvi.formats.bed import Bed from jcvi.formats.sizes import Sizes from jcvi.graphics.base import ( CirclePolygon, Polygon, Rectangle, latex, markup, plt, savefig, set1_n, set3_n, ) from jcvi.graphics.glyph import BaseGlyph, RoundRect, plot_cap class Chromosome(BaseGlyph):
BSD 2-Clause Simplified License
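A minimal plotting sketch for the Chromosome glyph above, assuming the rest of the glyph machinery from the context (BaseGlyph, get_pts) is available; coordinates are in axes fractions and the patch boundaries are arbitrary.

import matplotlib.pyplot as plt

fig = plt.figure(figsize=(2, 6))
ax = fig.add_axes((0, 0, 1, 1))
# Vertical chromosome at x=0.5 spanning y=0.1..0.9, with two shaded segments
# given as alternating start/stop positions.
Chromosome(ax, 0.5, 0.1, 0.9, width=0.02, patch=[0.2, 0.4, 0.6, 0.8])
ax.set_axis_off()
plt.show()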
bcornelusse/microgridrlsimulator
microgridRLsimulator/simulate/gridaction.py
GridAction.__init__
python
def __init__(self, conventional_generation=None, charge=None, discharge=None):
    self.conventional_generation = conventional_generation
    self.charge = charge
    self.discharge = discharge
Action taken by the agent. Each action is defined per device, then per period of the optimization horizon.
Each member is defined as a list or as nested lists.

:param conventional_generation: Genset generation [kW]
:param charge: Action to charge storage devices [kW]
:param discharge: Action to discharge storage devices [kW]
https://github.com/bcornelusse/microgridrlsimulator/blob/f2d7a0c77c47b9727d69c14d80fa521dea6a3861/microgridRLsimulator/simulate/gridaction.py#L5-L19
class GridAction():
BSD 2-Clause Simplified License
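A small sketch of the per-device, per-period structure described in the docstring above; the device counts and horizon length are made up.

action = GridAction(
    conventional_generation=[50.0, 45.0, 40.0],      # one genset over 3 periods [kW]
    charge=[[10.0, 0.0, 5.0], [0.0, 0.0, 0.0]],      # 2 storage devices x 3 periods [kW]
    discharge=[[0.0, 8.0, 0.0], [0.0, 0.0, 2.0]],
)
print(action.charge[0])                              # charging schedule of the first device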
executablebooks/sphinx-external-toc
sphinx_external_toc/cli.py
parse_toc
python
def parse_toc(toc_file):
    site_map = parse_toc_yaml(toc_file)
    click.echo(yaml.dump(site_map.as_json(), sort_keys=False, default_flow_style=False))
Parse a ToC file to a site-map YAML.
https://github.com/executablebooks/sphinx-external-toc/blob/5b13190b478e3e5cd37798256007b2a65d691644/sphinx_external_toc/cli.py#L23-L26
from pathlib import Path, PurePosixPath import click import yaml from sphinx_external_toc import __version__ from sphinx_external_toc.parsing import FILE_FORMATS, create_toc_dict, parse_toc_yaml from sphinx_external_toc.tools import ( create_site_from_toc, create_site_map_from_path, migrate_jupyter_book, ) @click.group(context_settings={"help_option_names": ["-h", "--help"]}) @click.version_option(version=__version__) def main(): @main.command("parse") @click.argument("toc_file", type=click.Path(exists=True, file_okay=True))
MIT License
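A sketch of exercising the click command above in-process; the ToC path is a placeholder and must point to an existing file because of the click.Path(exists=True) argument shown in the context.

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(parse_toc, ["docs/_toc.yml"])
print(result.output)        # the site-map dumped as YAML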
autogoal/autogoal
autogoal/contrib/keras/_crf.py
CRF._compute_mask_left_boundary
python
def _compute_mask_left_boundary(mask):
    offset = 1
    right_shifted_mask = tf.concat(
        [tf.zeros_like(mask[:, :offset]), mask[:, :-offset]], axis=1
    )

    left_boundary = tf.greater(
        tf.cast(mask, tf.int32), tf.cast(right_shifted_mask, tf.int32)
    )

    return left_boundary
input mask: 0011100, output left_boundary: 0010000.
https://github.com/autogoal/autogoal/blob/9e5eb4e9a31dc45e4a0bf46c1de33be643e874b7/autogoal/contrib/keras/_crf.py#L327-L341
from __future__ import absolute_import, division, print_function import tensorflow as tf from typeguard import typechecked from tensorflow_addons.text.crf import crf_decode, crf_log_likelihood from tensorflow_addons.utils import types class CRF(tf.keras.layers.Layer): @typechecked def __init__( self, units: int, chain_initializer: types.Initializer = "orthogonal", chain_regularizer: types.Regularizer = None, chain_constraint: types.Constraint = None, use_boundary: bool = True, boundary_initializer: types.Initializer = "zeros", boundary_regularizer: types.Regularizer = None, boundary_constraint: types.Constraint = None, use_kernel: bool = True, kernel_initializer: types.Initializer = "glorot_uniform", kernel_regularizer: types.Regularizer = None, kernel_constraint: types.Constraint = None, use_bias: bool = True, bias_initializer: types.Initializer = "zeros", bias_regularizer: types.Regularizer = None, bias_constraint: types.Constraint = None, activation: types.Activation = "linear", **kwargs ): super(CRF, self).__init__(**kwargs) self.supports_masking = True self.units = units self.use_boundary = use_boundary self.use_bias = use_bias self.use_kernel = use_kernel self.activation = tf.keras.activations.get(activation) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.chain_initializer = tf.keras.initializers.get(chain_initializer) self.boundary_initializer = tf.keras.initializers.get(boundary_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self.chain_regularizer = tf.keras.regularizers.get(chain_regularizer) self.boundary_regularizer = tf.keras.regularizers.get(boundary_regularizer) self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self.kernel_constraint = tf.keras.constraints.get(kernel_constraint) self.chain_constraint = tf.keras.constraints.get(chain_constraint) self.boundary_constraint = tf.keras.constraints.get(boundary_constraint) self.bias_constraint = tf.keras.constraints.get(bias_constraint) self.input_spec = None self.potentials = None self.sequence_length = None self.mask = None self.chain_kernel = None self._dense_layer = None self.left_boundary = None self.right_boundary = None def build(self, input_shape): input_shape = tuple(tf.TensorShape(input_shape).as_list()) self.input_spec = [tf.keras.layers.InputSpec(shape=input_shape)] self.chain_kernel = self.add_weight( shape=(self.units, self.units), name="chain_kernel", initializer=self.chain_initializer, regularizer=self.chain_regularizer, constraint=self.chain_constraint, ) if self.use_boundary: self.left_boundary = self.add_weight( shape=(self.units,), name="left_boundary", initializer=self.boundary_initializer, regularizer=self.boundary_regularizer, constraint=self.boundary_constraint, ) self.right_boundary = self.add_weight( shape=(self.units,), name="right_boundary", initializer=self.boundary_initializer, regularizer=self.boundary_regularizer, constraint=self.boundary_constraint, ) if self.use_kernel: self._dense_layer = tf.keras.layers.Dense( units=self.units, activation=self.activation, use_bias=self.use_bias, bias_initializer=self.bias_initializer, kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer, kernel_constraint=self.kernel_constraint, bias_constraint=self.bias_constraint, dtype=self.dtype, ) else: self._dense_layer = lambda x: tf.cast(x, dtype=self.dtype) super(CRF, self).build(input_shape) def call(self, inputs, mask=None, **kwargs): if 
mask is not None: if tf.keras.backend.ndim(mask) != 2: raise ValueError("Input mask to CRF must have dim 2 if not None") first_mask = None if mask is not None: left_boundary_mask = self._compute_mask_left_boundary(mask) first_mask = left_boundary_mask[:, 0] self.mask = mask if first_mask is not None: no_left_padding = tf.math.reduce_all(first_mask) msg = "Currently, CRF layer do not support left padding" with tf.control_dependencies( [ tf.debugging.assert_equal( no_left_padding, tf.constant(True), message=msg ) ] ): self.potentials = self._dense_layer(inputs) else: self.potentials = self._dense_layer(inputs) if self.use_boundary: self.potentials = self.add_boundary_energy( self.potentials, mask, self.left_boundary, self.right_boundary ) self.sequence_length = self._get_sequence_length(inputs, mask) decoded_sequence, _ = self.get_viterbi_decoding( self.potentials, self.sequence_length ) return decoded_sequence def _get_sequence_length(self, input_, mask): if mask is not None: sequence_length = self.mask_to_sequence_length(mask) else: input_energy_shape = tf.shape(input_) raw_input_shape = tf.slice(input_energy_shape, [0], [2]) alt_mask = tf.ones(raw_input_shape) sequence_length = self.mask_to_sequence_length(alt_mask) return sequence_length def mask_to_sequence_length(self, mask): sequence_length = tf.cast(tf.reduce_sum(tf.cast(mask, tf.int8), 1), tf.int64) return sequence_length @staticmethod def _compute_mask_right_boundary(mask): offset = 1 left_shifted_mask = tf.concat( [mask[:, offset:], tf.zeros_like(mask[:, :offset])], axis=1 ) right_boundary = tf.greater(mask, left_shifted_mask) return right_boundary @staticmethod
MIT License
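A worked example of the shift-and-compare trick above, assuming TensorFlow 2 eager mode and that the CRF class is importable; it reproduces the 0011100 -> 0010000 case from the docstring.

import tensorflow as tf

mask = tf.constant([[0, 0, 1, 1, 1, 0, 0]])
left = CRF._compute_mask_left_boundary(mask)
print(tf.cast(left, tf.int32).numpy())   # [[0 0 1 0 0 0 0]]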
puiterwijk/flask-oidc
flask_oidc/__init__.py
OpenIDConnect.user_loggedin
python
def user_loggedin(self):
    return g.oidc_id_token is not None
Represents whether the user is currently logged in.

Returns:
    bool: Whether the user is logged in with Flask-OIDC.

.. versionadded:: 1.0
https://github.com/puiterwijk/flask-oidc/blob/7f16e27b926fc12953d6b2ae78a9b9cc9b8d1769/flask_oidc/__init__.py#L199-L208
from functools import wraps import os import json from base64 import b64encode, b64decode, urlsafe_b64encode, urlsafe_b64decode import time from copy import copy import logging from warnings import warn import calendar from six.moves.urllib.parse import urlencode from flask import request, session, redirect, url_for, g, current_app, abort from oauth2client.client import flow_from_clientsecrets, OAuth2WebServerFlow, AccessTokenRefreshError, OAuth2Credentials import httplib2 from itsdangerous import JSONWebSignatureSerializer, BadSignature __all__ = ['OpenIDConnect', 'MemoryCredentials'] logger = logging.getLogger(__name__) def _json_loads(content): if not isinstance(content, str): content = content.decode('utf-8') return json.loads(content) class MemoryCredentials(dict): pass class DummySecretsCache(object): def __init__(self, client_secrets): self.client_secrets = client_secrets def get(self, filename, namespace): return self.client_secrets class ErrStr(str): def __nonzero__(self): return False def __bool__(self): return False GOOGLE_ISSUERS = ['accounts.google.com', 'https://accounts.google.com'] class OpenIDConnect(object): def __init__(self, app=None, credentials_store=None, http=None, time=None, urandom=None): self.credentials_store = credentials_store if credentials_store is not None else MemoryCredentials() if http is not None: warn('HTTP argument is deprecated and unused', DeprecationWarning) if time is not None: warn('time argument is deprecated and unused', DeprecationWarning) if urandom is not None: warn('urandom argument is deprecated and unused', DeprecationWarning) self._custom_callback = None if app is not None: self.init_app(app) def init_app(self, app): secrets = self.load_secrets(app) self.client_secrets = list(secrets.values())[0] secrets_cache = DummySecretsCache(secrets) app.config.setdefault('OIDC_SCOPES', ['openid', 'email']) app.config.setdefault('OIDC_GOOGLE_APPS_DOMAIN', None) app.config.setdefault('OIDC_ID_TOKEN_COOKIE_NAME', 'oidc_id_token') app.config.setdefault('OIDC_ID_TOKEN_COOKIE_PATH', '/') app.config.setdefault('OIDC_ID_TOKEN_COOKIE_TTL', 7 * 86400) app.config.setdefault('OIDC_COOKIE_SECURE', True) app.config.setdefault('OIDC_VALID_ISSUERS', (self.client_secrets.get('issuer') or GOOGLE_ISSUERS)) app.config.setdefault('OIDC_CLOCK_SKEW', 60) app.config.setdefault('OIDC_REQUIRE_VERIFIED_EMAIL', False) app.config.setdefault('OIDC_OPENID_REALM', None) app.config.setdefault('OIDC_USER_INFO_ENABLED', True) app.config.setdefault('OIDC_CALLBACK_ROUTE', '/oidc_callback') app.config.setdefault('OVERWRITE_REDIRECT_URI', False) app.config.setdefault("OIDC_EXTRA_REQUEST_AUTH_PARAMS", {}) app.config.setdefault('OIDC_RESOURCE_SERVER_ONLY', False) app.config.setdefault('OIDC_RESOURCE_CHECK_AUD', False) app.config.setdefault('OIDC_INTROSPECTION_AUTH_METHOD', 'client_secret_post') app.config.setdefault('OIDC_TOKEN_TYPE_HINT', 'access_token') if not 'openid' in app.config['OIDC_SCOPES']: raise ValueError('The value "openid" must be in the OIDC_SCOPES') if not app.config['OIDC_RESOURCE_SERVER_ONLY']: app.route(app.config['OIDC_CALLBACK_ROUTE'])(self._oidc_callback) app.before_request(self._before_request) app.after_request(self._after_request) self.flow = flow_from_clientsecrets( app.config['OIDC_CLIENT_SECRETS'], scope=app.config['OIDC_SCOPES'], cache=secrets_cache) assert isinstance(self.flow, OAuth2WebServerFlow) self.extra_data_serializer = JSONWebSignatureSerializer( app.config['SECRET_KEY'], salt='flask-oidc-extra-data') self.cookie_serializer = 
JSONWebSignatureSerializer( app.config['SECRET_KEY'], salt='flask-oidc-cookie') try: self.credentials_store = app.config['OIDC_CREDENTIALS_STORE'] except KeyError: pass def load_secrets(self, app): content = app.config['OIDC_CLIENT_SECRETS'] if isinstance(content, dict): return content else: return _json_loads(open(content, 'r').read()) @property
BSD 2-Clause Simplified License
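A minimal view sketch using the property above; the secret key and the client_secrets.json path are placeholders, and the secrets file must exist for OpenIDConnect to initialize.

from flask import Flask

app = Flask(__name__)
app.config.update(SECRET_KEY='change-me', OIDC_CLIENT_SECRETS='client_secrets.json')
oidc = OpenIDConnect(app)

@app.route('/')
def index():
    return 'logged in' if oidc.user_loggedin else 'anonymous'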
matzpersson/opc-rest-api
http2opc/includes/OpenOPC.py
client.connect
python
def connect(self, opc_server=None, opc_host='128.100.1.21'):
    pythoncom.CoInitialize()

    if opc_server == None:
        if self.opc_server == None:
            if os.environ.has_key('OPC_SERVER'):
                opc_server = os.environ['OPC_SERVER']
            else:
                opc_server = OPC_SERVER
        else:
            opc_server = self.opc_server
            opc_host = self.opc_host

    opc_server_list = opc_server.split(';')
    connected = False

    for s in opc_server_list:
        try:
            if self.trace: self.trace('Connect(%s,%s)' % (s, opc_host))
            self._opc.Connect(s, opc_host)
            connected = True
        except pythoncom.com_error, err:
            if len(opc_server_list) == 1:
                error_msg = 'Connect: %s' % self._get_error_str(err)
                raise OPCError, error_msg
        else:
            try:
                if self.client_name == None:
                    if os.environ.has_key('OPC_CLIENT'):
                        self._opc.ClientName = os.environ['OPC_CLIENT']
                    else:
                        self._opc.ClientName = OPC_CLIENT
                else:
                    self._opc.ClientName = self.client_name
            except:
                pass
            connected = True
            break

    if not connected:
        raise OPCError, 'Connect: Cannot connect to any of the servers in the OPC_SERVER list'

    time.sleep(0.01)

    self.opc_server = opc_server
    if opc_host == 'localhost':
        opc_host = socket.gethostname()
    self.opc_host = opc_host

    self._groups = {}
    self._group_tags = {}
    self._group_valid_tags = {}
    self._group_server_handles = {}
    self._group_handles_tag = {}
    self._group_hooks = {}

    return connected
Connect to the specified OPC server
https://github.com/matzpersson/opc-rest-api/blob/a5a01977b05887546d38abb0a593ecbb48bff112/http2opc/includes/OpenOPC.py#L205-L274
import os import sys import time import types import string import socket import re import Queue __version__ = '1.3.1' current_client = None if os.name == 'nt': try: import win32com.client import win32com.server.util import win32event import pythoncom import pywintypes import SystemHealth vt = dict([(pythoncom.__dict__[vtype], vtype) for vtype in pythoncom.__dict__.keys() if vtype[:2] == "VT"]) win32com.client.gencache.is_readonly = False win32com.client.gencache.Rebuild(verbose=0) except ImportError: win32com_found = False else: win32com_found = True else: win32com_found = False SOURCE_CACHE = 1 SOURCE_DEVICE = 2 OPC_STATUS = (0, 'Running', 'Failed', 'NoConfig', 'Suspended', 'Test') BROWSER_TYPE = (0, 'Hierarchical', 'Flat') ACCESS_RIGHTS = (0, 'Read', 'Write', 'Read/Write') OPC_QUALITY = ('Bad', 'Uncertain', 'Unknown', 'Good') OPC_CLASS = 'Matrikon.OPC.Automation;Graybox.OPC.DAWrapper;HSCOPC.Automation;RSI.OPCAutomation;OPC.Automation' OPC_SERVER = 'Hci.TPNServer;HwHsc.OPCServer;opc.deltav.1;AIM.OPC.1;Yokogawa.ExaopcDAEXQ.1;OSI.DA.1;OPC.PHDServerDA.1;Aspen.Infoplus21_DA.1;National Instruments.OPCLabVIEW;RSLinx OPC Server;KEPware.KEPServerEx.V4;Matrikon.OPC.Simulation;Prosys.OPC.Simulation' OPC_CLIENT = 'OpenOPC' def quality_str(quality_bits): quality = (quality_bits >> 6) & 3 return OPC_QUALITY[quality] def type_check(tags): if type(tags) in (types.ListType, types.TupleType): single = False elif tags == None: tags = [] single = False else: tags = [tags] single = True if len([t for t in tags if type(t) not in types.StringTypes]) == 0: valid = True else: valid = False return tags, single, valid def wild2regex(string): return string.replace('.','\.').replace('*','.*').replace('?','.').replace('!','^') def tags2trace(tags): arg_str = '' for i,t in enumerate(tags[1:]): if i > 0: arg_str += ',' arg_str += '%s' % t return arg_str def exceptional(func, alt_return=None, alt_exceptions=(Exception,), final=None, catch=None): def _exceptional(*args, **kwargs): try: try: return func(*args, **kwargs) except alt_exceptions: return alt_return except: if catch: return catch(sys.exc_info(), lambda:func(*args, **kwargs)) raise finally: if final: final() return _exceptional def get_sessions(host=None, port=7766): if host is None: host = 'localhost' import Pyro.core Pyro.core.initClient(banner = 0) server_obj = Pyro.core.getProxyForURI("PYROLOC://%s:%s/opc" % (host, port)) return server_obj.get_clients() def close_session(guid, host=None, port=7766): if host is None: host = 'localhost' import Pyro.core Pyro.core.initClient(banner = 0) server_obj = Pyro.core.getProxyForURI("PYROLOC://%s:%s/opc" % (host, port)) return server_obj.force_close(guid) def open_client(host='localhost', port=7766): import Pyro.core Pyro.core.initClient(banner=0) server_obj = Pyro.core.getProxyForURI("PYROLOC://%s:%s/opc" % (host, port)) return server_obj.create_client() class TimeoutError(Exception): def __init__(self, txt): Exception.__init__(self, txt) class OPCError(Exception): def __init__(self, txt): Exception.__init__(self, txt) class GroupEvents: def __init__(self): self.client = current_client def OnDataChange(self, TransactionID, NumItems, ClientHandles, ItemValues, Qualities, TimeStamps): self.client.callback_queue.put((TransactionID, ClientHandles, ItemValues, Qualities, TimeStamps)) class client(): def __init__(self, opc_class=None, client_name=None): self.callback_queue = Queue.Queue() pythoncom.CoInitialize() if opc_class == None: if os.environ.has_key('OPC_CLASS'): opc_class = os.environ['OPC_CLASS'] else: opc_class = 
OPC_CLASS opc_class_list = opc_class.split(';') for i,c in enumerate(opc_class_list): try: self._opc = win32com.client.gencache.EnsureDispatch(c, 0) self.opc_class = c break except pythoncom.com_error, err: if i == len(opc_class_list)-1: error_msg = 'Dispatch: %s' % self._get_error_str(err) raise OPCError, error_msg self._event = win32event.CreateEvent(None,0,0,None) self.opc_server = None self.opc_host = '128.100.1.21' self.client_name = client_name self._groups = {} self._group_tags = {} self._group_valid_tags = {} self._group_server_handles = {} self._group_handles_tag = {} self._group_hooks = {} self._open_serv = None self._open_self = None self._open_host = None self._open_port = None self._open_guid = None self._prev_serv_time = None self._tx_id = 0 self.trace = None self.cpu = None def set_trace(self, trace): if self._open_serv == None: self.trace = trace
Apache License 2.0
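A minimal connection sketch for the client above (the module is Python 2 / Windows-oriented); the server names and host are placeholders, and several candidate servers can be given separated by ';' as the connect loop shows.

opc = client()
opc.connect('Matrikon.OPC.Simulation;Graybox.Simulator.1', 'localhost')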
traxes/zeno
src/main.py
main
python
def main():
    parser = argparse.ArgumentParser(description='Zeno Commandline tool: Searches Automagically for Bugs')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-b', '--blacklist', type=str,
                       help="Provide a blacklist seperated by commas. This will filter out not needed plugins")
    group.add_argument('-wl', '--whitelist', help='Whitelist modules', type=str)
    parser.add_argument("--plugin_order", default=None, type=str, dest="plugin_order",
                        help="Provide a file with the plugins in the correct order to be loaded")
    parser.add_argument('--deep', dest='deep', action='store_true',
                        help='Uses Deep Search mode. '
                             'This might take longer but it will also get a grasp of compiler optimizations')
    parser.add_argument('--fast', dest='fast', action='store_true',
                        help='Uses Fast Search mode. '
                             'It will skip a throughout search while slicing and just uses the first it finds')
    parser.add_argument('--search-path', dest='search_path', default="/lib:/usr/lib",
                        help='":" separated list of paths to search libraries in')
    parser.add_argument('target', metavar='target-path', nargs='+',
                        help='Binary to be analysed', type=lambda x: is_valid_file(parser, x))
    parser.add_argument("--system-root", default="/", dest="root",
                        help="Use paths relative to this root for library searching")
    parser.add_argument("--dot", default=None, dest="outputfile", help="Write graph to a dotfile")
    parser.add_argument("--cov", default=None, dest="coverage", help="Provide a coverage file for better filtering")
    parser.add_argument("--cov_folder", default=None, dest="cov_folder",
                        help="Provide a folder with coverage files for better filtering")

    plugins = PluginLoader(argparser=parser)
    args = parser.parse_args()

    filtered_plugins = plugin_filter(args, [name for name, _ in plugins.available_plugins])

    input_file = args.target
    for filename in input_file:
        print("Analyzing {0}".format(filename))
        bv = binaryninja.BinaryViewType.get_view_of_file(filename)
        if args.coverage:
            print("Single Coverage given")
            cov = DrcovData(args.coverage)
            cov_bb = cov.get_blocks_by_module(path_leaf(filename))
        if args.cov_folder:
            pass
        print("arch: {0} | platform: {1}".format(bv.arch, bv.platform))
        bv.update_analysis_and_wait()
        print(filtered_plugins)
        for name in filtered_plugins:
            plugin = plugins.get_plugin_instance(name)
            plugin.vulns = []
            plugin.run(bv, args)
            if args.coverage:
                plugin.set_traces(cov_bb)
            del plugin
    return
Main function. Also used for command-line parsing.

:return:
https://github.com/traxes/zeno/blob/48e72e8884838a171a217336923beec2983853b5/src/main.py#L85-L162
import binaryninja import argparse import os from avd.loader import PluginLoader from avd.helper.drcov import DrcovData import ntpath import errno def is_valid_file(parser, arg): if not os.path.exists(arg): parser.error("The file %s does not exist!" % arg) else: if not os.path.isfile(arg): parser.error("The file %s does not exist!" % arg) else: return arg def path_leaf(path): head, tail = ntpath.split(path) return tail or ntpath.basename(head) def plugin_filter(args, plugins): returning_plugins = list() if args.blacklist: for blacklist_module in args.blacklist.replace(" ", "").split(","): plugins.remove(blacklist_module) elif args.whitelist: for whitelist_module in args.whitelist.replace(" ", "").split(","): if whitelist_module in plugins: returning_plugins.append(whitelist_module) return returning_plugins if args.plugin_order: if "," in args.plugin_order: for plugin_name in args.plugin_order.replace(" ", "").split(","): if plugin_name in plugins: returning_plugins.append(plugin_name) else: if not os.path.exists(args.plugin_order): OSError.NotADirectoryError(errno.ENOENT, os.strerror(errno.ENOENT), args.plugin_order) else: if not os.path.isfile(args.plugin_order): raise OSError.FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.plugin_order) else: with open(args.plugin_order) as fin: for plugin_name in fin: if plugin_name in plugins: returning_plugins.append(plugin_name) return returning_plugins if len(returning_plugins) > 0 else plugins
BSD 3-Clause New or Revised License
varianapis/pyesapi
pyesapi/stubs/VMS/TPS/Common/Model/Types.py
BeamNumber.ReadXml
python
def ReadXml(self, reader):
    pass
ReadXml(self: BeamNumber, reader: XmlReader)
https://github.com/varianapis/pyesapi/blob/c7b1d2986cab9387e85dbb4331a44e5b743b86ea/pyesapi/stubs/VMS/TPS/Common/Model/Types.py#L164-L167
class ApplicationScriptApprovalStatus(Enum, IComparable, IFormattable, IConvertible): def __eq__(self, *args): pass def __format__(self, *args): pass def __ge__(self, *args): pass def __gt__(self, *args): pass def __init__(self, *args): pass def __le__(self, *args): pass def __lt__(self, *args): pass def __ne__(self, *args): pass def __reduce_ex__(self, *args): pass def __str__(self, *args): pass Approved = None ApprovedForEvaluation = None Retired = None Unapproved = None Undefined = None value__ = None class ApplicationScriptType(Enum, IComparable, IFormattable, IConvertible): def __eq__(self, *args): pass def __format__(self, *args): pass def __ge__(self, *args): pass def __gt__(self, *args): pass def __init__(self, *args): pass def __le__(self, *args): pass def __lt__(self, *args): pass def __ne__(self, *args): pass def __reduce_ex__(self, *args): pass def __str__(self, *args): pass ESAPI = None ESAPIActionPack = None ESAPICustomExecutable = None MIRS = None Unknown = None value__ = None class ApprovalHistoryEntry(object): ApprovalDateTime = None ApprovalStatus = None StatusComment = None UserDisplayName = None UserId = None class AxisAlignedMargins(object): def ToString(self): pass @staticmethod def __new__(self, geometry, x1, y1, z1, x2, y2, z2): pass Geometry = None X1 = None X2 = None Y1 = None Y2 = None Z1 = None Z2 = None class BeamNumber(object, IXmlSerializable, IEquatable[BeamNumber]): def Equals(self, other): pass def GetHashCode(self): pass def GetSchema(self): pass
MIT License
scikit-nano/scikit-nano
sknano/core/math/_vector.py
vector_projection
python
def vector_projection(a, b):
    return dot(a, b) / dot(b, b) * b
Compute the vector projection of :math:`\\mathbf{a}` onto :math:`\\mathbf{b}`.

Parameters
----------
a, b : `Vector`

Returns
-------
:class:`Vector`
https://github.com/scikit-nano/scikit-nano/blob/ef9b24165ba37918b3f520657f7311ba139b3e7d/sknano/core/math/_vector.py#L701-L714
from __future__ import absolute_import, division, print_function from __future__ import unicode_literals __docformat__ = 'restructuredtext en' import numbers import warnings import numpy as np np.seterr(all='warn') from sknano.core import Singleton from ._point import Point from ._transforms import rotate, transformation_matrix __all__ = ['Vector', 'angle', 'cross', 'dot', 'scalar_triple_product', 'vector_triple_product', 'scalar_projection', 'vector_projection', 'vector_rejection', 'projection', 'rejection', 'e1', 'e2', 'e3', 'xhat', 'yhat', 'zhat', '_check_vector_compatibility', 'NullVector'] def _check_vector_compatibility(v1, v2): if len(v1) != len(v2): raise ValueError("{!r} and {!r} must have same number " "of components".format(v1, v2)) class Vector(np.ndarray): __array_priority__ = 15.0 def __new__(cls, v=None, nd=None, p0=None, p=None, dtype=None, copy=True): if isinstance(v, Vector): if nd is not None and isinstance(nd, numbers.Number) and len(v) < int(nd): v = np.append(v, np.zeros(int(nd) - len(v))) if dtype is None: intype = v.dtype else: intype = np.dtype(dtype) vec = v.view(cls) if p0 is not None: vec = Vector(np.asarray(vec), nd=nd, p0=Point(p0, nd=nd, dtype=dtype, copy=copy)) if intype != v.dtype: return vec.astype(intype) if copy: return vec.copy() else: return vec dtype = np.dtype(dtype) if isinstance(v, (tuple, list, np.ndarray)): try: for i, coord in enumerate(v[:]): if coord is None: v[i] = 0.0 except TypeError: v = np.zeros(len(v), dtype=dtype) else: v = np.asarray(v, dtype=dtype) if nd is not None and isinstance(nd, numbers.Number) and len(v) < int(nd): v = np.append(v, np.zeros(int(nd) - len(v))) nd = len(v) if p0 is None: p0 = Point(nd=nd, dtype=dtype) else: p0 = Point(p0, nd=nd, dtype=dtype, copy=copy) p = p0 + v else: if p is None and p0 is None and (nd is None or not isinstance(nd, numbers.Number)): nd = 3 if p is None: p = Point(nd=nd, dtype=dtype) else: p = Point(p, nd=nd, dtype=dtype, copy=copy) if p0 is None: p0 = Point(nd=nd, dtype=dtype) else: p0 = Point(p0, nd=nd, dtype=dtype, copy=copy) v = p - p0 arr = np.array(v, dtype=dtype, copy=copy).view(cls) vec = np.ndarray.__new__(cls, arr.shape, arr.dtype, buffer=arr) vec.nd = len(vec) vec._p = p vec._p0 = p0 return vec def __array_finalize__(self, obj): if obj is None: return None self.nd = len(obj) self._p0 = getattr(obj, 'p0', None) self._p = getattr(obj, 'p', None) if self._p0 is not None and self._p is None: try: self._p = self._p0 + self.__array__() except TypeError: try: self._p = self._p0 + np.asarray(obj) except TypeError: pass def __array_wrap__(self, obj, context=None): res = np.ndarray.__array_wrap__(self, obj, context) return self.__class__(res.__array__(), p0=self.p0) def __str__(self): return repr(self) def __repr__(self): try: if np.allclose(self.p0, np.zeros_like(self.p0)): return "Vector({!r})".format(self.tolist()) else: return "Vector({!r}, p0={!r}, p={!r})".format( self.tolist(), self.p0.tolist(), self.p.tolist()) except AttributeError: return "Vector({!r})".format(self.tolist()) def tolist(self): return np.around(self.__array__(), decimals=10).tolist() def __getattr__(self, name): try: nd = len(self) if nd == 2 and name in ('x', 'y'): if name == 'x': return self[0] else: return self[1] elif nd == 3 and name in ('x', 'y', 'z'): if name == 'x': return self[0] elif name == 'y': return self[1] else: return self[2] except TypeError: pass return np.ndarray.__getattribute__(self, name) def __setattr__(self, name, value): nd = getattr(self, 'nd', None) if nd is not None and nd == 2 and name in 
('x', 'y'): if name == 'x': self[0] = value try: self._p.x = self._p0.x + value except AttributeError: pass else: self[1] = value try: self._p.y = self._p0.y + value except AttributeError: pass elif nd is not None and nd == 3 and name in ('x', 'y', 'z'): if name == 'x': self[0] = value try: self._p.x = self._p0.x + value except AttributeError: pass elif name == 'y': self[1] = value try: self._p.y = self._p0.y + value except AttributeError: pass else: self[2] = value try: self._p.z = self._p0.z + value except AttributeError: pass else: np.ndarray.__setattr__(self, name, value) def __getitem__(self, index): data = np.ndarray.__getitem__(np.ndarray.view(self, np.ndarray), index) p0 = np.ndarray.__getitem__(np.ndarray.view( np.ndarray.__getattribute__(self, 'p0'), np.ndarray), index) p = np.ndarray.__getitem__(np.ndarray.view( np.ndarray.__getattribute__(self, 'p'), np.ndarray), index) try: data = data.view(type(self)) data._p0 = np.ndarray.view(p0, Point) data._p = np.ndarray.view(p, Point) data._nd = len(data) except (AttributeError, TypeError): pass return data def __setitem__(self, index, value): data = np.ndarray.view(self, np.ndarray) p0 = np.ndarray.view(np.ndarray.__getattribute__(self, 'p0'), np.ndarray) p = np.ndarray.view(np.ndarray.__getattribute__(self, 'p'), np.ndarray) np.ndarray.__setitem__(data, index, value) np.ndarray.__setitem__(p, index, np.ndarray.__getitem__(p0, index) + np.ndarray.__getitem__(data, index)) data = data.view(type(self)) data._p0 = np.ndarray.view(p0, Point) data._p = np.ndarray.view(p, Point) def __eq__(self, other): if not isinstance(other, Vector): other = Vector(other) return self is other or (np.allclose(self.__array__(), other.__array__()) and np.allclose(self.p0, other.p0) and np.allclose(self.p, other.p)) def __lt__(self, other): if not isinstance(other, Vector): other = Vector(other) return self.norm < other.norm def __le__(self, other): return self < other or self == other def __gt__(self, other): return not (self < other or self == other) def __ge__(self, other): return not (self < other) def __ne__(self, other): return not (self == other) def __mul__(self, other): if np.isscalar(other): return self.__class__(self.__array__() * other, p0=self.p0) elif isinstance(other, Vector) and other.nd == self.nd: print("Computing *scalar product* of Vector's:\n" "{!r}\n{!r}".format(self, other)) return self.dot(other) return NotImplemented def __rmul__(self, other): if np.isscalar(other): return self.__class__(other * self.__array__(), p0=self.p0) return NotImplemented def __truediv__(self, other): if np.isscalar(other): return Vector(self.__array__() / other, p0=self.p0) return NotImplemented __div__ = __truediv__ def __floordiv__(self, other): if np.isscalar(other): return Vector(self.__array__() // other, p0=self.p0) return NotImplemented def __pow__(self, other, *modulo): if isinstance(other, numbers.Number): return Vector(self.__array__() ** other, p0=self.p0) return NotImplemented def __iadd__(self, other): super().__iadd__(other) self._update_p() return self def __isub__(self, other): super().__isub__(other) self._update_p() return self def __imul__(self, other): if np.isscalar(other): super().__imul__(other) self._update_p() return self return NotImplemented def __itruediv__(self, other): if np.isscalar(other): super().__itruediv__(other) self._update_p() return self return NotImplemented __idiv__ = __itruediv__ def __ifloordiv__(self, other): if np.isscalar(other): super().__ifloordiv__(other) self._update_p() return self return NotImplemented def 
__ipow__(self, other): if np.isscalar(other): super().__ipow__(other) self._update_p() return self return NotImplemented def copy(self): return self.__copy__() def __copy__(self): try: return self.__class__(self.__array__(), p0=self.p0.__array__()) except AttributeError: return self.__class__(self.__array__()) def __deepcopy__(self, memo): return self.__copy__() def _update_p(self): self._p[:] = self._p0[:] + self.__array__() @property def length(self): return self.norm @property def magnitude(self): return self.norm @property def mag(self): return self.norm @property def norm(self): return np.sqrt((self.__array__() ** 2).sum()) @property def unit_vector(self): with warnings.catch_warnings(): warnings.filterwarnings('ignore') return self / self.norm @property def p(self): return self._p @p.setter def p(self, value=np.ndarray): self._p[:] = value self[:] = self._p - self._p0 @property def p0(self): return self._p0 @p0.setter def p0(self, value=np.ndarray): self._p0[:] = value self[:] = self._p - self._p0 def _translate_p0(self, t, fix_components=False): if fix_components: self.translate(t) else: self.p0.translate(t) def _translate_p(self, t, fix_components=False): if fix_components: self.translate(t) else: self.p.translate(t) @property def column_matrix(self): return np.matrix(self.__array__().reshape(self.shape[0], 1)) @property def row_matrix(self): return np.matrix(self.__array__()) def angle(self, other): _check_vector_compatibility(self, other) return np.arccos(np.dot(self.__array__(), other.__array__()) / (self.norm * other.norm)) def cross(self, other): _check_vector_compatibility(self, other) val = np.cross(self.__array__(), other.__array__()) if val.shape == (): return val[()] return Vector(val, p0=self.p0) def dot(self, other, out=None): _check_vector_compatibility(self, other) return self.__array__().dot(other.__array__()) def normalize(self): self[:] = self.unit_vector def projection(self, v): u = self return dot(u, v) / dot(v, v) * v def rejection(self, v): u = self return u - self.projection(v) def rezero_components(self, epsilon=1.0e-10): self.rezero(epsilon=epsilon) def rezero(self, epsilon=1.0e-10): self[np.where(np.abs(self.__array__()) <= epsilon)] = 0.0 def rotate(self, angle=None, axis=None, anchor_point=None, rot_point=None, from_vector=None, to_vector=None, degrees=False, transform_matrix=None, fix_anchor_point=False, verbose=False, **kwargs): if transform_matrix is None: transform_matrix = transformation_matrix(angle=angle, axis=axis, anchor_point=anchor_point, rot_point=rot_point, from_vector=from_vector, to_vector=to_vector, degrees=degrees, verbose=verbose, **kwargs) self.p = rotate(self.p, transform_matrix=transform_matrix) if not fix_anchor_point: self.p0 = rotate(self.p0, transform_matrix=transform_matrix) def scale(self): return NotImplemented def translate(self, t, fix_anchor_point=False): self.p += t if not fix_anchor_point: self.p0 += t def angle(u, v): return np.arccos(dot(u, v) / (u.norm * v.norm)) def cross(u, v, p0=None): val = np.cross(np.asarray(u), np.asarray(v)) if p0 is None: p0 = u.p0 if val.shape == (): return val[()] else: return Vector(val, p0=p0) def dot(u, v): return np.dot(np.asarray(u), np.asarray(v)) def scalar_triple_product(u, v, w): return dot(u, cross(v, w)) def vector_triple_product(u, v, w): return cross(u, cross(v, w)) def scalar_projection(a, b): return dot(a, b) / b.norm
BSD 2-Clause Simplified License
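A worked example of the formula dot(a, b) / dot(b, b) * b used above, written with plain numpy arrays (rather than the package's Vector class) so the result can be checked by hand.

import numpy as np

a = np.array([3.0, 4.0, 0.0])
b = np.array([1.0, 0.0, 0.0])
print(np.dot(a, b) / np.dot(b, b) * b)   # [3. 0. 0.] -- the component of a along b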
bloomreach/briefly
src/briefly/core.py
Pipeline.set_task_done_callback
python
def set_task_done_callback(self, task_done_callback):
    assert callable(task_done_callback)
    self.task_done_callback = task_done_callback
Set the callback method. The callback should have this form:

    task_done_callback(node, exception)
https://github.com/bloomreach/briefly/blob/78e9b6682ce936b77e4ff3fef0344beabe4b582a/src/briefly/core.py#L235-L240
import sys import time import traceback import threading import optparse import Queue from properties import * from process import * from defaults import * from coreutils import * import dag BRIEFLY_VERSION = '1.0' class NodeExecutor(threading.Thread): def __init__(self, service, task_done_callback=None): super(NodeExecutor, self).__init__() self.service = service self.daemon = True self.task_done = task_done_callback self.start() def run(self): while True: try: node = self.service.get_next_job() if node is None: break self.execute_node(node) finally: if node is not None: self.service.complete_execute(node) def execute_node(self, node): node.reset_log() if node.prop.test_run: node.test_execute() return if any([dep.exe_error is not None for dep in node.deps]): log(' - %s : skipped (depencency error)', node.hash()) node.exe_error = Exception("Dependency error") return exec_exp = None for i in range(node.prop.num_retry): log(' - %s : executing', node.hash()) if i > 0: node.log('Try again #%d...', i) log(' - %s : try again #%d', node.hash(), i) try: node.check_execute() log(' - %s : done', node.hash()) break except Exception, e: log(' - %s : exception: %s', node.hash(), str(e)) if i == node.prop.num_retry - 1: log(' - %s : %s', node.hash(), traceback.format_exc()) exec_exp = e if self.task_done: self.task_done(node, exec_exp) class ExecutorService(object): def __init__(self, objs, task_done_callback=None): self.number_of_threads = objs.prop.run_threads self.dag = dag.DependencyGraph() self.executor_factory = NodeExecutor self.lock = threading.Lock() self.pending = Queue.PriorityQueue() self.task_done_callback = task_done_callback self.order = 1 def get_next_job(self): order, node = self.pending.get() return node def build_dependency_graph(self, node): if node.executed or node in self.dag: return self.dag.add_node(node, self.order) self.order += 1 for dep in node.deps: self.build_dependency_graph(dep) if not dep.executed: self.dag.add_edge(dep, node) def complete_execute(self, node): if self.empty(): return with self.lock: bridge_nodes = self.dag.get_bridge_nodes(node) self.dag.remove(node) self.enqueue_pending(bridge_nodes) if len(self.dag) == 0: self.terminate() def enqueue_pending(self, nodes): nodes = list(nodes) nodes.sort(key=lambda x: x[1]) ''' Put iterable nodes into pending priority queue.''' for ready_node, order in nodes: self.pending.put((order, ready_node)) def empty(self): with self.lock: return len(self.dag) == 0 def terminate(self): for i in xrange(self.number_of_threads): self.pending.put((sys.maxint, None)) def execute(self, targets): for target in targets: target.check_configure() self.build_dependency_graph(target) if self.empty(): return assert self.pending.empty() self.enqueue_pending(self.dag.get_start_nodes()) running_threads = [] for i in xrange(self.number_of_threads): running_threads.append(self.executor_factory(self, self.task_done_callback)) try: for thread in running_threads: while thread.is_alive() and not thread.join(60): pass finally: self.pending = None class Pipeline(object): resources = set() def __init__(self, name, pprop=None): self.name = name self.targets = None self.task_done_callback = None if pprop: self.prop = pprop.get_copy() else: self.prop = PIPELINE_DEFAULT_PROPERTIES.get_copy() self.load_args() def load_args(self): if not sys.argv[0]: sys.argv[0] = '' parser = optparse.OptionParser(description='Briefly Script Engine. 
Version %s' % BRIEFLY_VERSION) parser.add_option('-D', '--define', help='Add a property setting.', action='append', default=[]) parser.add_option('-p', '--property', help='property file', action='append', default=[]) opt, args = parser.parse_args() for pf in opt.property: self.prop.load(pf) for d in opt.define: self.prop.parse(d)
Apache License 2.0
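A small sketch of a callback with the (node, exception) signature the docstring describes, assuming a Pipeline can be constructed as shown in the context above.

def on_task_done(node, exception):
    if exception is not None:
        print('node %s failed: %s' % (node.hash(), exception))
    else:
        print('node %s finished' % node.hash())

pipeline = Pipeline('example')
pipeline.set_task_done_callback(on_task_done)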
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/sensor/uber.py
UberSensor.name
python
def name(self):
    if 'uber' not in self._name.lower():
        self._name = 'Uber{}'.format(self._name)
    return self._name
Return the name of the sensor.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/sensor/uber.py#L102-L106
import logging from datetime import timedelta import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle REQUIREMENTS = ['uber_rides==0.6.0'] _LOGGER = logging.getLogger(__name__) CONF_END_LATITUDE = 'end_latitude' CONF_END_LONGITUDE = 'end_longitude' CONF_PRODUCT_IDS = 'product_ids' CONF_SERVER_TOKEN = 'server_token' CONF_START_LATITUDE = 'start_latitude' CONF_START_LONGITUDE = 'start_longitude' ICON = 'mdi:taxi' MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SERVER_TOKEN): cv.string, vol.Optional(CONF_START_LATITUDE): cv.latitude, vol.Optional(CONF_START_LONGITUDE): cv.longitude, vol.Optional(CONF_END_LATITUDE): cv.latitude, vol.Optional(CONF_END_LONGITUDE): cv.longitude, vol.Optional(CONF_PRODUCT_IDS): vol.All(cv.ensure_list, [cv.string]), }) def setup_platform(hass, config, add_devices, discovery_info=None): from uber_rides.session import Session session = Session(server_token=config.get(CONF_SERVER_TOKEN)) start_latitude = config.get(CONF_START_LATITUDE, hass.config.latitude) start_longitude = config.get(CONF_START_LONGITUDE, hass.config.longitude) end_latitude = config.get(CONF_END_LATITUDE) end_longitude = config.get(CONF_END_LONGITUDE) wanted_product_ids = config.get(CONF_PRODUCT_IDS) dev = [] timeandpriceest = UberEstimate( session, start_latitude, start_longitude, end_latitude, end_longitude) for product_id, product in timeandpriceest.products.items(): if (wanted_product_ids is not None) and (product_id not in wanted_product_ids): continue dev.append(UberSensor('time', timeandpriceest, product_id, product)) if product.get('price_details') is not None and product['display_name'] != 'TAXI': dev.append(UberSensor( 'price', timeandpriceest, product_id, product)) add_devices(dev, True) class UberSensor(Entity): def __init__(self, sensorType, products, product_id, product): self.data = products self._product_id = product_id self._product = product self._sensortype = sensorType self._name = '{} {}'.format( self._product['display_name'], self._sensortype) if self._sensortype == 'time': self._unit_of_measurement = 'min' time_estimate = self._product.get('time_estimate_seconds', 0) self._state = int(time_estimate / 60) elif self._sensortype == 'price': if self._product.get('price_details') is not None: price_details = self._product['price_details'] self._unit_of_measurement = price_details.get('currency_code') try: if price_details.get('low_estimate') is not None: statekey = 'minimum' else: statekey = 'low_estimate' self._state = int(price_details.get(statekey)) except TypeError: self._state = 0 else: self._state = 0 @property
MIT License
alan-turing-institute/mogp-emulator
mogp_emulator/MeanFunction.py
MeanBase.__call__
python
def __call__(self, other):
    if issubclass(type(other), MeanBase):
        return MeanComposite(self, other)
    else:
        raise TypeError("other function cannot be composed with a MeanBase")
Composes two mean functions

This method composes two mean functions, returning a ``MeanComposite`` object. If the second argument is not a subclass of ``MeanBase``, an exception is raised.

:param other: Second ``MeanBase`` to be composed
:type other: subclass of MeanBase
:returns: ``MeanComposite`` instance
:rtype: MeanComposite
https://github.com/alan-turing-institute/mogp-emulator/blob/41c3615dd2b51222b00f1e85c48f82d128c99003/mogp_emulator/MeanFunction.py#L467-L484
import numpy as np from functools import partial from inspect import signature from mogp_emulator.formula import mean_from_patsy_formula, mean_from_string def MeanFunction(formula, inputdict={}, use_patsy=True): if formula is None or (isinstance(formula, str) and formula.strip() == ""): return ConstantMean(0.) if not isinstance(formula, str): raise ValueError("input formula must be a string") if use_patsy: mf = mean_from_patsy_formula(formula, inputdict) else: mf = mean_from_string(formula, inputdict) return mf class MeanBase(object): def _check_inputs(self, x, params): x = np.array(x) params = np.array(params).flatten() if len(x.shape) == 1: x = np.reshape(x, (-1, 1)) assert len(x.shape) == 2, "inputs must be a 1D or 2D array" assert len(params.shape) == 1, "params must be a 1D array" assert len(params) == self.get_n_params(x), "bad length for params" return x, params def get_n_params(self, x): raise NotImplementedError("base mean function does not implement a particular function") def mean_f(self, x, params): raise NotImplementedError("base mean function does not implement a particular function") def mean_deriv(self, x, params): raise NotImplementedError("base mean function does not implement a particular function") def mean_hessian(self, x, params): raise NotImplementedError("base mean function does not implement a particular function") def mean_inputderiv(self, x, params): raise NotImplementedError("base mean function does not implement a particular function") def __add__(self, other): if issubclass(type(other), MeanBase): return MeanSum(self, other) elif isinstance(other, (float, int)): return MeanSum(self, ConstantMean(other)) else: raise TypeError("other function cannot be added with a MeanBase") def __radd__(self, other): if issubclass(type(other), MeanBase): return MeanSum(other, self) elif isinstance(other, (float, int)): return MeanSum(ConstantMean(other), self) else: raise TypeError("other function cannot be added with a MeanBase") def __mul__(self, other): if issubclass(type(other), MeanBase): return MeanProduct(self, other) elif isinstance(other, (float, int)): return MeanProduct(self, ConstantMean(other)) else: raise TypeError("other function cannot be multiplied with a MeanBase") def __rmul__(self, other): if issubclass(type(other), MeanBase): return MeanProduct(other, self) elif isinstance(other, (float, int)): return MeanProduct(ConstantMean(other), self) else: raise TypeError("other function cannot be multipled with a MeanBase") def __pow__(self, exp): if isinstance(exp, (float, int)): return MeanPower(self, ConstantMean(exp)) elif isinstance(exp, (Coefficient, ConstantMean)): return MeanPower(self, exp) else: raise TypeError("MeanBase can only be raised to a power that is a ConstantMean, " + "Coefficient, or float/int") def __rpow__(self, base): if not isinstance(self, (Coefficient, ConstantMean)): raise TypeError("arbitrary mean functions cannot serve as the exponent when " + "raising a mean function to a power") if isinstance(base, (float, int)): return MeanPower(ConstantMean(base), self) elif issubclass(type(base), MeanBase): return MeanPower(base, self) else: raise TypeError("base in a MeanPower must be a MeanBase or a float/int")
MIT License
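A hedged usage sketch for the MeanBase.__call__ record above: two ConstantMean instances (the concrete subclass visible in the excerpt) are composed, and a bare float exercises the TypeError branch. MeanComposite is only referenced in the excerpt, so nothing beyond its construction is assumed here.

from mogp_emulator.MeanFunction import ConstantMean

outer = ConstantMean(2.0)
inner = ConstantMean(3.0)

composed = outer(inner)          # __call__ returns MeanComposite(outer, inner)
print(type(composed).__name__)   # "MeanComposite"

try:
    outer(3.0)                   # a float is not a MeanBase subclass
except TypeError as exc:
    print(exc)                   # "other function cannot be composed with a MeanBase"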
fishilico/shared
python/network/web_api.py
WebSiteContext.post
python
def post(self, uri, **post_params):
    data = urllib.parse.urlencode(post_params).encode('utf-8')
    return self.http_request('POST', uri, data=data, headers={
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    })
Perform a POST request with POST parameters
https://github.com/fishilico/shared/blob/170a16abf8b8cd946ccb1c45c322151fd22a1e2c/python/network/web_api.py#L147-L152
import argparse import http.cookiejar import json import logging import urllib.parse import urllib.request import ssl logger = logging.getLogger(__name__) USER_AGENTS = { 'android-4': 'Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1', 'chrome-72_linux-x64': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36', 'chrome-74_android-9': 'Mozilla/5.0 (Linux; Android 9; SM-G960F Build/PPR1.180610.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.157 Mobile Safari/537.36', 'chrome-74_windows-x64': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36', 'firefox-65_linux-x64': 'Mozilla/5.0 (X11; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0', 'msie-10_windows-x64': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)', } def disable_ssl_cert_check_opener(): ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE return urllib.request.HTTPSHandler(context=ctx) class NoRedirectHandler(urllib.request.HTTPRedirectHandler): def redirect_request(self, req, fp, code, msg, hdrs, newurl): pass class WebSiteContext(object): def __init__(self, base_url, disable_ssl_check=False, is_ajax_api=False): self.base_url = base_url.rstrip('/') self.cookie_jar = http.cookiejar.CookieJar() self.disable_ssl_check = disable_ssl_check self.default_headers = { 'Referer': self.base_url + '/', 'Connection': 'close', 'User-Agent': USER_AGENTS['chrome-74_windows-x64'], 'Accept': '*/*', } if is_ajax_api: self.default_headers['X-Requested-With'] = 'XMLHttpRequest' if disable_ssl_check: self.ssl_opener = disable_ssl_cert_check_opener() else: self.ssl_opener = urllib.request.HTTPSHandler() def get_cookie(self, name): for cookie in self.cookie_jar: if cookie.name == name: return cookie.value return None def http_request(self, method, uri, data=None, headers=None, read_all=False): if headers is None: headers = {} for key, value in self.default_headers.items(): if key not in headers: headers[key] = value assert uri.startswith('/') url = self.base_url + uri logger.debug("HTTP %s %r", method, url) req = urllib.request.Request(url, data=data, headers=headers, method=method) cookies = urllib.request.HTTPCookieProcessor(cookiejar=self.cookie_jar) opener = urllib.request.build_opener(self.ssl_opener, cookies, NoRedirectHandler()) try: with opener.open(req) as resp: if resp.status not in (200, 204): logger.error("Request to %r returned HTTP status %d", uri, resp.status) raise ValueError(resp) content_length = int(resp.getheader('Content-Length', '0')) if content_length: data = resp.read(content_length) elif read_all: data = resp.read() else: data = None return resp, data except urllib.error.HTTPError as exc: if exc.status in (400, 401, 403, 405): content_length = int(exc.getheader('Content-Length', '0')) content_type = exc.getheader('Content-Type', '') data = exc.read(content_length) if content_length else None if content_length and content_type == 'application/json;charset=UTF-8': data = json.loads(data) logger.error("Got HTTP %d %r", exc.status, data) raise exc @staticmethod def decode_http_json_response(resp, data): content_type = resp.getheader('Content-Type', '') if content_type != 'application/json;charset=UTF-8': logger.error("Unexpected HTTP content type for JSON response: %r", content_type) raise ValueError return json.loads(data) def get(self, uri, **get_params): data = 
urllib.parse.urlencode(get_params) if data: uri += '?' + data return self.http_request('GET', uri) def get_and_json(self, uri, **get_params): resp, data = self.get(uri, **get_params) return self.decode_http_json_response(resp, data)
MIT License
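A hedged usage sketch for WebSiteContext.post: the host, path, form fields, and cookie name below are assumptions for illustration, while the constructor signature, the (response, data) return value, and the get_cookie helper follow the context shown above.

from web_api import WebSiteContext  # assumes the module above is importable as web_api

ctx = WebSiteContext('https://example.com', disable_ssl_check=False, is_ajax_api=True)

# Sends b'username=alice&password=hunter2' as a URL-encoded form body.
resp, data = ctx.post('/login', username='alice', password='hunter2')
print(resp.status)
print(ctx.get_cookie('SESSIONID'))  # hypothetical cookie name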
djpugh/fastapi_aad_auth
docs/source/conf.py
ConfigDocumenter.generate
python
def generate(self, *args, **kwargs) -> None:
    if not self.parse_name():
        return
    if not self.import_object():
        return
    sourcename = self.get_sourcename()
    config_vars = self._process_variables(self.object, '<Config>')
    self.indent = ''
    self.analyzer = None
    self.add_directive_header('')
    self.add_line('**Options:**', sourcename)
    for line in config_vars:
        self.add_line(line, sourcename)
Generate reST for the object given by *self.name*, and possibly for its members. If *more_content* is given, include that content. If *real_modname* is given, use that module name to find attribute docs. If *check_module* is True, only generate if the object is defined in the module name it is imported from. If *all_members* is True, document all members.
https://github.com/djpugh/fastapi_aad_auth/blob/4089ca00abb56d613e40be23e700a645e2ce264b/docs/source/conf.py#L449-L472
import os import datetime as dt from pathlib import Path from typing import Any import uuid from pydantic import BaseModel, SecretStr import sphinx_material from sphinx.ext.autodoc import ALL, ClassDocumenter from fastapi_aad_auth import __version__ __author__ = 'David Pugh' extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.extlinks', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx_github_changelog', 'sphinx_material' ] extlinks = { "issue": ("https://github.com/djpugh/fastapi_aad_auth/issues/%s", "#"), "pull": ("https://github.com/djpugh/fastapi_aad_auth/pull/%s", "PR #"), "user": ("https://github.com/%s", "@"), "pypi": ("https://pypi.org/project/%s", ""), } templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'fastapi_aad_auth' copyright = str(dt.datetime.now().year)+' '+__author__ version = __version__ release = __version__ language = 'English' exclude_patterns = [] default_role = ':any:' rst_epilog = """ .. role:: latex(raw) :format: latex """ autoclass_content='both' autodoc_class_signature='mix' html_title = 'FastAPI AAD Authentication' html_theme = 'sphinx_material' html_theme_path = sphinx_material.html_theme_path() html_context = sphinx_material.get_html_context() html_theme_options = { 'nav_title': html_title, 'base_url': 'https://djpugh.github.io/fastapi_aad_auth', 'repo_url': 'https://github.com/djpugh/fastapi_aad_auth/', 'repo_name': 'fastapi_aad_auth', 'globaltoc_depth': 1, 'globaltoc_collapse': False, 'globaltoc_includehidden': False, "logo_icon": "lock", "repo_type": "github", "color_primary": "cyan", "color_accent": "teal", "touch_icon": "images/apple-icon-152x152.png", "theme_color": "#2196f3", "master_doc": False, "nav_links": [ { "href": "https://fastapi.tiangolo.com/", "internal": False, "title": "FastAPI", }, { "href": "https://azure.microsoft.com/en-gb/services/active-directory/", "internal": False, "title": "Azure Active Directory", } ], "heroes": { "index": "Adding Azure Active Directory Authentication for fastAPI", }, "version_dropdown": False, } html_sidebars = { "**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"] } html_title = f'{project} documentation' html_logo = 'figures/logo.png' html_static_path = ['_static'] html_last_updated_fmt = '%b %d, %Y (version '+str(__version__)+')' htmlhelp_basename = 'fastapi-aad-authdoc' latex_elements = { : 'a4paper', : r"""\twoside\n """, } latex_documents = [ ('latex', 'fastapi-aad-auth.tex', u'fastapi-aad-auth', __author__, 'custom_manual'), ] latex_logo = "figures/latex-logo.png" latex_show_urls = 'footnote' latex_domain_indices = False latex_additional_files = ['./_static/custom_manual.cls'] man_pages = [ ('man', 'fastapi-aad-auth', u'fastapi-aad-auth', [ __author__], 1)] man_show_urls = True texinfo_documents = [ ('index', 'fastapi-aad-auth', u'fastapi-aad-auth', __author__, 'fastapi-aad-auth', 'Adding Azure Active Directory Authentication for FastAPI', 'Miscellaneous'), ] epub_title = u'fastapi-aad-auth' epub_author = __author__ epub_publisher = __author__ epub_copyright = copyright epub_exclude_files = ['search.html'] intersphinx_mapping = {'https://docs.python.org/': None} class ConfigDocumenter(ClassDocumenter): objtype = 'config' directivetype = 'data' domain = 'py' priority = -1000
MIT License
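The ConfigDocumenter excerpt above stops at the class attributes, before any registration with Sphinx. A minimal sketch of the usual hook, assuming a setup() function in the same conf.py (the registration itself is not shown in the source):

def setup(app):
    """Register the documenter; objtype 'config' yields an automatic 'autoconfig' directive."""
    app.add_autodocumenter(ConfigDocumenter)
    return {'version': __version__, 'parallel_read_safe': True}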