id | text | dataset_id
---|---|---|
3467324
|
<reponame>aivazis/ampcor
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# parasim
# (c) 1998-2021 all rights reserved
#
# pull the action protocol
from ..shells import action
# and the base panel
from ..shells import command
# pull in the command decorator
from .. import foundry
# commands
@foundry(implements=action, tip="estimate an offset field given a pair of raster images")
def offsets():
# get the action
from .Offsets import Offsets
# borrow its docstring
__doc__ = Offsets.__doc__
# and publish it
return Offsets
# temporary vis
@foundry(implements=action, tip="visualize the correlation surface")
def mdy():
# get the action
from .Gamma import Gamma
# borrow its docstring
__doc__ = Gamma.__doc__
# and publish it
return Gamma
# help
@foundry(implements=action, tip="display information about this application")
def about():
# get the action
from .About import About
# borrow its docstring
__doc__ = About.__doc__
# and publish it
return About
@foundry(implements=action, tip="display configuration information about this application")
def config():
# get the action
from .Config import Config
# borrow its docstring
__doc__ = Config.__doc__
# and publish it
return Config
@foundry(implements=action, tip="display debugging information about this application")
def debug():
# get the action
from .Debug import Debug
# borrow its docstring
__doc__ = Debug.__doc__
# and publish it
return Debug
# command completion; no tip so it doesn't show up on the help panel
@foundry(implements=action)
def complete():
# get the action
from .Complete import Complete
# and publish it
return Complete
# end of file
|
StarcoderdataPython
|
1873136
|
<reponame>amcclead7336/Enterprise_Data_Science_Final<filename>venv/lib/python3.8/site-packages/vsts/test/v4_0/models/suite_entry.py<gh_stars>0
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class SuiteEntry(Model):
"""SuiteEntry.
:param child_suite_id: Id of child suite in a suite
:type child_suite_id: int
:param sequence_number: Sequence number for the test case or child suite in the suite
:type sequence_number: int
:param suite_id: Id for the suite
:type suite_id: int
:param test_case_id: Id of a test case in a suite
:type test_case_id: int
"""
_attribute_map = {
'child_suite_id': {'key': 'childSuiteId', 'type': 'int'},
'sequence_number': {'key': 'sequenceNumber', 'type': 'int'},
'suite_id': {'key': 'suiteId', 'type': 'int'},
'test_case_id': {'key': 'testCaseId', 'type': 'int'}
}
def __init__(self, child_suite_id=None, sequence_number=None, suite_id=None, test_case_id=None):
super(SuiteEntry, self).__init__()
self.child_suite_id = child_suite_id
self.sequence_number = sequence_number
self.suite_id = suite_id
self.test_case_id = test_case_id
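# A hedged usage sketch, not part of the generated file: _attribute_map above drives
# msrest (de)serialization, mapping the snake_case attributes to the camelCase keys the
# REST API expects. The msrest wiring below is an assumption for illustration only.
if __name__ == '__main__':
    from msrest import Serializer
    entry = SuiteEntry(child_suite_id=7, sequence_number=1, suite_id=42)
    serializer = Serializer({'SuiteEntry': SuiteEntry})
    # produces something like {'childSuiteId': 7, 'sequenceNumber': 1, 'suiteId': 42}
    print(serializer.body(entry, 'SuiteEntry'))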
|
StarcoderdataPython
|
322006
|
<filename>istio/datadog_checks/istio/metrics.py
# (C) Datadog, Inc. 2020 - Present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
GENERIC_METRICS = {
'go_gc_duration_seconds': 'go.gc_duration_seconds',
'go_goroutines': 'go.goroutines',
'go_info': 'go.info',
'go_memstats_alloc_bytes': 'go.memstats.alloc_bytes',
'go_memstats_alloc_bytes_total': 'go.memstats.alloc_bytes_total',
'go_memstats_buck_hash_sys_bytes': 'go.memstats.buck_hash_sys_bytes',
'go_memstats_frees_total': 'go.memstats.frees_total',
'go_memstats_gc_cpu_fraction': 'go.memstats.gc_cpu_fraction',
'go_memstats_gc_sys_bytes': 'go.memstats.gc_sys_bytes',
'go_memstats_heap_alloc_bytes': 'go.memstats.heap_alloc_bytes',
'go_memstats_heap_idle_bytes': 'go.memstats.heap_idle_bytes',
'go_memstats_heap_inuse_bytes': 'go.memstats.heap_inuse_bytes',
'go_memstats_heap_objects': 'go.memstats.heap_objects',
'go_memstats_heap_released_bytes': 'go.memstats.heap_released_bytes',
'go_memstats_heap_sys_bytes': 'go.memstats.heap_sys_bytes',
'go_memstats_last_gc_time_seconds': 'go.memstats.last_gc_time_seconds',
'go_memstats_lookups_total': 'go.memstats.lookups_total',
'go_memstats_mallocs_total': 'go.memstats.mallocs_total',
'go_memstats_mcache_inuse_bytes': 'go.memstats.mcache_inuse_bytes',
'go_memstats_mcache_sys_bytes': 'go.memstats.mcache_sys_bytes',
'go_memstats_mspan_inuse_bytes': 'go.memstats.mspan_inuse_bytes',
'go_memstats_mspan_sys_bytes': 'go.memstats.mspan_sys_bytes',
'go_memstats_next_gc_bytes': 'go.memstats.next_gc_bytes',
'go_memstats_other_sys_bytes': 'go.memstats.other_sys_bytes',
'go_memstats_stack_inuse_bytes': 'go.memstats.stack_inuse_bytes',
'go_memstats_stack_sys_bytes': 'go.memstats.stack_sys_bytes',
'go_memstats_sys_bytes': 'go.memstats.sys_bytes',
'go_threads': 'go.threads',
'process_cpu_seconds_total': 'process.cpu_seconds_total',
'process_max_fds': 'process.max_fds',
'process_open_fds': 'process.open_fds',
'process_resident_memory_bytes': 'process.resident_memory_bytes',
'process_start_time_seconds': 'process.start_time_seconds',
'process_virtual_memory_bytes': 'process.virtual_memory_bytes',
}
CITADEL_METRICS = {
'citadel_secret_controller_csr_err_count': 'secret_controller.csr_err_count',
'citadel_secret_controller_secret_deleted_cert_count': ('secret_controller.secret_deleted_cert_count'),
'citadel_secret_controller_svc_acc_created_cert_count': ('secret_controller.svc_acc_created_cert_count'),
'citadel_secret_controller_svc_acc_deleted_cert_count': ('secret_controller.svc_acc_deleted_cert_count'),
'citadel_server_authentication_failure_count': 'server.authentication_failure_count',
'citadel_server_citadel_root_cert_expiry_timestamp': ('server.citadel_root_cert_expiry_timestamp'),
'citadel_server_csr_count': 'server.csr_count',
'citadel_server_csr_parsing_err_count': 'server.csr_parsing_err_count',
'citadel_server_id_extraction_err_count': 'server.id_extraction_err_count',
'citadel_server_success_cert_issuance_count': 'server.success_cert_issuance_count',
'citadel_server_root_cert_expiry_timestamp': 'server.root_cert_expiry_timestamp',
}
GALLEY_METRICS = {
'endpoint_no_pod': 'endpoint_no_pod',
'galley_mcp_source_clients_total': 'mcp_source.clients_total',
'galley_runtime_processor_event_span_duration_milliseconds': ('runtime_processor.event_span_duration_milliseconds'),
'galley_runtime_processor_events_processed_total': 'runtime_processor.events_processed_total',
'galley_runtime_processor_snapshot_events_total': 'runtime_processor.snapshot_events_total',
'galley_runtime_processor_snapshot_lifetime_duration_milliseconds': (
'runtime_processor.snapshot_lifetime_duration_milliseconds'
),
'galley_runtime_processor_snapshots_published_total': ('runtime_processor.snapshots_published_total'),
'galley_runtime_state_type_instances_total': 'runtime_state_type_instances_total',
'galley_runtime_strategy_on_change_total': 'runtime_strategy.on_change_total',
'galley_runtime_strategy_timer_max_time_reached_total': ('runtime_strategy.timer_max_time_reached_total'),
'galley_runtime_strategy_timer_quiesce_reached_total': 'runtime_strategy.quiesce_reached_total',
'galley_runtime_strategy_timer_resets_total': 'runtime_strategy.timer_resets_total',
'galley_source_kube_dynamic_converter_success_total': ('source_kube.dynamic_converter_success_total'),
'galley_source_kube_event_success_total': 'source_kube.event_success_total',
'galley_validation_cert_key_updates': 'validation.cert_key_updates',
'galley_validation_config_load': 'validation.config_load',
'galley_validation_config_updates': 'validation.config_update',
'galley_validation_passed': 'validation.passed',
# These metrics support Istio 1.5
'galley_validation_config_update_error': 'validation.config_update_error',
}
MESH_METRICS = {
# These metrics support Istio 1.5
'istio_request_duration_milliseconds': 'request.duration.milliseconds',
# These metrics support Istio 1.0
'istio_requests_total': 'request.count',
'istio_request_duration_seconds': 'request.duration',
'istio_request_bytes': 'request.size',
'istio_response_bytes': 'response.size',
# These metrics support Istio 0.8
'istio_request_count': 'request.count',
'istio_request_duration': 'request.duration',
'istio_request_size': 'request.size',
'istio_response_size': 'response.size',
# TCP metrics
'istio_tcp_connections_closed_total': 'tcp.connections_closed.total',
'istio_tcp_connections_opened_total': 'tcp.connections_opened.total',
'istio_tcp_received_bytes_total': 'tcp.received_bytes.total',
'istio_tcp_sent_bytes_total': 'tcp.send_bytes.total',
}
MIXER_METRICS = {
# Pre 1.1 metrics
'grpc_server_handled_total': 'grpc.server.handled_total',
'grpc_server_handling_seconds': 'grpc.server.handling_seconds',
'grpc_server_msg_received_total': 'grpc.server.msg_received_total',
'grpc_server_msg_sent_total': 'grpc.server.msg_sent_total',
'grpc_server_started_total': 'grpc.server.started_total',
'mixer_adapter_dispatch_count': 'adapter.dispatch_count',
'mixer_adapter_dispatch_duration': 'adapter.dispatch_duration',
'mixer_adapter_old_dispatch_count': 'adapter.old_dispatch_count',
'mixer_adapter_old_dispatch_duration': 'adapter.old_dispatch_duration',
'mixer_config_resolve_actions': 'config.resolve_actions',
'mixer_config_resolve_count': 'config.resolve_count',
'mixer_config_resolve_duration': 'config.resolve_duration',
'mixer_config_resolve_rules': 'config.resolve_rules',
# 1.1 metrics
'grpc_io_server_completed_rpcs': 'grpc_io_server.completed_rpcs',
'grpc_io_server_received_bytes_per_rpc': 'grpc_io_server.received_bytes_per_rpc',
'grpc_io_server_sent_bytes_per_rpc': 'grpc_io_server.sent_bytes_per_rpc',
'grpc_io_server_server_latency': 'grpc_io_server.server_latency',
'mixer_config_attributes_total': 'config.attributes_total',
'mixer_config_handler_configs_total': 'config.handler_configs_total',
'mixer_config_instance_configs_total': 'config.instance_configs_total',
'mixer_config_rule_configs_total': 'config.rule_configs_total',
'mixer_dispatcher_destinations_per_request': 'dispatcher.destinations_per_request',
'mixer_dispatcher_instances_per_request': 'dispatcher.instances_per_request',
'mixer_handler_daemons_total': 'handler.daemons_total',
'mixer_handler_new_handlers_total': 'handler.new_handlers_total',
'mixer_mcp_sink_reconnections': 'mcp_sink.reconnections',
'mixer_mcp_sink_request_acks_total': 'mcp_sink.request_acks_total',
'mixer_runtime_dispatches_total': 'runtime.dispatches_total',
'mixer_runtime_dispatch_duration_seconds': 'runtime.dispatch_duration_seconds',
}
PILOT_METRICS = {
'pilot_conflict_inbound_listener': 'conflict.inbound_listener',
'pilot_conflict_outbound_listener_http_over_current_tcp': ('conflict.outbound_listener.http_over_current_tcp'),
'pilot_conflict_outbound_listener_tcp_over_current_http': ('conflict.outbound_listener.tcp_over_current_http'),
'pilot_conflict_outbound_listener_tcp_over_current_tcp': ('conflict.outbound_listener.tcp_over_current_tcp'),
'pilot_destrule_subsets': 'destrule_subsets',
'pilot_duplicate_envoy_clusters': 'duplicate_envoy_clusters',
'pilot_eds_no_instances': 'eds_no_instances',
'pilot_endpoint_not_ready': 'endpoint_not_ready',
'pilot_invalid_out_listeners': 'invalid_out_listeners',
'pilot_mcp_sink_reconnections': 'mcp_sink.reconnections',
'pilot_mcp_sink_recv_failures_total': 'mcp_sink.recv_failures_total',
'pilot_mcp_sink_request_acks_total': 'mcp_sink.request_acks_total',
'pilot_no_ip': 'no_ip',
'pilot_proxy_convergence_time': 'proxy_convergence_time',
'pilot_rds_expired_nonce': 'rds_expired_nonce',
'pilot_services': 'services',
'pilot_total_xds_internal_errors': 'total_xds_internal_errors',
'pilot_total_xds_rejects': 'total_xds_rejects',
'pilot_virt_services': 'virt_services',
'pilot_vservice_dup_domain': 'vservice_dup_domain',
'pilot_xds': 'xds',
'pilot_xds_eds_instances': 'xds.eds_instances',
'pilot_xds_push_context_errors': 'xds.push.context_errors',
'pilot_xds_push_timeout': 'xds.push.timeout',
'pilot_xds_push_timeout_failures': 'xds.push.timeout_failures',
'pilot_xds_pushes': 'xds.pushes',
'pilot_xds_write_timeout': 'xds.write_timeout',
'pilot_xds_rds_reject': 'pilot.xds.rds_reject',
'pilot_xds_eds_reject': 'pilot.xds.eds_reject',
'pilot_xds_cds_reject': 'pilot.xds.cds_reject',
'pilot_xds_lds_reject': 'pilot.xds.lds_reject',
}
ISTIOD_METRICS = {
# Maintain namespace compatibility from legacy components
# Generic metrics
'go_gc_duration_seconds': 'go.gc_duration_seconds',
'go_goroutines': 'go.goroutines',
'go_info': 'go.info',
'go_memstats_alloc_bytes': 'go.memstats.alloc_bytes',
'go_memstats_alloc_bytes_total': 'go.memstats.alloc_bytes_total',
'go_memstats_buck_hash_sys_bytes': 'go.memstats.buck_hash_sys_bytes',
'go_memstats_frees_total': 'go.memstats.frees_total',
'go_memstats_gc_cpu_fraction': 'go.memstats.gc_cpu_fraction',
'go_memstats_gc_sys_bytes': 'go.memstats.gc_sys_bytes',
'go_memstats_heap_alloc_bytes': 'go.memstats.heap_alloc_bytes',
'go_memstats_heap_idle_bytes': 'go.memstats.heap_idle_bytes',
'go_memstats_heap_inuse_bytes': 'go.memstats.heap_inuse_bytes',
'go_memstats_heap_objects': 'go.memstats.heap_objects',
'go_memstats_heap_released_bytes': 'go.memstats.heap_released_bytes',
'go_memstats_heap_sys_bytes': 'go.memstats.heap_sys_bytes',
'go_memstats_last_gc_time_seconds': 'go.memstats.last_gc_time_seconds',
'go_memstats_lookups_total': 'go.memstats.lookups_total',
'go_memstats_mallocs_total': 'go.memstats.mallocs_total',
'go_memstats_mcache_inuse_bytes': 'go.memstats.mcache_inuse_bytes',
'go_memstats_mcache_sys_bytes': 'go.memstats.mcache_sys_bytes',
'go_memstats_mspan_inuse_bytes': 'go.memstats.mspan_inuse_bytes',
'go_memstats_mspan_sys_bytes': 'go.memstats.mspan_sys_bytes',
'go_memstats_next_gc_bytes': 'go.memstats.next_gc_bytes',
'go_memstats_other_sys_bytes': 'go.memstats.other_sys_bytes',
'go_memstats_stack_inuse_bytes': 'go.memstats.stack_inuse_bytes',
'go_memstats_stack_sys_bytes': 'go.memstats.stack_sys_bytes',
'go_memstats_sys_bytes': 'go.memstats.sys_bytes',
'go_threads': 'go.threads',
'process_cpu_seconds_total': 'process.cpu_seconds_total',
'process_max_fds': 'process.max_fds',
'process_open_fds': 'process.open_fds',
'process_resident_memory_bytes': 'process.resident_memory_bytes',
'process_start_time_seconds': 'process.start_time_seconds',
'process_virtual_memory_bytes': 'process.virtual_memory_bytes',
'pilot_conflict_inbound_listener': 'pilot.conflict.inbound_listener',
'pilot_conflict_outbound_listener_http_over_current_tcp': (
'pilot.conflict.outbound_listener.http_over_current_tcp'
),
'pilot_conflict_outbound_listener_tcp_over_current_http': (
'pilot.conflict.outbound_listener.tcp_over_current_http'
),
'pilot_conflict_outbound_listener_tcp_over_current_tcp': ('pilot.conflict.outbound_listener.tcp_over_current_tcp'),
'pilot_destrule_subsets': 'pilot.destrule_subsets',
'pilot_duplicate_envoy_clusters': 'pilot.duplicate_envoy_clusters',
'pilot_eds_no_instances': 'pilot.eds_no_instances',
'pilot_endpoint_not_ready': 'pilot.endpoint_not_ready',
'pilot_invalid_out_listeners': 'pilot.invalid_out_listeners',
'pilot_mcp_sink_reconnections': 'pilot.mcp_sink.reconnections',
'pilot_mcp_sink_recv_failures_total': 'pilot.mcp_sink.recv_failures_total',
'pilot_mcp_sink_request_acks_total': 'pilot.mcp_sink.request_acks_total',
'pilot_no_ip': 'pilot.no_ip',
'pilot_proxy_convergence_time': 'pilot.proxy_convergence_time',
'pilot_rds_expired_nonce': 'pilot.rds_expired_nonce',
'pilot_services': 'pilot.services',
'pilot_total_xds_internal_errors': 'pilot.total_xds_internal_errors',
'pilot_total_xds_rejects': 'pilot.total_xds_rejects',
'pilot_virt_services': 'pilot.virt_services',
'pilot_vservice_dup_domain': 'pilot.vservice_dup_domain',
'pilot_xds': 'pilot.xds',
'pilot_xds_eds_instances': 'pilot.xds.eds_instances',
'pilot_xds_push_context_errors': 'pilot.xds.push.context_errors',
'pilot_xds_push_timeout': 'pilot.xds.push.timeout',
'pilot_xds_push_timeout_failures': 'pilot.xds.push.timeout_failures',
'pilot_xds_pushes': 'pilot.xds.pushes',
'pilot_xds_write_timeout': 'pilot.xds.write_timeout',
'pilot_xds_rds_reject': 'pilot.xds.rds_reject',
'pilot_xds_eds_reject': 'pilot.xds.eds_reject',
'pilot_xds_cds_reject': 'pilot.xds.cds_reject',
'pilot_xds_lds_reject': 'pilot.xds.lds_reject',
'grpc_server_handled_total': 'grpc.server.handled_total',
'grpc_server_handling_seconds': 'grpc.server.handling_seconds',
'grpc_server_msg_received_total': 'grpc.server.msg_received_total',
'grpc_server_msg_sent_total': 'grpc.server.msg_sent_total',
'grpc_server_started_total': 'grpc.server.started_total',
'grpc_io_server_completed_rpcs': 'mixer.grpc_io_server.completed_rpcs',
'grpc_io_server_received_bytes_per_rpc': 'mixer.grpc_io_server.received_bytes_per_rpc',
'grpc_io_server_sent_bytes_per_rpc': 'mixer.grpc_io_server.sent_bytes_per_rpc',
'grpc_io_server_server_latency': 'mixer.grpc_io_server.server_latency',
'mixer_config_attributes_total': 'mixer.config.attributes_total',
'mixer_config_handler_configs_total': 'mixer.config.handler_configs_total',
'mixer_config_instance_configs_total': 'mixer.config.instance_configs_total',
'mixer_config_rule_configs_total': 'mixer.config.rule_configs_total',
'mixer_dispatcher_destinations_per_request': 'mixer.dispatcher.destinations_per_request',
'mixer_dispatcher_instances_per_request': 'mixer.dispatcher.instances_per_request',
'mixer_handler_daemons_total': 'mixer.handler.daemons_total',
'mixer_handler_new_handlers_total': 'mixer.handler.new_handlers_total',
'mixer_mcp_sink_reconnections': 'mixer.mcp_sink.reconnections',
'mixer_mcp_sink_request_acks_total': 'mixer.mcp_sink.request_acks_total',
'mixer_runtime_dispatches_total': 'mixer.runtime.dispatches_total',
'mixer_runtime_dispatch_duration_seconds': 'mixer.runtime.dispatch_duration_seconds',
'endpoint_no_pod': 'galley.endpoint_no_pod',
'galley_mcp_source_clients_total': 'galley.mcp_source.clients_total',
'galley_runtime_processor_event_span_duration_milliseconds': (
'galley.runtime_processor.event_span_duration_milliseconds'
),
'galley_runtime_processor_events_processed_total': 'galley.runtime_processor.events_processed_total',
'galley_runtime_processor_snapshot_events_total': 'galley.runtime_processor.snapshot_events_total',
'galley_runtime_processor_snapshot_lifetime_duration_milliseconds': (
'galley.runtime_processor.snapshot_lifetime_duration_milliseconds'
),
'galley_runtime_processor_snapshots_published_total': ('galley.runtime_processor.snapshots_published_total'),
'galley_runtime_state_type_instances_total': 'galley.runtime_state_type_instances_total',
'galley_runtime_strategy_on_change_total': 'galley.runtime_strategy.on_change_total',
'galley_runtime_strategy_timer_max_time_reached_total': ('galley.runtime_strategy.timer_max_time_reached_total'),
'galley_runtime_strategy_timer_quiesce_reached_total': 'galley.runtime_strategy.quiesce_reached_total',
'galley_runtime_strategy_timer_resets_total': 'galley.runtime_strategy.timer_resets_total',
'galley_source_kube_dynamic_converter_success_total': ('galley.source_kube.dynamic_converter_success_total'),
'galley_source_kube_event_success_total': 'galley.source_kube.event_success_total',
'galley_validation_config_load': 'galley.validation.config_load',
'galley_validation_config_updates': 'galley.validation.config_update',
'citadel_secret_controller_csr_err_count': 'citadel.secret_controller.csr_err_count',
'citadel_secret_controller_secret_deleted_cert_count': ('citadel.secret_controller.secret_deleted_cert_count'),
'citadel_secret_controller_svc_acc_created_cert_count': ('citadel.secret_controller.svc_acc_created_cert_count'),
'citadel_secret_controller_svc_acc_deleted_cert_count': ('citadel.secret_controller.svc_acc_deleted_cert_count'),
'citadel_server_authentication_failure_count': 'citadel.server.authentication_failure_count',
'citadel_server_citadel_root_cert_expiry_timestamp': ('citadel.server.citadel_root_cert_expiry_timestamp'),
'citadel_server_csr_count': 'citadel.server.csr_count',
'citadel_server_csr_parsing_err_count': 'citadel.server.csr_parsing_err_count',
'citadel_server_id_extraction_err_count': 'citadel.server.id_extraction_err_count',
'citadel_server_success_cert_issuance_count': 'citadel.server.success_cert_issuance_count',
# These metrics support Istio 1.5
'galley_validation_config_update_error': 'galley.validation.config_update_error',
'citadel_server_root_cert_expiry_timestamp': 'citadel.server.root_cert_expiry_timestamp',
'galley_validation_passed': 'galley.validation.passed',
'galley_validation_failed': 'galley.validation.failed',
'pilot_conflict_outbound_listener_http_over_https': 'pilot.conflict.outbound_listener.http_over_https',
'pilot_inbound_updates': 'pilot.inbound_updates',
'pilot_k8s_cfg_events': 'pilot.k8s.cfg_events',
'pilot_k8s_reg_events': 'pilot.k8s.reg_events',
'pilot_proxy_queue_time': 'pilot.proxy_queue_time',
'pilot_push_triggers': 'pilot.push.triggers',
'pilot_xds_eds_all_locality_endpoints': 'pilot.xds.eds_all_locality_endpoints',
'pilot_xds_push_time': 'pilot.xds.push.time',
'process_virtual_memory_max_bytes': 'process.virtual_memory_max_bytes',
'sidecar_injection_requests_total': 'sidecar_injection.requests_total',
'sidecar_injection_success_total': 'sidecar_injection.success_total',
'sidecar_injection_failure_total': 'sidecar_injection.failure_total',
'sidecar_injection_skip_total': 'sidecar_injection.skip_total',
}
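# A hedged sketch, not part of the original file: each map above pairs a raw Prometheus
# metric name with the suffix appended to the check's namespace. How the Datadog check
# actually groups them per component is assumed here for illustration only.
LEGACY_METRICS_BY_COMPONENT = {
    'istio_mesh': MESH_METRICS,
    'mixer': dict(GENERIC_METRICS, **MIXER_METRICS),
    'galley': dict(GENERIC_METRICS, **GALLEY_METRICS),
    'pilot': dict(GENERIC_METRICS, **PILOT_METRICS),
    'citadel': dict(GENERIC_METRICS, **CITADEL_METRICS),
}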
|
StarcoderdataPython
|
8169604
|
# https://hackernoon.com/gradient-boosting-and-xgboost-90862daa6c77
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report
import numpy as np
import matplotlib.pyplot as plt
iris_dataset = load_iris()
# print(iris_dataset.data)
# print(iris_dataset.target_names)
# print(iris_dataset.target)
# print(iris_dataset.feature_names)
features = pd.DataFrame(iris_dataset.data)
features.columns = iris_dataset.feature_names
print(features.head())
labels = pd.DataFrame(iris_dataset.target)
print(labels.head())
print(features.info())
print(labels.info())
print("features size ", features.shape)
print("labels size ", labels.shape)
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, test_size=0.3, stratify=labels
)
print("train features size ", train_features.shape)
print("train labels size ", train_labels.shape)
print("test features size ", test_features.shape)
print("test labels size ", test_labels.shape)
# learning_rate=0.1, n_estimators=100, loss='deviance',
model = GradientBoostingClassifier()
model.fit(train_features, train_labels)
print("train score", model.score(train_features, train_labels))
print(classification_report(train_labels, model.predict(train_features)))
print("test score", model.score(test_features, test_labels))
print(classification_report(test_labels, model.predict(test_features)))
"""
train score 1.0
              precision    recall  f1-score   support

           0       1.00      1.00      1.00        37
           1       1.00      1.00      1.00        35
           2       1.00      1.00      1.00        33

    accuracy                           1.00       105
   macro avg       1.00      1.00      1.00       105
weighted avg       1.00      1.00      1.00       105

test score 0.9333333333333333

              precision    recall  f1-score   support

           0       1.00      1.00      1.00        13
           1       0.88      0.93      0.90        15
           2       0.94      0.88      0.91        17

    accuracy                           0.93        45
   macro avg       0.94      0.94      0.94        45
weighted avg       0.93      0.93      0.93        45
"""
X = iris_dataset.data[:, :2] # Sepal length, sepal width
y = iris_dataset.target
h = 0.02 # Step size
color_dict = {0: "blue", 1: "lightgreen", 2: "red"}
colors = [color_dict[i] for i in y]
model.fit(X, y)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
plt.figure()
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=colors)
plt.xlabel(iris_dataset.feature_names[0])
plt.ylabel(iris_dataset.feature_names[1])
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks()
plt.yticks()
plt.show()
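# Note, not in the original script: scikit-learn expects 1-D label arrays, so fitting on
# the labels DataFrame above works but emits a DataConversionWarning. A minimal sketch of
# the warning-free variant, using the same split:
model_1d = GradientBoostingClassifier()
model_1d.fit(train_features, train_labels.values.ravel())
print("test score (1-D labels)", model_1d.score(test_features, test_labels))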
|
StarcoderdataPython
|
6413789
|
<reponame>dave-tucker/hp-sdn-client
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
# Python3 compatibility
try:
import urllib.parse as urllib
except ImportError:
import urllib
def raise_errors(response):
if response.status_code == 400:
raise_400(response)
elif response.status_code == 404:
raise_404(response)
elif response.status_code == 500:
raise_500(response)
else:
# let requests raise the error
response.raise_for_status()
def raise_400(response):
data = response.json()
if "Invalid JSON format" in data['message']:
raise InvalidJson(response.request.url,
json.dumps(response.request.body),
data['message'])
elif "IllegalArgumentException" in data['error']:
raise IllegalArgument()
elif "VersionMismatchException" in data['error']:
dpid = urllib.unquote(response.request.url.split('/')[7])
required_version = data['message'][-3:]
raise VersionMismatch(dpid, required_version)
else:
response.raise_for_status()
def raise_404(response):
data = response.json()
if "NotFoundException" in data['error']:
raise NotFound(data['message'])
else:
response.raise_for_status()
def raise_500(response):
data = response.json()
if "IllegalStateException" in data['error']:
raise OpenflowProtocolError()
else:
response.raise_for_status()
class HpsdnclientError(Exception):
"""Base class for Flare API errors"""
pass
class InvalidJson(HpsdnclientError):
def __init__(self, url, request_body, message):
self.url = url
self.request_body = request_body
self.message = message
super(InvalidJson, self).__init__(message)
class VersionMismatch(HpsdnclientError):
def __init__(self, dpid, required_version):
self.dpid = dpid
self.required_version = required_version
message = ("This feature is not supported on DPID {0}. " +
"It requires OpenFlow version {1}").format(dpid,
required_version)
super(VersionMismatch, self).__init__(message)
class IllegalArgument(HpsdnclientError):
def __init__(self, arguments=None):
super(IllegalArgument, self).__init__()
self.arguments = arguments
class NotFound(HpsdnclientError):
def __init__(self, message):
super(NotFound, self).__init__(message)
class OpenflowProtocolError(HpsdnclientError):
def __init__(self):
message = ("Something bad happened at the OpenFlow protocol layer." +
" This could be because this feature is not implemented " +
"on this device")
super(OpenflowProtocolError, self).__init__(message)
class DatatypeError(HpsdnclientError):
def __init__(self, received, expected):
self.received = received
self.expected = expected
message = "Received: {0} Expected: {1}".format(received, expected)
super(DatatypeError, self).__init__(message)
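# A hedged usage sketch, not part of this module: callers inspect the HTTP status and let
# raise_errors() turn error bodies into typed exceptions. The requests call and URL below
# are illustrative assumptions only.
def _get_json_or_raise(url):
    import requests
    response = requests.get(url, timeout=10)
    if not response.ok:
        # maps 400/404/500 responses onto InvalidJson, NotFound, OpenflowProtocolError, ...
        raise_errors(response)
    return response.json()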
|
StarcoderdataPython
|
8093794
|
<gh_stars>10-100
# ===============================================================================
# Copyright 2021 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from traits.api import Enum
from pychron.options.options import FigureOptions
from pychron.options.views.regression_views import VIEWS
from pychron.pychron_constants import MAIN, APPEARANCE
class RegressionOptions(FigureOptions):
regressor = Enum("NewYork")
def initialize(self):
self.subview_names = [MAIN, APPEARANCE]
def _get_subview(self, name):
return VIEWS[name]
# ============= EOF =============================================
|
StarcoderdataPython
|
3516081
|
<gh_stars>0
class ErrorLog:
def __init__(self, _servername, _timestamp, _type, _msg):
self.servername = _servername
self.timestamp = _timestamp
self.typ = _type
self.msg = _msg
def get_servername(self):
return self.servername
def get_timestamp(self):
# TODO
return self.timestamp
def get_type(self):
return self.typ
def get_msg(self):
return self.msg
|
StarcoderdataPython
|
5088990
|
import torch
def log_likelihood(nbhd_means, feature_means, feature_vars, k, past_comps = []):
# given neighborhood expression data, construct likelihood function
# should work in pytorch
n_samples = nbhd_means.shape[0]
nbhd_means = torch.tensor(nbhd_means).double()
feature_means = torch.tensor(feature_means).double()
feature_vars = torch.tensor(feature_vars).double()
def f(coefs):
coefs_orth = coefs
for i,comp in enumerate(past_comps):
#project orthogonally
coefs_orth = coefs_orth - torch.dot(coefs_orth, comp) * comp
observed_means = torch.matmul(nbhd_means, torch.reshape(coefs_orth, (-1, 1)))
# mean and variance of metagene defined by coef, under
# independent gene hypothesis
# variance gets scaled down due to averaging.
theoretical_mean = torch.dot(feature_means, coefs_orth)
theoretical_var = torch.div(torch.dot(torch.pow(coefs_orth, 2), feature_vars), float(k))
# print(theoretical_mean)
# print(theoretical_var)
result = (-1 * n_samples / 2.) * torch.log(theoretical_var) - torch.div(
torch.sum(torch.pow(observed_means - theoretical_mean, 2)),
2 * theoretical_var)
return (result)
return (f)
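# A hedged usage sketch, not in the original file: the closure returned by log_likelihood()
# is differentiable, so a metagene direction can be fit with plain torch autograd. The
# shapes and synthetic data below are illustrative assumptions only.
if __name__ == '__main__':
    n_cells, n_genes, k = 50, 20, 10
    nbhd = torch.randn(n_cells, n_genes).numpy()
    f = log_likelihood(nbhd, nbhd.mean(axis=0), nbhd.var(axis=0), k)
    coefs = torch.randn(n_genes, dtype=torch.float64, requires_grad=True)
    opt = torch.optim.Adam([coefs], lr=0.05)
    for _ in range(200):
        opt.zero_grad()
        loss = -f(coefs)  # maximize the log-likelihood
        loss.backward()
        opt.step()
    print(coefs.detach())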
|
StarcoderdataPython
|
390346
|
from django.urls import path
from . import views as v
app_name = 'core'
urlpatterns = [
path('', v.index, name='index'),
path('form_submit', v.form_submit, name='form_submit'),
path('api/pokemon/<slug:slug>', v.get_pokemon, name='get_pokemon'),
]
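# A hedged sketch, not part of this app: the <slug:slug> converter above hands the captured
# value to the view as a keyword argument named `slug`. The body below is an illustrative
# assumption, not the project's actual view.
def _get_pokemon_sketch(request, slug):
    from django.http import JsonResponse
    return JsonResponse({'slug': slug})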
|
StarcoderdataPython
|
47806
|
# encoding: utf-8
import os
import re
import sys
import gzip
import time
import json
import socket
import random
import weakref
import datetime
import functools
import threading
import collections
import urllib.error
import urllib.parse
import urllib.request
import collections.abc
import json_dict
from . import utils
class ProxyURLRefreshError(Exception):
pass
class AliveProxiesNotFound(Exception):
pass
class NoFreeProxies(Exception):
pass
def _get_missing(target, source):
"""Возвращает присутствующие в `target`, но отсутствующие в `source` элементы
"""
old_target = set(target)
new_target = old_target.intersection(source)
return old_target.difference(new_target)
def _build_opener(proxy=None):
if proxy is not None:
parsed = urllib.parse.urlparse(proxy)
handler = urllib.request.ProxyHandler({parsed.scheme: proxy})
return urllib.request.build_opener(handler)
else:
return urllib.request.build_opener()
class Proxies:
default_opener = _build_opener()
def __init__(
self,
proxies=None,
proxies_url=None,
proxies_url_gateway=None,
proxies_file=None,
options=None,
):
"""
@param proxies: list of proxy server addresses
@param proxies_url: URL of a list of proxy servers
@param proxies_url_gateway: proxy through which the list is downloaded from `proxies_url`
@param proxies_file: path to a file containing a list of proxy servers
@param options: additional options
"""
if options is None:
options = {}
shuffle = options.get('shuffle', False)
if proxies is not None:
proxies = list(proxies)
if shuffle:
random.shuffle(proxies)
auto_refresh_period = options.get('auto_refresh_period')
if auto_refresh_period:
auto_refresh_period = datetime.timedelta(**auto_refresh_period)
blacklist = utils.get_json_dict(json_dict.JsonLastUpdatedOrderedDict, filename=options.get('blacklist'))
cooling_down = utils.get_json_dict(json_dict.JsonOrderedDict, filename=options.get('cooldown'))
stats = utils.get_json_dict(json_dict.JsonDict, filename=options.get('stats'))
if proxies_url_gateway:
url_opener = _build_opener(proxies_url_gateway)
else:
url_opener = None
self._url_opener = url_opener
self._proxies = proxies
self.proxies_url = proxies_url
self.proxies_file = proxies_file
self._shuffle = shuffle
self.slice = options.get('slice')
self.force_type = options.get('type')
self.auto_refresh_period = auto_refresh_period
self._blacklist = blacklist
self._cooling_down = cooling_down
self._stats = stats
self._cleanup_lock = threading.RLock()
self._last_auto_refresh = None
self._auto_refresh_lock = threading.Lock()
self._load_lock = threading.Lock()
self._modified_at = time.perf_counter()
self.__pool = None
self._smart_holdout_start = options.get('smart_holdout_start')
self._options = options
if self._proxies is not None:
proxies = set(self._proxies)
self._cleanup_internals(proxies)
@property
def proxies(self):
if self._proxies is None:
with self._load_lock:
# Past the race window; double-check that loading is still actually needed
if self._proxies is None:
self._proxies = self._load()
self._cleanup_internals(self._proxies)
self._modified_at = time.perf_counter()
return self._proxies
def _load(self):
if self.proxies_url:
proxies = self.read_url(self.proxies_url, opener=self._url_opener)
elif self.proxies_file:
proxies = self.read_file(self.proxies_file)
else:
raise NotImplementedError(
"Can't load proxies: "
"please specify one of the sources ('proxies_url' or 'proxies_file')"
)
if self.slice:
proxies = proxies[slice(*self.slice)]
if self.force_type:
new_type = self.force_type + '://' # `socks` format
proxies = [
re.sub(r'^(?:(.*?)://)?', new_type, proxy)
for proxy in proxies
]
if self._shuffle:
random.shuffle(proxies)
return proxies
def _cleanup_internals(self, proxies):
with self._cleanup_lock:
self._cleanup_blacklist(proxies)
self._cleanup_cooling_down(proxies)
self._cleanup_stats(proxies)
def _cleanup_cooling_down(self, proxies):
for proxy in _get_missing(self._cooling_down, proxies):
self._cooling_down.pop(proxy)
def _cleanup_blacklist(self, proxies):
for proxy in _get_missing(self._blacklist, proxies):
self._blacklist.pop(proxy)
def _cleanup_stats(self, proxies):
for proxy in _get_missing(self._stats, proxies):
self._stats.pop(proxy)
def _get_options(self, *options, missing_ok=True):
if missing_ok:
return {k: self._options.get(k) for k in options}
else:
return {k: self._options[k] for k in options}
@classmethod
def read_string(cls, string, sep=','):
return list(x for x in map(str.strip, string.split(sep)) if x)
@classmethod
def read_url(cls, url, sep='\n', retry=10, sleep_range=(2, 10), timeout=2, opener=None):
if opener is None:
opener = cls.default_opener
while True:
try:
resp = opener.open(url, timeout=timeout)
break
except (urllib.error.HTTPError, socket.timeout):
if not retry:
raise
retry -= 1
time.sleep(random.randint(*sleep_range))
content = resp.read()
if resp.headers.get('Content-Encoding', 'identity') == 'gzip':
content = gzip.decompress(content)
charset = resp.headers.get_content_charset('utf-8')
content = content.decode(charset)
return cls.read_string(content, sep=sep)
@classmethod
def read_file(cls, file_name, sep='\n'):
with open(file_name) as f:
return cls.read_string(f.read(), sep=sep)
def refresh(self):
if not self.proxies_url and not self.proxies_file:
return
try:
self._proxies = self._load()
self._cleanup_internals(self._proxies)
except urllib.error.HTTPError:
import problems
problems.handle(ProxyURLRefreshError, extra={'url': self.proxies_url})
else:
self._modified_at = time.perf_counter()
def _auto_refresh(self):
if self.proxies_file:
with self._auto_refresh_lock:
modification_time = datetime.datetime.fromtimestamp(os.stat(self.proxies_file).st_mtime)
if modification_time == self._last_auto_refresh:
return
self.refresh()
self._last_auto_refresh = modification_time
elif self.proxies_url:
if self.auto_refresh_period is None:
return
with self._auto_refresh_lock:
now = datetime.datetime.now()
if self._last_auto_refresh is not None:
if now - self._last_auto_refresh < self.auto_refresh_period:
return
self.refresh()
self._last_auto_refresh = now
def get_random_address(self):
self._auto_refresh()
return random.choice(self.proxies)
def get_pool(self):
if self.__pool is None:
with self._cleanup_lock:  # optimization: reuse the already existing lock
# Past the race window; double-check that creating the pool is still actually needed
if self.__pool is None:
options = self._get_options('default_holdout', 'default_bad_holdout', 'force_defaults')
if self._smart_holdout_start is not None:
options['smart_holdout'] = True
options['smart_holdout_start'] = self._smart_holdout_start
options.update(self._get_options('smart_holdout_min', 'smart_holdout_max'))
self.__pool = _Pool(
self, self._cooling_down, self._blacklist, self._stats, self._cleanup_lock,
**options
)
return self.__pool
@classmethod
def from_cfg_string(cls, cfg_string):
"""Возвращает список прокси с тем исключением что список опций берется автоматически.
Формат: json
Доступные опции:
type ('socks5', 'http'; для полного списка типов см. модуль socks):
все прокси будут автоматически промаркированы этип типом
slice (tuple c аргументами для builtins.slice):
будет взят только указанный фрагмент списка прокси-серверов
auto_refresh_period (dict): {'days': ..., 'hours': ..., 'minutes': ...}
как часто необходимо обновлять список прокси-серверов (только для `url` и `file`)
url_gateway:
адрес proxy, через которые будет загружаться список прокси по url
(url, file, list) - может быть именем файла, ссылкой или списком в формате json
Параметры slice и force_type являются необязательными
Примеры:
option = {"list": ["127.0.0.1:3128"]}
option = {"list": ["127.0.0.1:3128", "127.0.0.1:9999"]}
option = {"file": "./my_new_proxies.txt", "type": "socks5"}
option = {"url": "http://example.com/get/proxy_list/", "slice": [35, null], "type": "http"}
option = {"url": "http://example.com/get/proxy_list/", "auto_refresh_period": {"days": 1}}
option = {"url": "http://example.com/get/proxy_list/", "url_gateway": "http://proxy.example.com:9999"}
"""
cfg = json.loads(cfg_string)
proxies = cfg.pop('list', None)
proxies_url = cfg.pop('url', None)
proxies_url_gateway = cfg.pop('url_gateway', None)
proxies_file = cfg.pop('file', None)
return cls(
proxies=proxies,
proxies_url=proxies_url,
proxies_url_gateway=proxies_url_gateway,
proxies_file=proxies_file,
options=cfg
)
class _Pool:
def __init__(
self, proxies: "`Proxies` instance", cooling_down, blacklist, stats, _cleanup_lock=None,
smart_holdout=False, smart_holdout_start=None, smart_holdout_min=None, smart_holdout_max=None,
default_holdout=None, default_bad_holdout=None, force_defaults=False,
):
if smart_holdout:
if smart_holdout_start in (None, 0):
raise RuntimeError("Вы должны указать начальное время охлаждения")
if smart_holdout_max is None:
smart_holdout_max = float('inf')
self._used = set()
self._cond = threading.Condition(lock=_cleanup_lock)
self._free = collections.deque(
p for p in proxies.proxies
if (
p not in blacklist and
p not in cooling_down
)
)
self._proxies = proxies
self._cooling_down = cooling_down
self._blacklist = blacklist
self._stats = stats
self._smart_holdout = smart_holdout
self._smart_holdout_start = smart_holdout_start
self._smart_holdout_min = smart_holdout_min or 0
self._smart_holdout_max = smart_holdout_max
self._default_holdout = default_holdout
self._default_bad_holdout = default_bad_holdout
self._force_defaults = force_defaults
self._proxies_modified_at = proxies._modified_at
@property
def _size(self):
return len(self._free) + len(self._used) + len(self._cooling_down) + len(self._blacklist)
def _cool_released(self):
now = time.time()
cooled = []
for proxy, holdout in self._cooling_down.items():
if now >= holdout:
cooled.append(proxy)
for proxy in cooled:
self._cooling_down.pop(proxy, None)
if proxy not in self._blacklist:
self._free.append(proxy)
def _is_proxies_changed(self):
self._proxies._auto_refresh()
return self._proxies._modified_at != self._proxies_modified_at
def _remove_outdated(self):
# the proxy list has changed; keep only the entries that are still in it
full_list = set(self._proxies.proxies)
for proxy in _get_missing(self._blacklist, full_list):
self._blacklist.pop(proxy, None)
for proxy in _get_missing(self._cooling_down, full_list):
self._cooling_down.pop(proxy, None)
for proxy in _get_missing(self._used, full_list):
self._used.remove(proxy)
for proxy in _get_missing(self._stats, full_list):
self._stats.pop(proxy, None)
free = set(
p for p in full_list
if (
p not in self._used and
p not in self._blacklist and
p not in self._cooling_down
)
)
old_free = set(self._free)
new_free = old_free.intersection(free)
if old_free.difference(new_free):
self._free.clear()
self._free.extend(new_free)
self._proxies_modified_at = self._proxies._modified_at
def _update_stats(self, proxy, bad=False, holdout=None):
proxy_stat = self._stats.get(proxy) or {}
ok, fail = proxy_stat.get('uptime', (0, 0))
if not bad:
ok += 1
else:
fail += 1
proxy_stat['uptime'] = ok, fail
proxy_stat['last_holdout'] = holdout
if (
not bad or
(
holdout is not None and
holdout >= (proxy_stat.get('last_good_holdout') or 0)
)
):
proxy_stat['last_good_holdout'] = holdout
# a universal way to signal that the stats were updated:
# without calling .save it also works with a plain dict (not only with JsonDict)
self._stats[proxy] = proxy_stat
def _get_next_holdout(self, proxy, bad=False):
"""Рассчитывает время охлаждения.
@param proxy: прокси, для которого необходимо вычислить
@param bad: True - вычисляем охлаждение для неудачи, иначе False
@return: рекомендуемое время охлаждения в секундах или None, если недостаточно данных
"""
# The algorithm is based on binary search,
# except that, unlike binary search, the upper bound is not known in advance
proxy_stat = self._stats.get(proxy)
if proxy_stat is None:
return None
last_holdout = proxy_stat['last_holdout']
last_good_holdout = proxy_stat.get('last_good_holdout', 0)
lo = last_holdout  # previous holdout time (lower bound)
if bad:
# We got a "ban" ...
if lo < last_good_holdout:
# ... go back to the previous known-good value ...
holdout = last_good_holdout
else:
# ... or push the bound further out
holdout = lo * 2
else:
# go back toward the previous bound (lo / 2),
# but with a small margin: to the middle of the interval [(lo / 2), lo]
holdout = lo * 0.75
return holdout
def acquire(self, timeout=None):
start = time.perf_counter()
with self._cond:
while True:
if self._is_proxies_changed():
self._remove_outdated()
self._cool_released()
if self._free:
proxy = self._free.popleft()
self._used.add(proxy)
return proxy
if self._blacklist:
# Return the most stable proxy from the blacklist; the ban may have been lifted.
def _uptime(p):
uptime = float('inf')
p_stat = self._stats.get(p)
if p_stat is not None:
ok, failed = p_stat.get('uptime', (0, 0))
if failed != 0:
uptime = ok // failed
else:
uptime = ok
return uptime
proxy = next((
p for p in sorted(self._blacklist, key=_uptime, reverse=True)
if p not in self._cooling_down
), None)
if proxy is not None:
self._blacklist.pop(proxy)
self._used.add(proxy)
return proxy
else:
# All blacklisted proxies are currently cooling down
pass
if self._cooling_down:
self._cond.wait(1)
else:
self._cond.wait(timeout)
if timeout is not None:
if time.perf_counter() - start > timeout:
raise NoFreeProxies
def release(self, proxy, bad=False, holdout=None, bad_reason=None):
"""Возвращает прокси в пул
@param proxy: прокси
@param holdout (сек): None - вернуть сразу, иначе прокси не будет использован до истечения указанного интервала
"""
with self._cond:
is_outdated = proxy not in self._used
if is_outdated:
# Most likely the proxy is no longer relevant
# and has already been removed from the list
return
self._used.remove(proxy)
if holdout is None or self._force_defaults:
holdout = self._default_holdout if not bad else self._default_bad_holdout
if self._smart_holdout:
_holdout = (
self._get_next_holdout(proxy, bad=bad) or
holdout or
self._smart_holdout_start
)
# Do not let the bound drop too low
if _holdout < self._smart_holdout_min:
holdout = self._smart_holdout_min
elif _holdout > self._smart_holdout_max:
holdout = self._smart_holdout_max
else:
holdout = max(self._smart_holdout_min, _holdout)
if holdout is not None:
self._cooling_down[proxy] = time.time() + holdout
if bad:
self._blacklist[proxy] = bad_reason
elif holdout is None:
# the proxy does not need a cooldown
self._free.append(proxy)
self._cond.notify()
self._update_stats(proxy, bad=bad, holdout=holdout)
class IChain:
def switch(self, bad=False, holdout=None, bad_reason=None, lazy=False):
raise NotImplementedError
def get_adapter(self):
raise NotImplementedError
def get_handler(self):
raise NotImplementedError
def get_path(self):
raise NotImplementedError
def wrap_session(self, session):
raise NotImplementedError
def wrap_module(self, module, all_threads=False):
"""
Attempts to replace a module's socket library with a SOCKS socket.
This will only work on modules that import socket directly into the
namespace; most of the Python Standard Library falls into this category.
"""
import socks
import socks.monkey_socket
routes = socks.RoutingTable.from_addresses(self.get_path())
if not all_threads:
socks.monkey_socket.socks_wrap_module_thread(routes, module)
else:
socks.monkey_socket.socks_wrap_module_global(routes, module)
class Chain(IChain):
"""
Not thread-safe.
"""
def __init__(self, proxies, proxy_gw=None, use_pool=False, pool_acquire_timeout=None):
"""
@param proxies: list of proxy server addresses
@param proxy_gw: proxy server that must stand at the head of the chain
(all requests to the other proxy servers will go through it)
@param use_pool: use the proxy list as a pool
@param pool_acquire_timeout (sec.): if no free proxy can be acquired within this period,
a `NoFreeProxies` exception is raised; None - wait until a free address appears
"""
if not isinstance(proxies, Proxies) and isinstance(proxies, collections.abc.Sequence):
proxies = Proxies(proxies)
if use_pool:
pool = proxies.get_pool()
else:
pool = None
self.proxies = proxies
self.proxy_gw = proxy_gw
self._proxies_pool = pool
self._current_pool_proxy = None
self._pool_acquire_timeout = pool_acquire_timeout
self.__path = []
# fix http://bugs.python.org/issue23841
if sys.version_info >= (3, 4, 0):
self.finalizer = weakref.finalize(self, self.finalize)
else:
self.finalizer = None
def __del__(self):
if self.finalizer is None:
self.finalize()
def finalize(self):
if self._proxies_pool is not None:
self._release_pool_proxy()
def _build_path(self, proxy):
path = []
if self.proxy_gw:
path.append(self.proxy_gw)
path.append(proxy)
return path
def _release_pool_proxy(self, bad=False, holdout=None, bad_reason=None):
if self._current_pool_proxy:
proxy = self._current_pool_proxy
self._current_pool_proxy = None
self._proxies_pool.release(proxy, bad=bad, holdout=holdout, bad_reason=bad_reason)
def _acquire_pool_proxy(self):
proxy = self._proxies_pool.acquire(timeout=self._pool_acquire_timeout)
self._current_pool_proxy = proxy
return proxy
def _get_proxy(self):
if self._proxies_pool is not None:
return self._acquire_pool_proxy()
else:
return self.proxies.get_random_address()
@property
def _path(self):
if not self.__path:
self.__path = self._build_path(self._get_proxy())
return self.__path
def get_path(self):
return self._path
def switch(self, bad=False, holdout=None, bad_reason=None, lazy=False):
self.__path.clear()
if self._proxies_pool is not None:
self._release_pool_proxy(bad, holdout, bad_reason)
if not lazy:
self.__path = self._build_path(self._get_proxy())
def get_adapter(self):
import socks.adapters
return socks.adapters.ChainedProxyHTTPAdapter(chain=self._path)
def get_handler(self):
import socks.handlers
return socks.handlers.ChainProxyHandler(chain=self._path)
def wrap_session(self, session):
adapter = self.get_adapter()
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
@classmethod
def from_config(cls, cfg):
proxy_cfg_string = cfg.get('Прокси')
if proxy_cfg_string is None:
return None
proxy_gw = cfg.get('Шлюз')
proxies = Proxies.from_cfg_string(proxy_cfg_string)
return cls(proxies, proxy_gw=proxy_gw)
class MultiChain(IChain):
def __init__(self, *proxies_all, use_pool=True, pool_acquire_timeout=None):
if use_pool:
pool_kw = {'use_pool': True, 'pool_acquire_timeout': 1}
else:
pool_kw = {}
self._pool_acquire_timeout = pool_acquire_timeout
self._chains = collections.deque(
Chain(p, gw, **pool_kw)
for p, gw in self._unwrap_proxies_all(proxies_all)
)
@staticmethod
def _unwrap_proxies_all(proxies_all):
for p in proxies_all:
if isinstance(p, tuple):
# (Proxies, Gateway)
p, gw = p
else:
# Proxies
p, gw = p, None
yield p, gw
def _self_auto_rotate(func):
@functools.wraps(func)
def wrapped(self, *args, **kw):
start = time.perf_counter()
while True:
try:
return func(self, *args, **kw)
except NoFreeProxies:
self._rotate() # FIXME: cycle rotate is normal?
if (
self._pool_acquire_timeout is not None and
time.perf_counter() - start > self._pool_acquire_timeout
):
raise
return wrapped
@property
def _current(self):
return self._chains[-1]
def get_path(self):
return self._current.get_path()
def _rotate(self):
self._chains.rotate(1)
def switch(self, bad=False, holdout=None, bad_reason=None, lazy=False):
self._current.switch(bad=bad, holdout=holdout, bad_reason=bad_reason, lazy=True)
self._rotate()
if not lazy:
self._enforce_current_path_build()
@_self_auto_rotate
def _enforce_current_path_build(self):
_ = self._current._path # FIXME: ugly enforce path building after switching
@_self_auto_rotate
def get_adapter(self):
return self._current.get_adapter()
@_self_auto_rotate
def get_handler(self):
return self._current.get_handler()
@_self_auto_rotate
def wrap_session(self, session):
return self._current.wrap_session(session)
@_self_auto_rotate
def wrap_module(self, module):
return self._current.wrap_module(module)
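# A hedged usage sketch, not part of this module: it exercises the JSON config format
# documented in Proxies.from_cfg_string and the pooled Chain. The proxy addresses, target
# URL and requests wiring are illustrative assumptions only.
if __name__ == '__main__':
    import requests
    proxies = Proxies.from_cfg_string(
        '{"list": ["127.0.0.1:3128", "127.0.0.1:9999"], "type": "http"}'
    )
    chain = Chain(proxies, use_pool=True, pool_acquire_timeout=5)
    session = chain.wrap_session(requests.Session())
    try:
        session.get('http://example.com', timeout=10)
    except Exception:
        # mark the current proxy as bad and switch to another one
        chain.switch(bad=True, bad_reason='request failed')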
|
StarcoderdataPython
|
6528648
|
<filename>python_examples/util.py
from itertools import islice
import numpy as np
def data_generator(files, batch_size, n_classes):
while 1:
lines = []
for file in files:
with open(file,'r',encoding='utf-8') as f:
header = f.readline() # ignore the header
while True:
temp = len(lines)
lines += list(islice(f,batch_size-temp))
if len(lines)!=batch_size:
break
idxs = []
vals = []
##
y_idxs = []
y_vals = []
y_batch = np.zeros([batch_size,n_classes], dtype=float)
count = 0
for line in lines:
itms = line.strip().split(' ')
##
y_idxs = [int(itm) for itm in itms[0].split(',')]
for i in range(len(y_idxs)):
y_batch[count,y_idxs[i]] = 1.0/len(y_idxs)
# y_batch[count,y_idxs[i]] = 1.0
##
idxs += [(count,int(itm.split(':')[0])) for itm in itms[1:]]
vals += [float(itm.split(':')[1]) for itm in itms[1:]]
count += 1
lines = []
yield (idxs, vals, y_batch)
def data_generator_tst(files, batch_size):
while 1:
lines = []
for file in files:
with open(file,'r',encoding='utf-8') as f:
header = f.readline() # ignore the header
while True:
temp = len(lines)
lines += list(islice(f,batch_size-temp))
if len(lines)!=batch_size:
break
idxs = []
vals = []
##
y_batch = [None for i in range(len(lines))]
count = 0
for line in lines:
itms = line.strip().split(' ')
##
y_batch[count] = [int(itm) for itm in itms[0].split(',')]
##
idxs += [(count,int(itm.split(':')[0])) for itm in itms[1:]]
vals += [float(itm.split(':')[1]) for itm in itms[1:]]
count += 1
lines = []
yield (idxs, vals, y_batch)
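# A hedged usage sketch, not in the original file: each yielded batch is a sparse matrix in
# coordinate form, which a consumer can assemble with scipy. The file name, feature count
# and class count below are placeholder assumptions.
if __name__ == '__main__':
    from scipy.sparse import csr_matrix
    n_features, n_classes, batch_size = 100, 10, 32
    gen = data_generator(['train.txt'], batch_size, n_classes)  # 'train.txt' is a placeholder
    idxs, vals, y_batch = next(gen)
    rows, cols = zip(*idxs)
    x_batch = csr_matrix((vals, (rows, cols)), shape=(batch_size, n_features))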
|
StarcoderdataPython
|
1662948
|
import re
import random
import requests
import table
import user_agent_list
from bs4 import BeautifulSoup
class HtmlPage:
user_agent_number = 7345
def __init__(self, url):
self.url = url
def get_html(self, creds, proxy_pass):
have_a_try = 3
if not proxy_pass:
while have_a_try:
t = table.Table('proxy_list', creds=creds)
user_agent = user_agent_list.get_user_agent(int(random.random() * self.user_agent_number))
user_agent_dict = {'user-agent': user_agent}
table_exist = t.table_check()
if not table_exist:
print("Proxy table corrupted.")
return False
tab_length = t.table_len()
try:
proxy = t.table_read(int(random.random() * (tab_length[0] - 1)) + 1)
proxy_dict = {proxy[1]: proxy[2]}
except TypeError:
print("Fatal error in proxy list.")
return False
try:
result = requests.get(str.rstrip(self.url), headers=user_agent_dict, proxies=proxy_dict)
result.raise_for_status()
return result.text
except(requests.RequestException, ValueError):
print("Bad proxy. One more try.")
have_a_try -= 1
print("Network error. Update proxy list.")
else:
while have_a_try:
try:
result = requests.get(str.rstrip(self.url))
result.raise_for_status()
return result.text
except(requests.RequestException, ValueError):
have_a_try -= 1
print("Network error. Can't get html.")
return False
def get_wb_page(self, creds, proxy_pass):
html = self.get_html(creds, proxy_pass)
if html:
soup = BeautifulSoup(html, 'html.parser')
articles = {}
for index in soup.findAll('div', class_="dtList i-dtList j-card-item"):
article_number = re.search(r'\d+', index.get('data-catalogercod1s'))
articles[article_number[0]] = index.find('a')['href']
return articles
return False
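# A hedged usage sketch, not part of this module: fetch a catalogue page directly
# (proxy_pass=True skips the proxy table) and print the scraped article links. The URL is
# a placeholder and creds is unused on this code path.
if __name__ == '__main__':
    page = HtmlPage('https://example.com/catalog/some-category')
    articles = page.get_wb_page(creds=None, proxy_pass=True)
    if articles:
        for article, link in articles.items():
            print(article, link)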
|
StarcoderdataPython
|
12822
|
from django.urls import reverse
from rest_framework import status
from .base import BaseTestCase
class FollowTestCase(BaseTestCase):
"""Testcases for following a user."""
def test_follow_user_post(self):
"""Test start following a user."""
url = reverse('follow', kwargs={'username': 'test2'})
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_follow_already_followed_user(self):
"""Test start following a user you already follow."""
url = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_follow_missing_user_post(self):
"""Test trying to start following a missing user."""
url = reverse('follow', kwargs={'username': 'joel'})
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_follow(self):
"""Test unfollowing a user"""
url = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_follow_of_not_followed_user(self):
"""Test unfollowing a user you are not following"""
url = reverse('follow', kwargs={'username': 'test2'})
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_list_followers_of_user(self):
"""Test list followers of a user"""
url_followers = reverse('getfollowers', kwargs={'username': 'test2'})
self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header)
url_follow = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_user_is_following(self):
"""Test list users the user is following"""
url_following = reverse('getfollowing', kwargs={'username': 'test1'})
self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header)
url_follow = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
StarcoderdataPython
|
9676225
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
# Description : FFT Baseline Correction
import sys, os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, SpanSelector
from matplotlib import gridspec
import scipy.fftpack
from datetime import datetime
from dateutil.parser import parse
plt.rcParams['toolbar'] = 'None'
def calculate_y_limits(ys):
ymin = np.nanmin(ys)
ymax = np.nanmax(ys)
ydiff = ymax-ymin
return((ymin-0.1*ydiff, ymax+0.1*ydiff))
def crawl_info_file(filename):
with open(filename, "r") as file:
data = file.read()
blocks = [block.split("\n") for block in data.split("\n\n")]
db_dict={}
db_dict["filename"] = os.path.basename(filename)
if db_dict["filename"].find("fat.")!=-1:
db_dict["status"] = 0
elif db_dict["filename"][0]=="B":
db_dict["status"] = +2
else:
db_dict["status"] = +1
#Block 0
#General
db_dict["experimentalist"] = blocks[0][0].split(" measured",1)[0]
db_dict["molecule"] = blocks[0][0].split(": ",1)[1]
[db_dict["datestart"],db_dict["datestop"]]=blocks[0][1].replace("Measurement started on ","").split(" and finished at ")
db_dict["measurementtype"] = blocks[0][2].split(": ",1)[1]
for i in ["datestart", "datestop"]:
db_dict[i]=str(parse(db_dict[i]))
#Block 1
#Probe Synthesizer
db_dict["freqstart"] = float(blocks[1][1].split(": ",1)[1].replace("MHz",""))
db_dict["freqcenter"] = float(blocks[1][2].split(": ",1)[1].replace("MHz",""))
db_dict["freqstop"] = float(blocks[1][3].split(": ",1)[1].replace("MHz",""))
db_dict["span"] = float(blocks[1][4].split(": ",1)[1].replace("MHz",""))
db_dict["steps"] = float(blocks[1][5].split(": ",1)[1])
db_dict["stepsize"] = float(blocks[1][6].split(": ",1)[1].replace("kHz",""))
db_dict["datapoints"] = float(blocks[1][7].split(": ",1)[1])
db_dict["mfactor"] = float(blocks[1][8].split(": ",1)[1])
db_dict["fmdeviation"] = float(blocks[1][9].split(": ",1)[1].replace("kHz/V",""))
db_dict["probepower"] = float(blocks[1][10].split(": ",1)[1].replace("dBm",""))
#Block 2
#Pump Synthesizer
#Block 3
#Lock In
db_dict["delaytime"] = float(blocks[3][1].split(": ",1)[1].replace("ms",""))
db_dict["timeconstant"] = float(blocks[3][2].split(": ",1)[1].replace("ms",""))
db_dict["averagedpoints"]= float(blocks[3][3].split(": ",1)[1])
db_dict["averagediter"] = float(blocks[3][4].split(": ",1)[1])
db_dict["oscfreq"] = float(blocks[3][5].split(": ",1)[1].replace("Hz",""))
db_dict["oscamplitude"] = float(blocks[3][6].split(": ",1)[1].replace("V",""))
db_dict["ADCslope"] = float(blocks[3][7].split(": ",1)[1].replace("dB/ocatve",""))
db_dict["ACgain"] = float(blocks[3][8].split(": ",1)[1].replace("dB",""))
#Block 3
#Pressure
db_dict["totFMmod"] = float(blocks[4][0].split("= ",1)[1].replace("kHz",""))
if blocks[4][1].split(": ",1)[0]=="Pressure":
db_dict["pressurestart"]= "pressure not available"
db_dict["pressureend"] = "pressure not available"
else:
db_dict["pressurestart"]= blocks[4][1].split(": ",1)[1].replace("mbar","")
db_dict["pressureend"] = blocks[4][2].split(": ",1)[1].replace("mbar","")
for i in ("pressurestart", "pressureend"):
if db_dict[i].find("pressure not available")!=-1:
db_dict[i]=-1
else:
db_dict[i]=float(db_dict[i])
return(db_dict)
def decrease_standingwave(data_in, save_data):
global xs, ys, ys_corr, duration, filename, current_index, x_range, data
data = data_in
def onclick(event):
if event.inaxes == ax1:
if event.button == 1:
if isinstance(event.xdata,np.float64):
cut_off_slider.set_val(event.xdata)
def onzoom(vmin, vmax, i):
global x_range, fft_range
if vmin == vmax:
return
elif i < 2:
fft_range = [vmin, vmax]
else:
x_range = [vmin, vmax]
update_plot(rescale = False)
def press(key):
global xs, ys, ys_corr, duration, filename, current_index, data
if key=="left":
cut_off_slider.set_val(cut_off_slider.val-0.01)
elif key=="shift+left":
cut_off_slider.set_val(cut_off_slider.val-0.1)
elif key=="right":
cut_off_slider.set_val(cut_off_slider.val+0.01)
elif key=="shift+right":
cut_off_slider.set_val(cut_off_slider.val+0.1)
elif key=="up":
cut_off_slider.set_val(cut_off_slider.val+0.15)
elif key=="shift+up":
cut_off_slider.set_val(cut_off_slider.val+0.2)
elif key=="down":
cut_off_slider.set_val(cut_off_slider.val-0.15)
elif key=="shift+down":
cut_off_slider.set_val(cut_off_slider.val-0.2)
elif key in [" ", "space", "enter"]:
save_data(xs, ys_corr, filename)
current_index += 1
if current_index >= len(data):
current_index = len(data)-1
update_plot()
elif key in ["ctrl+left"]:
current_index -= 1
if current_index < 0:
current_index = 0
update_plot()
elif key in ["ctrl+q"]:
# fig.canvas.mpl_disconnect(cid_1)
fig.canvas.mpl_disconnect(cid_2)
plt.close()
elif key in ["escape", "ctrl+right"]:
current_index += 1
if current_index >= len(data):
current_index = len(data)-1
update_plot()
elif key in ["ctrl+r"]:
update_plot()
elif key in ["ctrl+s"]:
try:
import tkinter as tk
from tkinter import filedialog
root = tk.Tk()
root.withdraw()
filename = filedialog.asksaveasfilename()
root.destroy()
except Exception as E:
print(str(E))
filename = input("Enter filename: ")
plt.savefig(filename)
elif key in ["ctrl+o"]:
data = get_data()
current_index = 0
update_plot()
def update_plot(rescale = True):
global xs, ys, ys_corr, duration, filename, current_index, x_range, fft_range, data
if len(data) == 0 or current_index > len(data):
return
xs, ys, duration, filename = data[current_index]
cutoff_freq = cut_off_slider.val
fft_ys = scipy.fftpack.rfft(ys)
fft_xs = scipy.fftpack.rfftfreq(len(ys), duration/len(xs))
fft_cut = [x for x in fft_ys]
fft_bas = [x for x in fft_ys]
fft_cut = [fft_ys[i] if fft_xs[i] > cutoff_freq else 0 for i in range(len(fft_ys))]
fft_bas = [fft_ys[i] if fft_xs[i] < cutoff_freq else 0 for i in range(len(fft_ys))]
ys_corr = scipy.fftpack.irfft(fft_cut)
ys_base = scipy.fftpack.irfft(fft_bas)
ax1.lines[0].set_data(fft_xs, fft_ys)
ax2.lines[0].set_data(xs, ys)
ax2.lines[1].set_data(xs, ys_base)
ax3.lines[0].set_data(xs, ys_corr)
if rescale == True:
x_range = [np.nanmin(xs), np.nanmax(xs)]
tmp = (np.nanmin(fft_xs), np.nanmax(fft_xs))
tmp_p = (tmp[1]-tmp[0])*0.05
fft_range = (tmp[0]-tmp_p, tmp[1]+tmp_p)
mask = [x_range[0] <= x <= x_range[1] for x in xs]
y_range = calculate_y_limits(ys[mask])
y_range_corr = calculate_y_limits(ys_corr[mask])
y_range = (min(y_range[0], y_range_corr[0]), max(y_range[1], y_range_corr[1]))
y_range_fft = calculate_y_limits(fft_ys)
cut_off_slider.valmin = fft_range[0]
cut_off_slider.valmax = fft_range[1]
cut_off_slider.ax.set_xlim(fft_range)
ax1.set_xlim(fft_range)
ax1.set_ylim(y_range_fft)
ax2.set_xlim(x_range)
ax2.set_ylim(y_range)
ax3.set_xlim(x_range)
ax3.set_ylim(y_range)
ax3.set_xticks(np.linspace(*x_range, 5))
ax3.set_xticklabels([f"{x:.2f}" for x in np.linspace(*x_range, 5)])
line.set_xdata(cutoff_freq)
title_ax.set_title(f"{current_index+1}/{len(data)}: {os.path.basename(filename)}", ha="center")
fig.canvas.draw_idle()
current_index = 0
cutoff_freq = 0.0
x_range = [0, 0]
fft_range = [0, 0]
fig= plt.figure()
gs = gridspec.GridSpec(9, 12, height_ratios = [0.25, 0.5, 1, 0.5, 1, 1, 0.5, 0.5, 0.5], hspace = 0, wspace=0)
title_ax = fig.add_subplot(gs[0, :])
title_ax.axis("off")
title_ax.set_title("Press 'Replace Files' to open files")
ax0 = fig.add_subplot(gs[1, :])
cut_off_slider = Slider(ax0, "Cut-Off", 0, 1, valinit=cutoff_freq)
cut_off_slider.on_changed(lambda a: update_plot(rescale=False))
ax1 = fig.add_subplot(gs[2, :])
ax1.plot([], [], color="green", label="FFT Coefficients")
ax1.legend(loc = "upper right")
line = ax1.axvline(x=cutoff_freq, color="red", ls="--")
tmp_ax = fig.add_subplot(gs[3, :])
tmp_ax.axis("off")
ax2 = fig.add_subplot(gs[4, :])
ax2.plot([], [], color="#6ebeff", label="Original Spectrum")
ax2.plot([], [], color="#FF0266", label="Baseline", linewidth=3, alpha=0.3)
ax2.get_xaxis().set_visible(False)
ax2.legend(loc = "upper right")
ax3 = fig.add_subplot(gs[5, :], sharex=ax2)
ax3.plot([], [], color="#0336FF", label="Corrected Spectrum")
ax3.legend(loc = "upper right")
tmp_ax = fig.add_subplot(gs[6, :])
tmp_ax.axis("off")
buttons = [("Reset Zoom", "ctrl+r"), ("Previous", "ctrl+left"), ("Next", "ctrl+right"), ("Save", "enter")]
buttons_nsi = [("Quit", "ctrl+q"), ("Save Figure", "ctrl+s"), ("Replace Files", "ctrl+o")]
refs = {}
for i, (text, key) in enumerate(buttons):
tmp_ax = fig.add_subplot(gs[7, 3*i:3*(i+1)])
tmp_button = Button(tmp_ax, text)
tmp_button.on_clicked(lambda a, key=key: press(key))
refs[key] = tmp_button
for i, (text, key) in enumerate(buttons_nsi):
tmp_ax = fig.add_subplot(gs[8, 4*i:4*(i+1)])
tmp_button = Button(tmp_ax, text)
tmp_button.on_clicked(lambda a, key=key: press(key))
refs[key] = tmp_button
update_plot()
cid_1 = fig.canvas.mpl_connect('button_press_event', onclick) # Is now done by span selectors
cid_2 = fig.canvas.mpl_connect('key_press_event', lambda event: press(event.key))
rectprops = dict(facecolor='blue', alpha=0.5)
span_selectors = {}
for i, ax in enumerate((ax0, ax1, ax2, ax3)):
span_selectors[i] = SpanSelector(ax, lambda vmax, vmin, index=i: onzoom(vmax, vmin, index), 'horizontal',rectprops=rectprops, useblit=True, button = 3)
fig.tight_layout()
plt.show()
def get_data():
# Get files
try:
import tkinter as tk
from tkinter import filedialog
root = tk.Tk()
root.withdraw()
filenames = filedialog.askopenfilename(multiple=True)
root.destroy()
except Exception as E:
filenames = input("Enter filenames: ").split(",")
filenames = list(set(filenames))
data = []
# Fill data array
for filename in filenames:
# Get x- and y-data
df = pd.read_csv(filename, sep="\t", skip_blank_lines=True, dtype=np.float64, names=(["x", "y"]))
xs = df["x"].to_numpy()
ys = df["y"].to_numpy()
# Get duration if possible, otherwise set to 30
fname, extension = os.path.splitext(filename)
try:
db_dict=crawl_info_file(fname+".info")
date_start=parse(db_dict["datestart"])
date_stop=parse(db_dict["datestop"])
duration=(date_stop-date_start).total_seconds()/2
except Exception as E:
duration = 30
data.append((xs, ys, duration, filename))
return(data)
if __name__ == '__main__':
# Set up what should happen with corrected data, here just save to file
def save_data(xs, ys, filename):
fname, extension = os.path.splitext(filename)
df = pd.DataFrame({"x": xs, "y":ys})
df.to_csv(fname + "FFT" + extension, header=False, index=False, sep="\t")
# Start main function
decrease_standingwave(get_data(), save_data)
|
StarcoderdataPython
|
3201542
|
##########################################################################
# Geometry data
##
class GeometryData:
""" Class which holds the geometry data of a ObjId
"""
def __init__(self, subdetid = 0, discriminator = ()):
self.subdetid = subdetid
self.discriminator = discriminator
# ObjId names from Alignment/CommonAlignment/interface/StructureType.h
data = {-1: GeometryData(), # notfound
0: GeometryData(), # invalid
1: GeometryData(), # AlignableDetUnit
2: GeometryData(), # AlignableDet
3: GeometryData(1), # TPBModule
4: GeometryData(1, ("Half", "Layer", "Rod")), # TPBLadder
5: GeometryData(1, ("Half", "Layer")), # TPBLayer
6: GeometryData(1, ("Half",)), # TPBHalfBarrel
7: GeometryData(1), # TPBBarrel
8: GeometryData(2), # TPEModule
9: GeometryData(2, ("Side", "Half", "Layer", "Blade", "Panel")), # TPEPanel
10: GeometryData(2, ("Side", "Half", "Layer", "Blade")), # TPEBlade
11: GeometryData(2, ("Side", "Half", "Layer")), # TPEHalfDisk
12: GeometryData(2, ("Side", "Half")), # TPEHalfCylinder
13: GeometryData(2, ("Side",)), # TPEEndcap
14: GeometryData(3), # TIBModule
15: GeometryData(3), # TIBString
16: GeometryData(3, ("Side", "Layer", "Half", "OuterInner")), # TIBSurface
17: GeometryData(3, ("Side", "Layer", "Half")), # TIBHalfShell
18: GeometryData(3, ("Side", "Layer")), # TIBLayer
19: GeometryData(3, ("Side",)), # TIBHalfBarrel
20: GeometryData(3), # TIBBarrel
21: GeometryData(4), # TIDModule
22: GeometryData(4, ("Side", "Layer", "Ring", "OuterInner")), # TIDSide
23: GeometryData(4, ("Side", "Layer", "Ring")), # TIDRing
24: GeometryData(4, ("Side", "Layer")), # TIDDisk
25: GeometryData(4, ("Side",)), # TIDEndcap
26: GeometryData(5), # TOBModule
27: GeometryData(5, ("Side", "Layer", "Rod")), # TOBRod
28: GeometryData(5, ("Side", "Layer")), # TOBLayer
29: GeometryData(5, ("Side",)), # TOBHalfBarrel
30: GeometryData(5), # TOBBarrel
31: GeometryData(6), # TECModule
32: GeometryData(6, ("Side", "Layer", "OuterInner", "Petal", "Ring")), # TECRing
33: GeometryData(6, ("Side", "Layer", "OuterInner", "Petal")), # TECPetal
34: GeometryData(6, ("Side", "Layer", "OuterInner")), # TECSide
35: GeometryData(6, ("Side", "Layer")), # TECDisk
36: GeometryData(6, ("Side",)), # TECEndcap
37: GeometryData(), # Pixel
38: GeometryData(), # Strip
39: GeometryData(), # Tracker
100: GeometryData(), # AlignableDTBarrel
101: GeometryData(), # AlignableDTWheel
102: GeometryData(), # AlignableDTStation
103: GeometryData(), # AlignableDTChamber
104: GeometryData(), # AlignableDTSuperLayer
105: GeometryData(), # AlignableDTLayer
106: GeometryData(), # AlignableCSCEndcap
107: GeometryData(), # AlignableCSCStation
108: GeometryData(), # AlignableCSCRing
109: GeometryData(), # AlignableCSCChamber
110: GeometryData(), # AlignableCSCLayer
111: GeometryData(), # AlignableMuon
112: GeometryData(), # Detector
1000: GeometryData(), # Extras
1001: GeometryData(), # BeamSpot
}
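# Example (illustrative only): the discriminator tuple lists the structure
# indices that identify an alignable of a given ObjId, e.g. a TPBLadder
# (ObjId 4) is addressed by (Half, Layer, Rod):
#
#     ladder = data[4]
#     assert ladder.subdetid == 1
#     assert ladder.discriminator == ("Half", "Layer", "Rod")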
|
StarcoderdataPython
|
1770434
|
from os import path
import sys
sys.path.append(path.join(path.dirname(__file__), path.pardir, path.pardir))
|
StarcoderdataPython
|
109858
|
import pandas as pd
import seaborn as sns
from datetime import datetime
import matplotlib.patches as patches
from ..common import log
from ..util.completion import completion_idx_has_data
def completion_plot(completion, modalities, start, end, freq,
ax=None, cmap=None, x_tick_mult=24, x_tick_fmt="%y-%m-%d %H:%M",
enrolment_date=None, events=None, event_radius=0):
""" Plots the completion array as a heatmap
Parameters
__________
completion: np.ndarray
Completion 2d array for a participant
modalities: list of str
A list of the modalities, used to label the Y-axis
start: pd.Timedelta / np.datetime64
end: pd.Timedelta / np.datetime64
freq: str
The timedelta string associated with the given completion array
ax: matplotlib axes
cmap: Colormap
x_tick_mult: int
Multiplier for x ticks, will draw a tick every x_tick_mult hours.
x_tick_fmt: str
enrolment_date: pd.Timedelta / np.datetime64
events: list of tuples of strings
A list of event timestamp and label pairs, both as strings
Draws a vertical line at each event, colored as:
green: data from all modalities is present at the event
orange: data from at least one modality is present at the event
red: no data is present at the event
If event_radius is specified the color checks include the whole event circumference.
event_radius: float
Radius around each event in multiples of freq
Returns
_______
ax: matplotlib axes
"""
td = pd.Timedelta(freq)
ax = sns.heatmap(completion, cmap=cmap, cbar=False, xticklabels=int(x_tick_mult*(pd.Timedelta('1h')/td)), ax=ax)
# Gaps
N_x = completion.shape[1]
N_y = completion.shape[0]
for i in range(N_y-1):
ax.hlines(i+1, 0, N_x, color='white', linewidth=2)
# Outline
ax.vlines(0, 0, N_y, color='black', linewidth=2)
ax.vlines(N_x, 0, N_y, color='black', linewidth=2)
ax.hlines(0, 0, N_x, color='black', linewidth=2)
ax.hlines(N_y, 0, N_x, color='black', linewidth=2)
# Events
if events:
for e_stamp_str,e_label in events:
try:
e_stamp_unix = float(e_stamp_str)
e_stamp = pd.Timestamp(e_stamp_unix, unit='s').tz_localize('UTC')
except:
# TODO: review usage of tz_localize()
e_stamp = pd.Timestamp(datetime.strptime(e_stamp_str, '%Y-%m-%d %H:%M:%S')).tz_localize('CET').tz_convert('UTC')
if e_stamp < start or e_stamp > end: continue
log.debug("Event at {}: {}".format(e_stamp, e_label))
e_idx = (e_stamp - start)//td
e_slice = None
if event_radius:
e_start = max(0, (e_stamp - start - (td * event_radius))//td)
e_end = min(N_x-1, (e_stamp - start + (td * event_radius))//td)
e_slice = slice(e_start, e_end+1)
rect = patches.Rectangle((e_start, 0), e_end - e_start, N_y, linewidth=0.5, edgecolor='k', alpha=0.25, zorder=9)
ax.add_patch(rect)
has_all = completion_idx_has_data(completion, e_slice if e_slice else e_idx, requirement_function=all)
has_any = completion_idx_has_data(completion, e_slice if e_slice else e_idx, requirement_function=any)
if has_all: ax.vlines(e_idx, 0, N_y, color='green', linewidth=2, zorder=10)
elif has_any: ax.vlines(e_idx, 0, N_y, color='orange', linewidth=2, zorder=10)
else: ax.vlines(e_idx, 0, N_y, color='red', linewidth=2, zorder=10)
# Enrolment
if enrolment_date:
enrolment_idx = (enrolment_date - start)//td
ax.vlines(enrolment_idx-1, 0, N_y, color='red', linewidth=0.5)
# Labels
ax.set_ylabel('Data topic')
ax.set_xlabel('Date')
xticks = ax.get_xticks()
ax.set_xticklabels([(start + (tm * td)).strftime(x_tick_fmt) for tm in xticks], rotation=0)
ax.set_yticklabels([ m.split('_', 2)[-1] for m in modalities ], rotation=0)
return ax
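# Hedged usage sketch: the completion array, modality names and date range
# below are made up for illustration; only the keyword names come from the
# signature above.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     completion = np.random.rand(3, 24 * 7)      # 3 modalities, hourly bins for one week
#     modalities = ['android_phone_acceleration',
#                   'android_phone_battery_level',
#                   'android_phone_gyroscope']
#     ax = completion_plot(completion, modalities,
#                          start=pd.Timestamp('2020-01-01', tz='UTC'),
#                          end=pd.Timestamp('2020-01-08', tz='UTC'),
#                          freq='1h')
#     plt.show()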
|
StarcoderdataPython
|
4993721
|
<reponame>parsoyaarihant/CS726-Project-2048-Using-RL<gh_stars>0
import random
import logic
import constants as c
class GameGrid():
def __init__(self):
self.commands = {c.KEY_UP: logic.up, c.KEY_DOWN: logic.down,
c.KEY_LEFT: logic.left, c.KEY_RIGHT: logic.right,
c.KEY_UP_ALT: logic.up, c.KEY_DOWN_ALT: logic.down,
c.KEY_LEFT_ALT: logic.left,
c.KEY_RIGHT_ALT: logic.right}
self.actions = [c.KEY_UP, c.KEY_DOWN, c.KEY_RIGHT, c.KEY_LEFT]
self.grid_cells = []
self.init_matrix()
def gen(self):
return random.randint(0, c.GRID_LEN - 1)
def init_matrix(self):
self.matrix = logic.new_game(4)
self.matrix = logic.add_two(self.matrix)
self.matrix = logic.add_two(self.matrix)
def step(self, key):
if isinstance(key, int):
key = self.actions[key]
if key in self.commands:
self.matrix, done = self.commands[key](self.matrix)
if done:
self.matrix = logic.add_two(self.matrix)
done = False
# Check if game is completed
if logic.game_state(self.matrix) == 'win':
done = True
if logic.game_state(self.matrix) == 'lose':
done = True
return self.matrix, logic.reward(self.matrix), done, ""
def generate_next(self):
index = (self.gen(), self.gen())
while self.matrix[index[0]][index[1]] != 0:
index = (self.gen(), self.gen())
self.matrix[index[0]][index[1]] = 2
def get_state(self):
return self.matrix
def key_down(self, event):
key = repr(event.char)
if event.keycode == 114:
self.reset()
return
self.step(key)
self.display_state()
def action_space(self):
# return possible action
return self.actions[random.randint(0, 3)]
def reset(self):
# resets the game to initial state
self.init_matrix()
def display_state(self):
print()
for i in self.matrix:
print(i)
def reward(self):
return logic.reward(self.matrix)
def highest_score(self):
return logic.highest_score(self.matrix)
gamegrid = GameGrid()
#print(gamegrid.display_state())
#gamegrid.perform_action(c.KEY_UP)
#print(gamegrid.display_state())
|
StarcoderdataPython
|
6582731
|
<filename>psydac/linalg/tests/test_pcg.py
import numpy as np
import pytest
#===============================================================================
@pytest.mark.parametrize( 'n', [8, 16] )
@pytest.mark.parametrize( 'p', [2, 3] )
def test_pcg(n, p):
"""
Test preconditioned Conjugate Gradient algorithm on tridiagonal linear system.
Parameters
----------
n : int
Dimension of linear system (number of rows = number of columns).
"""
from psydac.linalg.iterative_solvers import pcg, jacobi
from psydac.linalg.stencil import StencilVectorSpace, StencilMatrix, StencilVector
from psydac.linalg.basic import LinearSolver
#---------------------------------------------------------------------------
# PARAMETERS
#---------------------------------------------------------------------------
# ... Vector Spaces
V = StencilVectorSpace([n], [p], [False])
e = V.ends[0]
s = V.starts[0]
# Build banded matrix with 2p+1 diagonals: must be symmetric and positive definite
# Here we assign value 2*p on main diagonal and -1 on other diagonals
A = StencilMatrix(V, V)
A[:,-p:0 ] = -1
A[:, 0:1 ] = 2*p
A[:, 1:p+1] = -1
A.remove_spurious_entries()
# Build exact solution
xe = StencilVector(V)
xe[s:e+1] = np.random.random(e+1-s)
# Tolerance for success: L2-norm of error in solution
tol = 1e-10
#---------------------------------------------------------------------------
# TEST
#---------------------------------------------------------------------------
# Title
print()
print("="*80)
print("SERIAL TEST: solve linear system A*x = b using preconditioned conjugate gradient")
print("="*80)
print()
# Manufacture right-hand-side vector from exact solution
b = A.dot(xe)
class LocallyOnlyJacobiSolver(LinearSolver):
@property
def space(self):
return V
def solve(self, rhs, out=None, transposed=False):
# (don't care about out or any other parameter here; it's only used locally)
return jacobi(A, rhs)
# Solve linear system using PCG (and CG)
# also does an interface test for the Jacobi preconditioner
x0, info0 = pcg( A, b, pc= None, tol=1e-12 )
x1, info1 = pcg( A, b, pc= "jacobi", tol=1e-12 )
x1b, info1b = pcg( A, b, pc= jacobi, tol=1e-12 )
x1c, info1c = pcg( A, b, pc= LocallyOnlyJacobiSolver(), tol=1e-12 )
x2, info2 = pcg( A, b, pc= "weighted_jacobi", tol=1e-12 )
# Verify correctness of calculation: L2-norm of error
err0 = x0-xe
err_norm0 = np.linalg.norm(err0.toarray())
err1 = x1-xe
err_norm1 = np.linalg.norm(err1.toarray())
err2 = x2-xe
err_norm2 = np.linalg.norm(err2.toarray())
#---------------------------------------------------------------------------
# TERMINAL OUTPUT
#---------------------------------------------------------------------------
print()
print( 'A =', A.toarray(), sep='\n' )
print( 'b =', b.toarray())
print( 'x1 =', x1.toarray())
print( 'x2 =', x2.toarray())
print( 'xe =', xe.toarray())
print( 'info1 (Jac) =', info1 )
print( 'info2 (w-Jac)=', info2 )
print()
print( "-"*40 )
print( "L2-norm of error in (PCG + Jacobi) solution = {:.2e}".format(err_norm1))
print( "L2-norm of error in (PCG + weighted Jacobi) solution = {:.2e}".format(err_norm2))
if err_norm0 < tol and err_norm1 < tol and err_norm2 < tol:
print( "PASSED" )
else:
print( "FAIL" )
print( "-"*40 )
#---------------------------------------------------------------------------
# PYTEST
#---------------------------------------------------------------------------
assert err_norm0 < tol and err_norm1 < tol and err_norm2 < tol
assert info1 == info1b and info1 == info1c
|
StarcoderdataPython
|
3533318
|
from mathlib.math import CustomMath
def test_sum_two_arguments():
first = 2
second = 11
custom_math = CustomMath()
result = custom_math.sum(first,second)
assert result == (first+second)
|
StarcoderdataPython
|
8000389
|
<gh_stars>1-10
"""
Create doc-doc edges
Steps:
1. Load all entities with their relations
2. Load relevant relations
3. Create adjacency matrix for word-word relations
4. Count number of relation between two documents
5. Weight relations and set a doc-doc edge weight
"""
from collections import defaultdict
from math import log
import pandas as pd
from tqdm import tqdm
from helper import file_utils as file, io_utils as io
from loader.wiki_api import get_safely
from os.path import exists
# Data loader
def get_vocab_ids():
entity2id_df = file.get_entity2id()
unmapped_entities = entity2id_df[entity2id_df["wikiID"] == "-1"].index
entity2id_df.drop(unmapped_entities, inplace=True)
return entity2id_df["wikiID"].to_numpy()
def get_triples(filtered=False):
return file.get_filtered_triples() if filtered else file.get_all_triples()
def get_documents():
cleaned_sentences = list(map(lambda doc: doc.split(" "), file.get_cleaned_sentences()))
return cleaned_sentences
# Create triple documents
def create_triples():
# Creates triples based on the vocab entities and relations (unfiltered)
triples = []
all_entities = file.get_vocab_entities()
for entity in all_entities.keys():
for relation in get_safely(all_entities, [entity, "relations"]).keys():
for relation_value in get_safely(all_entities, [entity, "relations", relation]).keys():
result = get_safely(all_entities, [entity, "relations", relation, relation_value])
if not isinstance(result, dict) or result.get("id") is None:
continue
else:
triple = [all_entities[entity]["id"], relation, result.get("id")]
triples.append(triple)
file.save_all_triples(triples)
return triples
def filter_triples():
# Adjust filters in `analyze_properties` and save them to the filtered_relations.csv
triples = get_triples()
relevant_relations = file.get_filtered_relations()
vocab_ids = get_vocab_ids()
old_size = triples.shape[0]
# Filter out all triples which are not contained in `filtered_relations.csv`
irrelevant_triples = triples[~triples["relation"].isin(relevant_relations)].index
triples.drop(irrelevant_triples, inplace=True)
# Filter out all triples which don't lead to another word from the node
unmatched_triples = triples[~triples["entity2"].isin(vocab_ids)].index
triples.drop(unmatched_triples, inplace=True)
# Filter out all relation to itself
self_relations = triples[triples["entity1"] == triples["entity2"]].index
triples.drop(self_relations, inplace=True)
# Drop duplicate relations
triples.drop_duplicates(inplace=True)
file.save_filtered_triples(triples)
print(f"Filtered out {old_size - triples.shape[0]} irrelevant triples...")
def setup_triples():
# Creates a filtered and unfiltered triples CVS file
create_triples()
filter_triples()
def generate_doc2relations():
setup_triples()
doc_nouns_norm = file.get_normalized_nouns() # Array with all nouns per doc // must be split
relations_array = []
ids = file.get_doc2id()
filtered_triples = get_triples(filtered=True) # Triples
for doc_index, doc in enumerate(doc_nouns_norm):
if doc == "":
relations_array.append("-")
continue
# All ID's of the normalized nouns in the current document
doc_ids = ids[ids["doc"] == doc_index]["wikiID"].tolist()
# Graph edges pointing to other entities
triples_out = filtered_triples[filtered_triples["entity1"].isin(doc_ids)]
triples_in = filtered_triples[filtered_triples["entity2"].isin(doc_ids)]
triples_in.columns = ["entity2", "relations", "entity1"]
triples_total = pd.concat([triples_out, triples_in])
all_outgoing_relations = triples_total["relations"].tolist()
if len(all_outgoing_relations) == 0:
all_outgoing_relations = "-"
relations_array.append(all_outgoing_relations)
file.save_doc2relations([" ".join(elem) for elem in relations_array])
# Adjacency matrices
def create_doc2doc_edges():
# if exists(io.get_document_triples_path()):
# print("Document triples pickle file already exists, will not be created again")
# generate_idf_scores()
# apply_idf()
# return
generate_doc2relations()
generate_idf_scores()
doc_nouns_norm = file.get_normalized_nouns() # Array with all nouns per doc // must be split
filtered_triples = get_triples(filtered=True) # Triples
ids = file.get_doc2id()
triples = []
filtered_out_items = 0
with tqdm(total=len(doc_nouns_norm)) as bar:
for doc_index, doc in enumerate(doc_nouns_norm):
if doc == "":
bar.update(1)
continue
# All ID's of the normalized nouns in the current document
doc_ids = ids[ids["doc"] == doc_index]["wikiID"].tolist()
assert len(doc_ids) <= len(doc.split(" ")), f"{len(doc.split(' '))} vs. {len(doc_ids)}"
# Graph edges pointing to other entities
triples_out = filtered_triples[filtered_triples["entity1"].isin(doc_ids)]
triples_in = filtered_triples[filtered_triples["entity2"].isin(doc_ids)]
triples_in.columns = ["entity2", "relations", "entity1"]
triples_total = pd.concat([triples_out, triples_in])
doc_pointers = {}
for index, row in triples_total.iterrows():
entity1 = row["entity1"]
relation = row["relations"]
entity2 = row["entity2"]
# Look in which documents entity2 appears
pointer = ids[ids["wikiID"] == entity2]["doc"].tolist()
assert entity1 in doc_ids
for doc_id in pointer:
# Ignore doc2doc edges to doc itself
if doc_id <= doc_index:
continue
if doc_id in doc_pointers:
doc_pointers[doc_id].append(relation)
else:
doc_pointers[doc_id] = [relation]
for key in doc_pointers.keys():
# Filter out all docs with length below 2
if len(doc_pointers[key]) > 1:
triples.append([doc_index, key, len(doc_pointers[key]), "+".join(doc_pointers[key])])
bar.update(1)
data = pd.DataFrame(triples)
data.columns = ["doc1", "doc2", "relations", "detail"]
print(f"Highest number of relations between two docs: {max(data['relations'])}")
print(f"Created {len(triples)} doc2doc edges (filtered by threshold: {filtered_out_items})")
file.save_document_triples(data)
apply_idf()
def generate_idf_scores():
print("Generate IDF scores...")
doc_relations = file.get_doc2relations()
num_docs = len(doc_relations)
doc_word_freq = defaultdict(int)
relation_doc_freq = {}
relation_doc_freq_wiki = {}
relations_in_docs = defaultdict(set)
row = []
col = []
weight = []
weight_wiki = []
for i, rels in enumerate(doc_relations):
relations = rels.split()
for rel in relations:
relations_in_docs[rel].add(i)
doc_word_str = (i, rel)
doc_word_freq[doc_word_str] += 1
all_relations = file.get_all_relations()
for rel, doc_list in relations_in_docs.items():
count = all_relations[all_relations["ID"] == rel]["count"].tolist()
assert len(count) <= 1, (count, rel)
if len(count) == 1:
relation_doc_freq_wiki[rel] = count[0]
else:
relation_doc_freq_wiki[rel] = 0
relation_doc_freq[rel] = len(doc_list)
for i, rels in enumerate(doc_relations):
relations = rels.split()
doc_rel_set = set()
for rel in relations:
if rel in doc_rel_set or rel == "-":
continue
freq = doc_word_freq[(i, rel)]
row.append(i)
col.append(rel)
idf = log(1.0 * num_docs / relation_doc_freq[rel])
# Source: https://www.wikidata.org/wiki/Wikidata:Statistics on 17.12.2020 at 12:20
idf_wiki = log(1.0 * 91559495 / relation_doc_freq_wiki[rel])
weight.append(freq * idf)
weight_wiki.append(freq * idf_wiki)
doc_rel_set.add(rel)
data = pd.DataFrame({"doc": row, "relation": col, "idf": weight, "idf_wiki": weight_wiki})
file.save_doc2idf(data)
def apply_idf():
print("Applying IDF...")
doc_triples = file.get_document_triples()
idf = file.get_doc2idf()
data = []
with tqdm(total=doc_triples.shape[0]) as bar:
for index, row in doc_triples.iterrows():
doc1 = row["doc1"]
doc2 = row["doc2"]
relations = row["detail"].split("+")
score = 0
wiki_score = 0
for rel in relations:
scores = idf[(idf["relation"] == rel) & (idf["doc"] == doc1)][["idf", "idf_wiki"]]
idf_score = scores["idf"].tolist()
idf_wiki_score = scores["idf_wiki"].tolist()
assert len(idf_score) == 1 and len(idf_wiki_score) == 1
score += idf_score[0]
wiki_score += idf_wiki_score[0]
data.append([doc1, doc2, len(relations), score, wiki_score])
bar.update(1)
dataframe = pd.DataFrame(data)
dataframe.columns = ["doc1", "doc2", "count", "idf", "idf_wiki"]
normalize(dataframe)
def normalize(data):
base_edges = file.get_original_edges()
pmi_factor = base_edges[base_edges["edge_type"] == "pmi"]["weight"].max()
idf_factor = base_edges[base_edges["edge_type"] == "idf"]["weight"].max()
idf_max = data["idf"].max()
idf_min = data["idf"].min()
idf_wiki_max = data["idf_wiki"].max()
idf_wiki_min = data["idf_wiki"].min()
count_max = data["count"].max()
count_min = data["count"].min()
all = []
for index, row in data.iterrows():
doc1 = row["doc1"]
doc2 = row["doc2"]
count = row["count"]
idf_score = row["idf"]
idf_wiki_score = row["idf_wiki"]
count_norm = apply_normalization(count, count_min, count_max, idf_factor)
count_norm_pmi = apply_normalization(count, count_min, count_max, pmi_factor)
idf_norm = apply_normalization(idf_score, idf_min, idf_max, idf_factor)
idf_wiki_norm = apply_normalization(idf_wiki_score, idf_wiki_min, idf_wiki_max, idf_factor)
idf_norm_pmi = apply_normalization(idf_score, idf_min, idf_max, pmi_factor)
idf_wiki_norm_pmi = apply_normalization(idf_wiki_score, idf_wiki_min, idf_wiki_max, pmi_factor)
result = [doc1, doc2, count, idf_score, idf_wiki_score, count_norm, count_norm_pmi, idf_norm, idf_wiki_norm, idf_norm_pmi, idf_wiki_norm_pmi]
all.append(result)
df = pd.DataFrame(all)
df.columns = ["doc1", "doc2", "count", "idf", "idf_wiki", "count_norm", "count_norm_pmi", "idf_norm", "idf_wiki_norm", "idf_norm_pmi", "idf_wiki_norm_pmi"]
file.save_document_triples_metrics(df)
def apply_normalization(value, min, max, factor):
return ((value - min) / (max - min)) * factor
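# Worked example: apply_normalization(5, 0, 10, 2.0) == 1.0 -- the value is
# first min/max-rescaled to [0, 1] and then stretched to [0, factor].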
# windows_relation = []
# windows_document = []
#
#
# def windows_relation_base(cond_index=-1, window_size=15):
# global windows_relation
# triples = file.get_document_triples("mr")
# tmp = []
# for index, row in triples.iterrows():
# if index > 20:
# break
# if index <= cond_index:
# continue
# relations = row["detail"].split("+")
# doc_length = len(relations)
# if doc_length <= window_size:
# [tmp.append(r) for r in relations]
# else:
# assert False
#
# if len(tmp) >= window_size:
# windows_relation.append(tmp)
# tmp = []
# windows_relation_base(cond_index+1)
#
#
# def windows_document_base(cond_index=-1, window_size=15):
# global windows_document
# triples = file.get_document_triples("mr")
# tmp = []
# counter = 0
# for index, row in triples.iterrows():
# if index > 20:
# break
# if index <= cond_index:
# continue
# relations = row["detail"].split("+")
#
# if counter < window_size:
# [tmp.append(r) for r in relations]
#
# elif counter == window_size:
# windows_document.append(tmp)
# windows_document_base(cond_index+1)
# break
#
# elif index == triples.shape[0] - window_size
#
# counter += 1
#
# def generate_pmi(windows):
# number_sliding_windows = len(windows) #W
# counter = {}
# for window in windows:
# for relation in window:
# for x in windows:
# contained = relation in x
# if contained:
# if relation in counter:
# counter[relation] += 1
# else:
# counter[relation] = 1
# print(counter)
#
#
if __name__ == '__main__':
create_doc2doc_edges()
|
StarcoderdataPython
|
9746323
|
from django.contrib import admin
from common.actions import make_export_action
from search.models.alias import Alias
from search.models import SuggestionLog
from search.models.session_alias import SessionAlias
class AliasAdmin(admin.ModelAdmin):
list_display = ('id', 'alias', 'target')
actions = make_export_action("Export Alias to CSV")
class SuggestionAdmin(admin.ModelAdmin):
search_fields = ['search_query', 'session_id']
list_filter = ['num_suggestions']
list_display = ['session_hash', 'search_query', 'num_suggestions', 'created_at']
actions = make_export_action("Export Suggestions to CSV")
def session_hash(self, obj):
return obj.session_id[:6]
def get_queryset(self, request):
return SuggestionLog.objects.exclude(session_id__isnull=True).exclude(session_id__exact='')
class SessionAliasAdmin(admin.ModelAdmin):
list_display = ('id', 'alias', 'session')
search_fields = ('alias',)
admin.site.register(Alias, AliasAdmin)
admin.site.register(SuggestionLog, SuggestionAdmin)
admin.site.register(SessionAlias, SessionAliasAdmin)
|
StarcoderdataPython
|
11299897
|
<filename>L1Trigger/L1TCalorimeter/python/hackConditions_cff.py
#
# hachConditions.py Load ES Producers for any conditions not yet in GT...
#
# The intention is that this file should shrink with time as conditions are added to GT.
#
import FWCore.ParameterSet.Config as cms
import sys
from Configuration.Eras.Modifier_run2_HI_specific_cff import run2_HI_specific
#from Configuration.Eras.Era_Run2_2016_pA_cff import Run2_2016_pA
from Configuration.Eras.Modifier_pA_2016_cff import pA_2016
#
# Legacy Trigger: No Hacks Needed
#
from Configuration.Eras.Modifier_stage1L1Trigger_cff import stage1L1Trigger
from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
def _load(process, f):
process.load(f)
#
# Stage-1 Trigger
#
# Switch between HI and PP calo configuration:
modifyL1TCalorimeterHackConditions_stage1HI = (stage1L1Trigger & ~stage2L1Trigger & run2_HI_specific).makeProcessModifier(lambda p: _load(p, "L1Trigger.L1TCalorimeter.caloConfigStage1HI_cfi"))
modifyL1TCalorimeterHackConditions_stage1PP = (stage1L1Trigger & ~stage2L1Trigger & ~run2_HI_specific).makeProcessModifier(lambda p: _load(p, "L1Trigger.L1TCalorimeter.caloConfigStage1PP_cfi"))
# Override Calo Scales:
modifyL1TCalorimeterHackConditions_stage1Common = (stage1L1Trigger & ~stage2L1Trigger).makeProcessModifier(lambda p: _load(p, "L1Trigger.L1TCalorimeter.caloScalesStage1_cff"))
# CaloParams is in the DB for Stage-1
#
# Stage-2 Trigger
#
modifyL1TCalorimeterHackConditions_stage2PA = (stage2L1Trigger & pA_2016).makeProcessModifier(lambda p: _load(p, "L1Trigger.L1TCalorimeter.caloStage2Params_2016_v3_3_1_HI_cfi"))
modifyL1TCalorimeterHackConditions_stage2PP = (stage2L1Trigger & ~pA_2016).makeProcessModifier(lambda p: _load(p, "L1Trigger.L1TCalorimeter.caloStage2Params_2017_v1_8_4_cfi"))
# What about CaloConfig? Related: How will we switch PP/HH?
#
|
StarcoderdataPython
|
1678043
|
<reponame>HSunboy/hue<filename>desktop/core/ext-py/eventlet-0.21.0/eventlet/hubs/poll.py<gh_stars>1-10
import errno
import sys
from eventlet import patcher
select = patcher.original('select')
time = patcher.original('time')
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop
from eventlet.support import get_errno, clear_sys_exc_info
EXC_MASK = select.POLLERR | select.POLLHUP
READ_MASK = select.POLLIN | select.POLLPRI
WRITE_MASK = select.POLLOUT
class Hub(BaseHub):
def __init__(self, clock=None):
super(Hub, self).__init__(clock)
self.poll = select.poll()
# poll.modify is new to 2.6
try:
self.modify = self.poll.modify
except AttributeError:
self.modify = self.poll.register
def add(self, evtype, fileno, cb, tb, mac):
listener = super(Hub, self).add(evtype, fileno, cb, tb, mac)
self.register(fileno, new=True)
return listener
def remove(self, listener):
super(Hub, self).remove(listener)
self.register(listener.fileno)
def register(self, fileno, new=False):
mask = 0
if self.listeners[READ].get(fileno):
mask |= READ_MASK | EXC_MASK
if self.listeners[WRITE].get(fileno):
mask |= WRITE_MASK | EXC_MASK
try:
if mask:
if new:
self.poll.register(fileno, mask)
else:
try:
self.modify(fileno, mask)
except (IOError, OSError):
self.poll.register(fileno, mask)
else:
try:
self.poll.unregister(fileno)
except (KeyError, IOError, OSError):
# raised if we try to remove a fileno that was
# already removed/invalid
pass
except ValueError:
# fileno is bad, issue 74
self.remove_descriptor(fileno)
raise
def remove_descriptor(self, fileno):
super(Hub, self).remove_descriptor(fileno)
try:
self.poll.unregister(fileno)
except (KeyError, ValueError, IOError, OSError):
# raised if we try to remove a fileno that was
# already removed/invalid
pass
def do_poll(self, seconds):
# poll.poll expects integral milliseconds
return self.poll.poll(int(seconds * 1000.0))
def wait(self, seconds=None):
readers = self.listeners[READ]
writers = self.listeners[WRITE]
if not readers and not writers:
if seconds:
time.sleep(seconds)
return
try:
presult = self.do_poll(seconds)
except (IOError, select.error) as e:
if get_errno(e) == errno.EINTR:
return
raise
SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
if self.debug_blocking:
self.block_detect_pre()
# Accumulate the listeners to call back to prior to
# triggering any of them. This is to keep the set
# of callbacks in sync with the events we've just
# polled for. It prevents one handler from invalidating
# another.
callbacks = set()
for fileno, event in presult:
if event & READ_MASK:
callbacks.add((readers.get(fileno, noop), fileno))
if event & WRITE_MASK:
callbacks.add((writers.get(fileno, noop), fileno))
if event & select.POLLNVAL:
self.remove_descriptor(fileno)
continue
if event & EXC_MASK:
callbacks.add((readers.get(fileno, noop), fileno))
callbacks.add((writers.get(fileno, noop), fileno))
for listener, fileno in callbacks:
try:
listener.cb(fileno)
except SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())
clear_sys_exc_info()
if self.debug_blocking:
self.block_detect_post()
|
StarcoderdataPython
|
11263779
|
# class Node:
# def __init__(self,value,next= None):
# self.value = value
# self.next = next
# class LinkedList:
# def __init__(self, head= None):
# self.head = head
# def __str__(self):
# current = self.head
# output = ""
# while current is not None:
# output += f"{{ {current.value} }} -> "
# current = current.next
# return output + "None"
# def append(self, value):
# newnode = Node(value)
# if self.head:
# current = self.head
# while current.next:
# current = current.next
# current.next = newnode
# else:
# self.head = newnode
# @staticmethod
def zipLists(a, b):
"""Interleave the nodes of two linked lists in place and return the first list."""
cur1, cur2 = a.head, b.head
while cur1 and cur2:
save1 = cur1.next
save2 = cur2.next
cur1.next = cur2
cur2.next = save1
cur1 = save1
cur2 = save2
return a
# next1,cur1.next = cur1.next, cur2
# if next1:
# next2, cur2.next = cur2.next, next1
# cur1, cur2 = next1, next2
# return a
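# Illustrative usage (relies on the Node/LinkedList classes commented out
# above, so it is left as a comment here):
#
#     a = LinkedList(); [a.append(v) for v in (1, 3, 5)]
#     b = LinkedList(); [b.append(v) for v in (2, 4, 6)]
#     print(zipLists(a, b))  # { 1 } -> { 2 } -> { 3 } -> { 4 } -> { 5 } -> { 6 } -> None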
|
StarcoderdataPython
|
6521537
|
"""Generating spectra with Fluctuating Gunn Peterson Approximation (FGPA)
- The code is MPI-parallel and works on Illustris and MP-Gadget snapshots. The packages needed are:
- astropy
- fake_spectra
To get the FGPA spectra, refer to the helper script at
https://github.com/mahdiqezlou/LyTomo_Watershed/tree/dist/helper_scripts/FGPA
which follows the steps below :
1. From density.py use Gadget() to construct DM density
field on a grid with the desired size. For FGPA, the grid cells should on average
have 1 particle per cell.
2. Save the results from the previous step in savedir directory. The density
has been saved on several files depending on number of ranks used.
3. Run get_noiseless_map() or get_sample_spectra() functions here and pass the
directories for the density field above as savedir argument.
4. The output is a single hdf5 file containing either the full true flux map or the
random spectra sample.
"""
import os
import glob
import numpy as np
import h5py
from astropy.cosmology import Planck15 as cosmo
from scipy.ndimage import gaussian_filter1d
import fake_spectra.fluxstatistics as fs
from . import spectra_mocking as sm
from . import density
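# A hedged usage sketch of the workflow described in the module docstring.
# The import path, redshift, box size, grid sizes and smoothing lengths are
# illustrative placeholders, not values prescribed by this module; it assumes
# the density grids from step 2 are already saved in `savedir`.
#
#     from mpi4py import MPI
#     from lytomo_watershed.fgpa import Fgpa   # adjust to the actual module path
#
#     fgpa = Fgpa(MPI=MPI, comm=MPI.COMM_WORLD, z=2.4, boxsize=205,
#                 Ngrids=205, Npix=205, SmLD=1, SmLV=1,
#                 savedir='./density_grids/')
#     fgpa.get_noiseless_map(savefile='FGPA_flux_z2.4.hdf5')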
class Fgpa:
"""A class for FGPA method"""
def __init__(self, MPI, comm, z, boxsize, Ngrids, Npix, SmLD, SmLV, savedir, fix_mean_flux=True,
mean_flux=None, gamma=1.46, T0=1.94*10**4):
"""
Params :
comm : instanse of the MPI communicator
z : redshift
savedir : the directory containing the density map
Ngrids : int, the size of the x and y dimensions of the desired map
Npix : number of desired pxiels along final map
gamma : The slope in temperature-density relation
T0 : Temperature at mean density
"""
# Initialize the MPI communication
self.MPI = MPI
self.comm = comm
self.rank = self.comm.Get_rank()
self.z = z
self.boxsize = boxsize
self.Ngrids = Ngrids
self.Npix = Npix
self.SmLD = SmLD
self.SmLV = SmLV
self.savedir = savedir
self.fix_mean_flux = fix_mean_flux
self.mean_flux = mean_flux
# Thermal paramters of the IGM
self.gamma= gamma
self.T0 = T0
# Physical constants
# Lya constants do not matter at all, since we fix the mean absorption
self.lambda_Lya = 1215.67
self.sigma_Lya = 1
self.mp = 1.67*10**(-27) # proton mass in kg
self.kB=1.38*10**(-23) # Boltzmann constant in SI
def get_sample_spectra(self, num, seed=13, savefile='spectra_z2.4_FGPA_n1.hdf5'):
"""Get a sample of spectra to be used for mock map reconstruction
num : Numebr of desired random spectra
savefile : The name for hdf5 file to save the spectra file
"""
tau_conv = self.get_tau_conv()
if seed is not None:
cofm = self.get_cofm(num=num, Nvoxles=tau_conv.shape[0], seed=seed).astype(int)
else:
x, y = np.meshgrid(np.arange(self.Ngrids), np.arange(self.Ngrids))
cofm = np.zeros(shape=(self.Ngrids*self.Ngrids,2), dtype=int)
cofm[:,0] = np.ravel(x)
cofm[:,1] = np.ravel(y)
del x, y
ind = np.where(tau_conv!= -1)
tau_sampled = np.zeros(shape=(cofm.shape[0], tau_conv.shape[2]))
# Find the sample spectra on this rank
ind_cofm = np.where(np.isin(cofm[:,0], ind[0])*np.isin(cofm[:,1], ind[1]))[0]
tau_sampled[ind_cofm] = tau_conv[cofm[ind_cofm,0], cofm[ind_cofm,1],:]
### MPI part
# Make sure the data is contiguous in memeory
tau_sampled = np.ascontiguousarray(tau_sampled, np.float64)
# Add the results from all ranks
self.comm.Allreduce(self.MPI.IN_PLACE, tau_sampled, op=self.MPI.SUM)
# Scale the tau to get mean flux right
if self.fix_mean_flux:
if self.mean_flux is None:
mean_flux = sm.get_mean_flux(z=self.z)
else:
mean_flux =self.mean_flux
print('mean flux is ', mean_flux, flush=True)
scale = fs.mean_flux(tau_sampled, mean_flux_desired=mean_flux)
else:
scale=1
tau_sampled *= scale
if self.rank==0 :
print('Scaling tau with :', scale)
# Change cofm to kpc/h to record on the spectra
cofm = cofm.astype(float)*(self.boxsize*1000/tau_conv.shape[0])
if self.rank == 0 :
with h5py.File(self.savedir+savefile,'w') as fw:
# We need all Header info to load the file with fake_spectra
# Some attrs are copied from hydro spectra, a more stable way
# should be implemented
fw.create_group('Header')
fw['Header'].attrs.create('redshift', self.z)
fw['Header'].attrs.create('box', self.boxsize*1000)
fw['Header'].attrs.create('discarded', 0)
fw['Header'].attrs.create('hubble', 0.6774)
fw['Header'].attrs.create('nbins', tau_sampled.shape[1])
fw['Header'].attrs.create('npart', np.array([0, 15625000000, 0, 0, 0, 0]))
fw['Header'].attrs.create('omegab', 0.04757289217927339)
fw['Header'].attrs.create('omegal', 0.6911)
fw['Header'].attrs.create('omegam', 0.3089)
fw['tau/H/1/1215'] = tau_sampled
fw.create_group('spectra')
fw['spectra/axis'] = 3*np.ones(shape=(cofm.shape[0],))
fw['spectra/cofm'] = cofm
fw['colden/H/1'] = np.zeros(shape=(1,))
fw.create_group('density_Weight_debsity')
fw.create_group('num_important')
fw.create_group('velocity')
fw.create_group('temperature')
fw.create_group('tau_obs')
def get_cofm(self, num, Nvoxels, seed):
""" A copy of fake_spectra.rand_spectra.get_cofm() to replicate the
spectra used for the hydro analysis.
seed : the seed for random sample
num : number of spectra
Nvoxels: the number of voxels along each side of the simulation
"""
np.random.seed(seed)
cofm = Nvoxels*np.random.random_sample((num,3))
return cofm
def get_noiseless_map(self, savefile='FGPA_flux_z2.4.hdf5'):
"""Calculate the true map on a mesh grid of size (Ngrids*Ngrids*Npix)
savefile : The name for hdf5 file to save final map on
"""
tau_conv = self.get_tau_conv()
### The work here is not very well balanced among ranks
if self.rank == 0:
if self.fix_mean_flux:
if self.mean_flux is None:
mean_flux = sm.get_mean_flux(z=self.z)
else:
mean_flux = self.mean_flux
print('mean flux is ', mean_flux, flush=True)
scale = fs.mean_flux(tau_conv, mean_flux_desired=mean_flux)
else :
scale = 1
### Resampling pixels along spectra
flux_conv = self.resample_flux(scale*tau_conv)
del tau_conv
with h5py.File(self.savedir+savefile,'w') as fw:
fw['map'] = flux_conv
self.comm.Barrier()
def resample_flux(self, tau):
"""
Resample spectra to get Npix pixels along line of sight. It is done by averaging the flux over
few consecutive pixels.
Params :
tau : Optical depth.
"""
Nz = tau.shape[2]
# Number of adjacent pixels along the spectrum that need to be averaged over
addpix = int(Nz / self.Npix)
flux = np.zeros(shape=(tau.shape[0], tau.shape[1], self.Npix), dtype=np.float64)
for t in range(self.Npix):
flux[:,:,t] = np.sum(np.exp(-tau[:,:,t*addpix:(t+1)*addpix]), axis=2)/addpix
flux = gaussian_filter1d(flux, sigma=1, mode='wrap')
return flux
def get_tau_conv(self):
"""
Calculate tau in redshift space by convolving tau in real space with an
approximation of the Voigt profile (a Gaussian profile)
Returns :
tau_conv : convoluted optical depth
"""
import glob
import os
from . import mpi4py_helper
fnames = glob.glob(os.path.join(self.savedir,'*_densfield.hdf5'))
fnames = mpi4py_helper.distribute_files(comm=self.comm, fnames=fnames)
tau_conv = None
c=0
for fn in fnames:
c+=1
print(self.rank, fn, flush=True)
if not os.path.exists(fn):
raise IOError('File '+fn+' does not exist!')
with h5py.File(fn,'r') as f:
if tau_conv is None:
# nbodykit does not break the data along the z direction, so Nz is the
# the size of the initial density map in all 3 dimentions
Nz = f['DM/dens'][:].shape[2]
dvbin = cosmo.H(self.z).value*self.boxsize/(cosmo.h*Nz*(1+self.z))
up = np.arange(Nz)*dvbin
# Approx position of the desired sightlines. The approximation should be ok
# for FGPA since the density map has very fine voxels
x, y = int(Nz/self.Ngrids)*np.arange(self.Ngrids), int(Nz/self.Ngrids)*np.arange(self.Ngrids)
# Which sightlines are on this rank
indx = np.where(np.isin(x, f['DM/x'][:]))[0]
if indx.size == 0:
# Some ranks may not hold any sightlines at all
print('The sightline coordinates are not on density grids on file ', fn, flush=True)
print("The x density grid coordinates are = ", f['DM/x'][:], flush=True)
continue
xstart, xend = indx[0], indx[-1]
indy = np.where(np.isin(y, f['DM/y'][:]))[0]
if indy.size == 0:
# Some ranks may not hold any sightlines at all
print("The y density grid coordinates are = ", f['DM/y'][:], flush=True)
print("The y desnity grid coordinates are = ", f['DM/y'][:], flush=True)
continue
ystart, yend = indy[0], indy[-1]
print('Sightlines on Rank =', self.rank, (int(xstart), int(xend)), (int(ystart), int(yend)) ,flush=True)
# i, j are indices for the final flux map (Ngrids * Ngrids)
tau_conv = np.zeros(shape=(indx.size, indy.size, Nz))
for i in range(indx.size):
if self.rank ==1:
print(str(int(100*c/len(fnames)))+'%', flush=True )
# Indices on f['DM/dens'] map
ic = x[indx[i]] - f['DM/x'][0]
for j in range(indy.size):
# Indices on f['DM/dens'] map
jc = y[indy[j]] - f['DM/y'][0]
dens = f['DM/dens'][ic,jc,:]
tau_real = self.get_tau_real(f['DM/dens'][ic,jc,:])
# Peculiar velocity addition
ind = np.where((dens != 0))
vel_pec = np.zeros_like(f['DM/pz'][ic,jc,:])
# Convert momentum to velocity
vel_pec[ind] = f['DM/pz'][ic,jc,:][ind]/dens[ind]
vel_pec = gaussian_filter1d(vel_pec, self.SmLV)
dens = gaussian_filter1d(dens, self.SmLD)
u0 = up + vel_pec
btherm = self.get_btherm(dens)
# To avoid division by 0: if btherm == 0, pass a nonzero value, since
# tau_real is 0 in that voxel anyway and tau_conv will be 0.
btherm[np.where(btherm==0)] = 1.0
for k in range(Nz):
dvel = np.abs(up[k]-u0)
# Periodic Boundary
indv = np.where(dvel > dvbin*Nz/2)
dvel[indv] = dvbin*Nz - dvel[indv]
Voight = (1/btherm)*np.exp(-(dvel/btherm)**2)
tau_conv[i,j,k] = np.sum(tau_real*Voight*dvbin)
# save the tau_conv on file for density files containing the desired sightlines
with h5py.File(fn.replace('densfield','fgpa_LessMem'), 'w') as fw:
fw['tau_conv'] = tau_conv
fw['indx'] = indx
fw['indy'] = indy
self.comm.Barrier()
if self.rank==0:
# Read the saved tau_conv files
tau_conv = self.add_up_tau_conv(Nz=Nz)
else:
tau_conv = None
self.comm.Barrier()
print('Rank', self.rank, 'is done with tau_conv', flush=True)
return tau_conv
def add_up_tau_conv(self, Nz):
"""Add individual tau_conv files to form the full map"""
tau_conv = -1*np.ones(shape=(self.Ngrids, self.Ngrids, Nz))
tau_files = glob.glob(os.path.join(self.savedir,'*_fgpa_LessMem.hdf5'))
for fn in tau_files:
with h5py.File(fn,'r') as f:
indx = f['indx'][:]
indy = f['indy'][:]
indx, indy = np.meshgrid(indx,indy, indexing='ij')
tau_conv[indx, indy, :] = f['tau_conv'][:]
assert np.all(tau_conv != -1)
return tau_conv
def get_tau_real(self, Delta):
""" Get tau in real space
The amplitude needs to get fixed with mean
observed flux or 1D power
z : redshift
"""
return (self.lambda_Lya*self.sigma_Lya/cosmo.H(self.z).value)*self.get_nHI(Delta)
def get_nHI(self, Delta):
""" Calculate Neutral Hydrogen Density
The amplitude needs to get fixed with mean flux
"""
return Delta**(2-0.7*(self.gamma -1))
def get_btherm(self, Delta):
""" Thermal Doppler parameter in km/s"""
return np.sqrt(2*self.kB*self.get_Temp(Delta)/self.mp)/1000
def get_Temp(self, Delta):
""" Temperature density relation
Delta : (1 + delta_b)
"""
return self.T0*Delta**(self.gamma-1)
|
StarcoderdataPython
|
1652657
|
from flask import Flask
# from config import Config
app = Flask(__name__)
from application import routes
|
StarcoderdataPython
|
9793758
|
<gh_stars>1-10
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from benchmarks import media_router_dialog_metric
from benchmarks import media_router_cpu_memory_metric
from telemetry.page import page_test
class MediaRouterDialogTest(page_test.PageTest):
"""Performs a measurement of Media Route dialog latency."""
def __init__(self):
super(MediaRouterDialogTest, self).__init__()
self._metric = media_router_dialog_metric.MediaRouterDialogMetric()
def DidNavigateToPage(self, page, tab):
self._metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
self._metric.Stop(page, tab)
self._metric.AddResults(tab, results)
class MediaRouterCPUMemoryTest(page_test.PageTest):
"""Performs a measurement of Media Route CPU/memory usage."""
def __init__(self):
super(MediaRouterCPUMemoryTest, self).__init__()
self._metric = media_router_cpu_memory_metric.MediaRouterCPUMemoryMetric()
def ValidateAndMeasurePage(self, page, tab, results):
self._metric.AddResults(tab, results)
|
StarcoderdataPython
|
1643201
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os.path
MPATH = "44'/77'/"
WIF_PREFIX = 212 # 212 = d4
MAGIC_BYTE = 30
TESTNET_WIF_PREFIX = 239
TESTNET_MAGIC_BYTE = 139
DEFAULT_PROTOCOL_VERSION = 70913
MINIMUM_FEE = 0.0001 # minimum QMC/kB
starting_width = 933
starting_height = 666
APPDATA_DIRNAME = ".QMCTorrentTool"
home_dir = os.path.expanduser('~')
user_dir = os.path.join(home_dir, APPDATA_DIRNAME)
log_File = os.path.join(user_dir, 'lastLogs.html')
masternodes_File = 'masternodes.json'
rpc_File = 'rpcServer.json'
cache_File = 'cache.json'
DEFAULT_RPC_CONF = {
"rpc_ip": "127.0.0.1",
"rpc_port": 55777,
"rpc_user": "myUsername",
"rpc_password": "<PASSWORD>"
}
DEFAULT_MN_CONF = {
"name": "",
"ip": "",
"port": 51472,
"mnPrivKey": "",
"isTestnet": 0,
"isHardware": True,
"hwAcc": 0,
"collateral": {}
}
DEFAULT_CACHE = {
"lastAddress": "",
"window_width": starting_width,
"window_height": starting_height,
"splitter_sizes": [342, 133],
"mnList_order": {},
"useSwiftX": False,
"votingMasternodes": [],
"votingDelayCheck": False,
"votingDelayNeg": 0,
"votingDelayPos": 300
}
|
StarcoderdataPython
|
124186
|
from pathlib import Path
import configparser
from logger import logger
def change_config(**options):
"""takes arbitrary keyword arguments and
writes their values into the config"""
# overwrite values
for k, v in options.items():
config.set('root', k, str(v))
# write back, but without the mandatory header
config_string = '\n'.join(['{}={}'.format(k, v)
for (k, v) in config['root'].items()])
with open(str(config_path), 'w') as f:
f.write(config_string)
f.write('\n')
def get_config(key):
return config['root'][key]
# load config file for both server.py and fader.py
config = None
config_path = None
try:
config_path = Path(Path(__file__).resolve().parent,
Path('../config')).resolve()
with open(str(config_path), 'r') as f:
config = configparser.RawConfigParser()
config.read_string('[root]\n' + f.read())
if not 'raspberry_port' in config['root']:
# for the port I just went with some random unassigned port from this list:
# https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=Unassigned
change_config(raspberry_port=3546)
if not 'raspberry_ip' in config['root']:
# 0.0.0.0 works if you send requests from another local machine to the raspberry
# 'localhost' would only allow requests from within the raspberry
change_config(raspberry_ip='0.0.0.0')
except FileNotFoundError:
logger.warning(
'config file could not be found at {}! falling back to default port 3546'.format(config_path))
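# Hedged usage sketch (assumes this module is importable as `config`; the
# port value is arbitrary):
#
#     from config import get_config, change_config
#     print(get_config('raspberry_port'))
#     change_config(raspberry_port='3547')  # persists the new value to ../config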
|
StarcoderdataPython
|
6501686
|
<filename>arachnado/downloadermiddlewares/droprequests.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import warnings
from scrapy.exceptions import IgnoreRequest
class DropRequestsMiddleware:
"""
Downloader middleware to drop a request if a certain condition is met.
It calls ``spider.should_drop_request(request)`` method to check if a
request should be downloaded or dropped; spider must implement this method.
"""
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
return cls(stats=crawler.stats)
def process_request(self, request, spider):
if not hasattr(spider, 'should_drop_request'):
return
if not callable(spider.should_drop_request):
warnings.warn('spider %s has "should_drop_request" attribute, '
'but it is not callable' % spider)
return
if spider.should_drop_request(request):
self.stats.inc_value("DropRequestsMiddleware/dropped")
raise IgnoreRequest()
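# Hedged sketch (not part of Arachnado itself) of a spider providing the hook
# this middleware calls; the spider name and URL rule are made up.
#
#     import scrapy
#
#     class ExampleSpider(scrapy.Spider):
#         name = 'example'
#
#         def should_drop_request(self, request):
#             # drop requests pointing at a hypothetical logout endpoint
#             return 'logout' in request.url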
|
StarcoderdataPython
|
9617432
|
<filename>django_cloud_deploy/cli/prompt.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prompts the user for information e.g. project name."""
import abc
import enum
import functools
import importlib
import os.path
import random
import re
import string
import sys
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
import warnings
from django_cloud_deploy import workflow
from django_cloud_deploy.cli import io
from django_cloud_deploy.cloudlib import auth
from django_cloud_deploy.cloudlib import billing
from django_cloud_deploy.cloudlib import project
from django_cloud_deploy.skeleton import utils
from django_cloud_deploy.utils import webbrowser
from google.oauth2 import service_account
import psycopg2
class Command(enum.Enum):
NEW = 1
UPDATE = 2
CLOUDIFY = 3
def _ask_prompt(question: str,
console: io.IO,
validate: Optional[Callable[[str], None]] = None,
default: Optional[str] = None) -> str:
"""Used to ask for a single string value.
Args:
question: Question shown to the user on the console.
console: Object to use for user I/O.
validate: Function used to check if value provided is valid. It should
            raise a ValueError if the value fails to validate.
default: Default value if user provides no value. (Presses enter) If
default is None, the user must provide an answer that is valid.
Returns:
The value entered by the user.
"""
validate = validate or (lambda x: None)
while True:
answer = console.ask(question)
if default and not answer:
answer = default
try:
validate(answer)
break
except ValueError as e:
console.error(e)
return answer
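# Illustrative call (added comment sketch): `console` is any io.IO
# implementation and the default value shown is hypothetical.
#
#     name = _ask_prompt(
#         'Enter a project name or leave blank to use [django-project]: ',
#         console,
#         default='django-project')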
def _multiple_choice_prompt(question: str,
options: List[str],
console: io.IO,
default: Optional[int] = None) -> Optional[int]:
"""Used to prompt user to choose from a list of values.
Args:
question: Question shown to the user on the console. Should have
a {} to insert a list of enumerated options.
options: Possible values user should choose from.
console: Object to use for user I/O.
default: Default value if user provides no value. (Presses enter) If
default is None the user is forced to choose a value in the
option list.
Typical usage:
        # With default=None the user must pick one of the options.
choice = _multiple_choice_prompt('Choose an option:\n{}\n',
['Chicken', 'Salad', 'Burger'],
console,
default=None)
Returns:
        The zero-based index of the choice made by the user. If default is
        None, it is guaranteed to be a valid index into options; otherwise it
        may also be the default value.
"""
assert '{}' in question
assert len(options) > 0
options_formatted = [
'{}. {}'.format(str(i), opt) for i, opt in enumerate(options, 1)
]
    options_text = '\n'.join(options_formatted)
    while True:
        answer = console.ask(question.format(options_text))
        if not answer and default:
            return default
        try:
            _multiple_choice_validate(answer, len(options_formatted))
break
except ValueError as e:
console.error(e)
return int(answer) - 1
def _multiple_choice_validate(s: str, len_options: int):
"""Validates the option chosen is valid.
Args:
s: Value to validate.
len_options: Number of possible options for the user.
Raises:
ValueError: If the answer is not valid.
"""
    if not s:
        raise ValueError('Please enter a value between {} and {}'.format(
            1, len_options))
    if not str.isnumeric(s):
        raise ValueError('Please enter a numeric value')
    if 1 <= int(s) <= len_options:
        return
    else:
        raise ValueError('Please enter a value between {} and {}'.format(
            1, len_options))
def binary_prompt(question: str, console: io.IO,
default: Optional[bool] = None) -> bool:
"""Used to prompt user to choose from a yes or no question.
Args:
question: Question shown to the user on the console.
console: Object to use for user I/O.
default: Default value if user provides no value. (Presses enter) If
default is None the user is forced to choose a value (y/n).
Returns:
The bool representation of the choice of the user. Yes is True.
"""
while True:
answer = console.ask(question).lower()
if default is not None and not answer:
return default
try:
_binary_validate(answer)
break
except ValueError as e:
console.error(e)
return answer == 'y'
def _binary_validate(s: str):
"""Ensures value is yes or no.
Args:
s: Value to validate.
"""
if s.lower() not in ['y', 'n']:
raise ValueError('Please respond using "y" or "n"')
return
def _password_prompt(question: str, console: io.IO) -> str:
"""Used to prompt user to choose a password field.
Args:
console: Object to use for user I/O.
question: Question shown to the user on the console.
Returns:
The password provided by the user.
"""
console.tell(question)
while True:
password1 = console.getpass('Password: ')
try:
_password_validate(password1)
except ValueError as e:
console.error(e)
continue
password2 = console.getpass('Password (again): ')
if password1 != password2:
console.error('Passwords do not match, please try again')
continue
return password1
def _password_validate(s):
"""Validates that a string is a valid password.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if len(s) < 6:
raise ValueError('Passwords must be at least 6 characters long')
allowed_characters = frozenset(string.ascii_letters + string.digits +
string.punctuation)
    if not frozenset(s).issubset(allowed_characters):
raise ValueError('Invalid character in password: '
'use letters, numbers and punctuation')
return
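# Worked examples for the rule above (added comment; values are illustrative):
# 'abc123!' passes (>= 6 chars, all letters/digits/punctuation), 'abc' fails
# because it is too short, and 'abcdé1' fails because 'é' is not in the
# allowed character set.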
def _database_username_validate(s):
"""Validates that a string is a valid database user name.
A valid user name should contain 1 to 63 characters. Only numbers, letters
and _ are accepted. It should start with a letter.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if len(s) < 1 or len(s) > 63:
raise ValueError('Database user name must be 1 to 63 characters long')
if s[0] not in string.ascii_letters:
raise ValueError('Database user name must start with a letter')
allowed_characters = frozenset(string.ascii_letters + string.digits + '_')
    if not frozenset(s).issubset(allowed_characters):
raise ValueError('Invalid character in database user name. Only '
'numbers, letters, and _ are acceptable.')
class Prompt(abc.ABC):
@abc.abstractmethod
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Prompts the user if the required argument isn't already present.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
e.g. "[1 of 12]"
args: Dictionary holding the results of previous prompts and
command-line arguments.
Returns:
A copy of args plus new argument provided by this prompt.
"""
pass
@abc.abstractmethod
def _is_valid_passed_arg(self, console: io.IO, step: str,
value: Optional[str],
validate: Callable[[str], None]) -> bool:
"""Used to validate if the user passed in a parameter as a flag.
All prompts that retrieve a parameter should call this function first.
        This allows parameters passed in via flags to be counted as a step,
        which keeps the total number of steps fixed and easier to manage.
Returns:
A boolean indicating if the passed in argument is valid.
"""
pass
class TemplatePrompt(Prompt):
"""Base template for all parameter prompts interacting with the user.
They must have a prompt method that calls one of the _x_prompt functions.
They must own only one parameter.
They should have a validate function.
    They should call _is_valid_passed_arg at the beginning of the prompt method.
"""
# Parameter must be set for dictionary key
PARAMETER = None
MESSAGE = ''
MESSAGE_DEFAULT = 'or leave blank to use [{}]: '
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
pass
def _is_valid_passed_arg(self, console: io.IO, step: str,
value: Optional[str],
validate: Callable[[str], None]) -> bool:
"""Checks if the passed in argument via the command line is valid.
All prompts that collect a parameter should call this function first.
        It uses the validate function of the prompt. A parameter passed in via
        a flag is also counted as a step, which keeps the total number of
        steps fixed and easier to manage.
Returns:
A boolean indicating if the passed in argument is valid.
"""
if value is None:
return False
try:
validate(value)
except ValueError as e:
console.error(e)
quit()
msg = '{}: {}'.format(self.MESSAGE.format(step), value)
console.tell(msg)
return True
class StringTemplatePrompt(TemplatePrompt):
"""Template for a simple string Prompt.
Any prompt that only needs to ask the user for a value without additional
branching or logic should derive from this class. Classes inheriting from
this should set the variables below.
"""
# The key used for the args dictionary, eg. project_id
PARAMETER = ''
# Value user can use if they press enter on the command line, eg django-1234
DEFAULT_VALUE = ''
# Message to prompt the user on the command-line, eg. Please choose a
# project id.
MESSAGE = ''
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step,
args.get(self.PARAMETER, None),
self._validate):
return new_args
base_message = self.MESSAGE.format(step)
if self.DEFAULT_VALUE:
default_message = self.MESSAGE_DEFAULT.format(self.DEFAULT_VALUE)
msg = '\n'.join([base_message, default_message])
else:
msg = base_message
answer = _ask_prompt(msg,
console,
self._validate,
default=self.DEFAULT_VALUE)
new_args[self.PARAMETER] = answer
return new_args
class GoogleProjectName(TemplatePrompt):
PARAMETER = 'project_name'
MESSAGE = '{} Enter a Google Cloud Platform project name'
def __init__(self, project_client: project.ProjectClient):
self.project_client = project_client
    def _validate(self, project_id: str,
                  project_creation_mode: workflow.ProjectCreationMode, s: str):
        """Validates a Google Cloud Platform project name.
Args:
project_id: Used to retrieve name when project already exists.
project_creation_mode: Used to check if project already exists.
s: The string to validate
"""
if not (4 <= len(s) <= 30):
raise ValueError(
('Invalid Google Cloud Platform project name "{}": '
'must be between 4 and 30 characters').format(s))
if self._is_new_project(project_creation_mode):
return
assert project_id is not None
project_name = self.project_client.get_project(project_id)['name']
if project_name != s:
raise ValueError('Wrong project name given for project id.')
    def _handle_new_project(self, console: io.IO, step: str, args: Dict[str, Any]):
default_answer = 'Django Project'
msg_base = self.MESSAGE.format(step)
msg_default = self.MESSAGE_DEFAULT.format(default_answer)
msg = '\n'.join([msg_base, msg_default])
project_id = args.get('project_id', None)
mode = args.get('project_creation_mode', None)
validate = functools.partial(self._validate, project_id, mode)
return _ask_prompt(msg, console, validate, default=default_answer)
def _is_new_project(self,
project_creation_mode: workflow.ProjectCreationMode
) -> bool:
must_exist = workflow.ProjectCreationMode.MUST_EXIST
return project_creation_mode != must_exist
def _handle_existing_project(self, console: io.IO, step: str,
args: Dict[str, Any]) -> str:
assert 'project_id' in args, 'project_id must be set'
project_id = args['project_id']
project_name = self.project_client.get_project(project_id)['name']
message = '{} {}: {}'.format(step, self.PARAMETER, project_name)
console.tell(message)
return project_name
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
project_id = args.get('project_id', None)
mode = args.get('project_creation_mode', None)
validate = functools.partial(self._validate, project_id, mode)
if self._is_valid_passed_arg(console, step,
args.get(self.PARAMETER, None), validate):
return new_args
project_creation_mode = args.get('project_creation_mode', None)
if self._is_new_project(project_creation_mode):
new_args[self.PARAMETER] = self._handle_new_project(
console, step, args)
else:
new_args[self.PARAMETER] = self._handle_existing_project(
console, step, args)
return new_args
class GoogleNewProjectId(TemplatePrompt):
"""Handles Project ID for new projects."""
PARAMETER = 'project_id'
MESSAGE = '{} Enter a Google Cloud Platform Project id'
def _validate(self, s: str):
"""Validates that a string is a valid project id.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
        if not re.match(r'[a-z][a-z0-9\-]{5,29}$', s):
raise ValueError(('Invalid Google Cloud Platform Project ID "{}": '
'must be between 6 and 30 characters and contain '
'lowercase letters, digits or hyphens').format(s))
def _generate_default_project_id(self, project_name=None):
default_project_id = (project_name or 'django').lower()
default_project_id = default_project_id.replace(' ', '-')
if default_project_id[0] not in string.ascii_lowercase:
default_project_id = 'django-' + default_project_id
default_project_id = re.sub(r'[^a-z0-9\-]', '', default_project_id)
return '{0}-{1}'.format(default_project_id[0:30 - 6 - 1],
                                random.randint(100000, 999999))
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step,
args.get(self.PARAMETER, None),
self._validate):
return new_args
project_name = args.get('project_name', None)
default_answer = self._generate_default_project_id(project_name)
msg_base = self.MESSAGE.format(step)
msg_default = self.MESSAGE_DEFAULT.format(default_answer)
msg = '\n'.join([msg_base, msg_default])
answer = _ask_prompt(msg,
console,
self._validate,
default=default_answer)
new_args[self.PARAMETER] = answer
return new_args
class GoogleProjectId(TemplatePrompt):
"""Logic that handles fork between Existing and New Projects."""
PARAMETER = 'project_id'
def __init__(self, project_client: project.ProjectClient,
active_account: str):
self.project_client = project_client
self.active_account = active_account
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
prompter = GoogleNewProjectId()
if args.get('use_existing_project', False):
prompter = GoogleExistingProjectId(self.project_client,
self.active_account)
return prompter.prompt(console, step, args)
class GoogleExistingProjectId(TemplatePrompt):
"""Handles Project ID for existing projects."""
PARAMETER = 'project_id'
MESSAGE = '{} Enter the <b>existing</b> Google Cloud Platform Project ID'
def __init__(self, project_client: project.ProjectClient,
active_account: str):
self.project_client = project_client
self.active_account = active_account
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Prompt the user to a Google Cloud Platform project id.
If the user supplies the project_id as a flag we want to validate that
it exists. We tell the user to supply a new one if it does not.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
backend = args.get('backend')
validate = functools.partial(self._validate, backend,
self.active_account)
if self._is_valid_passed_arg(console, step,
args.get(self.PARAMETER, None), validate):
return new_args
msg = '{}: '.format(self.MESSAGE.format(step))
answer = _ask_prompt(msg, console, validate)
new_args[self.PARAMETER] = answer
return new_args
def _validate(self, backend: str, active_account: str, project_id: str):
"""Validates that a string is a valid project id.
Args:
backend: The backend that will be used to host the app.
active_account: Account that is logged in on gcloud cli.
project_id: Id of the Google Project.
Raises:
ValueError: if the input string is not valid.
"""
        if not re.match(r'[a-z][a-z0-9\-]{5,29}$', project_id):
raise ValueError(
('Invalid Google Cloud Platform Project ID "{}": '
'must be between 6 and 30 characters and contain '
'lowercase letters, digits or hyphens').format(project_id))
if not self.project_client.project_exists(project_id):
raise ValueError('Project {} does not exist'.format(project_id))
if not self._has_correct_permissions(backend, project_id,
active_account):
msg = 'User has incorrect permissions to deploy.'
if backend == 'gae':
msg = 'User must be a Project Owner to deploy on GAE'
elif backend == 'gke':
                msg = ('User does not have the correct permissions '
                       'to deploy on GKE')
raise ValueError(msg)
def _has_correct_permissions(self, backend: str, project_id: str,
active_account: str):
"""Validates that the user has the permissions to deploy onto project.
Args:
project_id: Id of the existing project.
backend: The backend that will be used to host the app.
active_account: Account that is logged in on gcloud cli.
"""
# The user must have logged in
assert active_account not in ['', None], "User must log in via gcloud"
permissions = self.project_client.get_project_permissions(project_id)
owner_permission = list(
filter(lambda d: d.get('role') == 'roles/owner', permissions))
editor_permission = list(
filter(lambda d: d.get('role') == 'roles/editor', permissions))
owners = []
editors = []
if owner_permission:
owners = owner_permission[0].get('members', [])
if editor_permission:
editors = editor_permission[0].get('members', [])
# Regex will catch user:<EMAIL>
# and serviceAccount:<EMAIL>
# which are currently the only cases
active_account = re.escape(active_account)
active_account = r'\w+:{}'.format(active_account)
if re.search(active_account, ' '.join(owners)):
return True
if backend == 'gae': # User needs to be in owner to deploy in GAE.
return False
if re.search(active_account, ' '.join(editors)):
return True
return False
class CredentialsPrompt(TemplatePrompt):
PARAMETER = 'credentials'
def __init__(self, auth_client: auth.AuthClient):
self.auth_client = auth_client
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step,
args.get(self.PARAMETER), lambda x: x):
return new_args
if args.get('credentials_path'):
credentials_path = args.get('credentials_path')
try:
credentials = (
service_account.Credentials.from_service_account_file(
credentials_path,
scopes=[
'https://www.googleapis.com/auth/cloud-platform'
]))
new_args['credentials'] = credentials
return new_args
except Exception as e:
warnings.warn(
('File "{}" does not exist or is not a valid credentials '
'file.\n{}'.format(credentials_path, e)))
console.tell(
('{} In order to deploy your application, you must allow Django '
'Deploy to access your Google account.').format(step))
create_new_credentials = True
active_account = self.auth_client.get_active_account()
if active_account: # The user has already logged in before
msg = ('You have logged in with account [{}]. Do you want to '
'use it? [Y/n]: ').format(active_account)
use_active_credentials = binary_prompt(msg, console, default=True)
create_new_credentials = not use_active_credentials
if create_new_credentials:
creds = self.auth_client.create_default_credentials()
else:
creds = self.auth_client.get_default_credentials()
if not creds:
console.ask(
('Warning: You are using a service account to authenticate '
'gcloud, or your credentials file is missing, or it '
'exists but does not have the correct format. Press Enter '
'to create a new credential.'))
creds = self.auth_client.create_default_credentials()
new_args[self.PARAMETER] = creds
return new_args
class BillingPrompt(TemplatePrompt):
"""Allow the user to select a billing account to use for deployment."""
PARAMETER = 'billing_account_name'
MESSAGE = '{} Enter a billing account'
def __init__(self, billing_client: billing.BillingClient = None):
self.billing_client = billing_client
def _get_new_billing_account(self, console,
existing_billing_accounts: List[Dict[str, Any]]
) -> str:
"""Ask the user to create a new billing account and return name of it.
Args:
existing_billing_accounts: User's billing accounts before creation
of new accounts.
Returns:
Name of the user's newly created billing account.
"""
webbrowser.open_url('https://console.cloud.google.com/billing/create')
existing_billing_account_names = [
account['name'] for account in existing_billing_accounts
]
console.tell('Waiting for billing account to be created.')
while True:
billing_accounts = self.billing_client.list_billing_accounts(
only_open_accounts=True)
if len(existing_billing_accounts) != len(billing_accounts):
billing_account_names = [
account['name'] for account in billing_accounts
]
diff = list(
set(billing_account_names) -
set(existing_billing_account_names))
return diff[0]
time.sleep(2)
def _does_project_exist(
self, project_creation_mode: Optional[workflow.ProjectCreationMode]
) -> bool:
must_exist = workflow.ProjectCreationMode.MUST_EXIST
return project_creation_mode == must_exist
def _has_existing_billing_account(self, console: io.IO, step: str,
args: Dict[str, Any]) -> (Optional[str]):
assert 'project_id' in args, 'project_id must be set'
project_id = args['project_id']
billing_account = (self.billing_client.get_billing_account(project_id))
if not billing_account.get('billingEnabled', False):
return None
msg = ('{} Billing is already enabled on this project.'.format(step))
console.tell(msg)
return billing_account.get('billingAccountName')
def _handle_existing_billing_accounts(self, console, billing_accounts):
question = ('You have the following existing billing accounts:\n{}\n'
'Please enter your numeric choice or press [Enter] to '
'create a new billing account: ')
options = [info['displayName'] for info in billing_accounts]
new_billing_account = -1
answer = _multiple_choice_prompt(question, options, console,
new_billing_account)
if answer == new_billing_account:
return self._get_new_billing_account(console, billing_accounts)
val = billing_accounts[answer]['name']
return val
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step, args.get(self.PARAMETER),
self._validate):
return new_args
project_creation_mode = args.get('project_creation_mode')
if self._does_project_exist(project_creation_mode):
billing_account = self._has_existing_billing_account(
console, step, args)
if billing_account is not None:
new_args[self.PARAMETER] = billing_account
return new_args
billing_accounts = self.billing_client.list_billing_accounts(
only_open_accounts=True)
console.tell(
('{} In order to deploy your application, you must enable billing '
'for your Google Cloud Project.').format(step))
# If the user has existing billing accounts, we let the user pick one
if billing_accounts:
val = self._handle_existing_billing_accounts(
console, billing_accounts)
new_args[self.PARAMETER] = val
return new_args
# If the user does not have existing billing accounts, we direct
# the user to create a new one.
console.tell('You do not have existing billing accounts.')
console.ask('Press [Enter] to create a new billing account.')
val = self._get_new_billing_account(console, billing_accounts)
new_args[self.PARAMETER] = val
return new_args
def _validate(self, s):
"""Validates that a string is a valid billing account.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
billing_accounts = self.billing_client.list_billing_accounts(
only_open_accounts=True)
billing_account_names = [
account['name'] for account in billing_accounts
]
if s not in billing_account_names:
raise ValueError(
'The provided billing account does not exist or is not eligible to use.'
)
class GroupingPrompt(TemplatePrompt):
"""A prompt which groups other prompts."""
    def parse_step_info(self, step: str) -> Tuple[str, str]:
"""Get the current step and total steps from the given step string.
Step string should look like "<b>[2/12]</b>"
Args:
step: A string represents a step.
Returns:
The tuple (current_step, total_step)
"""
step_info = re.findall(r'\[[^\[\]]+\]', step)[0][1:-1].split('/')
return step_info[0], step_info[1]
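# Worked example (added comment): for a step label such as '<b>[2/12]</b>',
# parse_step_info() extracts the bracketed part and returns ('2', '12'), which
# the grouped prompts below use to build sub-step labels like '[2.a/12]'.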
class NewDatabaseInformationPrompt(GroupingPrompt):
"""Allow the user to enter the information about the database."""
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns:
A Copy of args + the new parameter collected.
"""
new_args = dict(args)
database_arguments = self._try_get_database_arguments(new_args)
if self._is_valid_passed_arg(console, step, database_arguments,
self._validate):
return new_args
current_step, total_steps = self.parse_step_info(step)
msg = '[{}.a/{}] Enter the master user name for the database: '.format(
current_step, total_steps)
username = _ask_prompt(msg, console, _database_username_validate)
msg = '[{}.b/{}] Enter a password for the database user "{}"'.format(
current_step, total_steps, username)
password = _password_prompt(msg, console)
new_args['is_new_database'] = True
new_args['database_username'] = username
new_args['database_password'] = password
return new_args
def _try_get_database_arguments(self, args: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
try:
return {
                'database_username': args['database_username'],
                'database_password': args['database_password'],
}
except KeyError:
return None
def _validate(self, database_arguments: Dict[str, Any]):
user = database_arguments.get('database_username')
password = database_arguments.get('database_password')
_database_username_validate(user)
_password_validate(password)
class ExistingDatabaseInformationPrompt(GroupingPrompt):
"""Allow the user to enter the information about an existing database."""
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns:
A Copy of args + the new parameter collected.
"""
new_args = dict(args)
database_arguments = self._try_get_database_arguments(new_args)
if self._is_valid_passed_arg(console, step, database_arguments,
self._validate):
return new_args
current_step, total_steps = self.parse_step_info(step)
while True:
msg = ('[{}.a/{}] Enter the public ip or host name of your '
'database: ').format(current_step, total_steps)
host = _ask_prompt(msg, console)
default_port = 5432
msg = ('[{}.b/{}] Enter the port number of your database or '
'press Enter to use "{}":').format(current_step, total_steps,
default_port)
port = _ask_prompt(msg, console, default=default_port)
msg = ('[{}.c/{}] Enter the master user name for the '
'database: ').format(current_step, total_steps)
username = _ask_prompt(msg, console, _database_username_validate)
msg = '[{}.d/{}] Enter password for the database user "{}"'.format(
current_step, total_steps, username)
password = _password_prompt(msg, console)
msg = '[{}.e/{}] Enter the name of your database: '.format(
current_step, total_steps)
database_name = _ask_prompt(msg, console)
new_args['is_new_database'] = False
new_args['database_username'] = username
new_args['database_password'] = password
new_args['database_host'] = host
new_args['database_port'] = int(port)
new_args['database_name'] = database_name
try:
self._validate(new_args)
break
except ValueError as e:
console.error(e)
return new_args
def _try_get_database_arguments(self, args: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
try:
return {
                'database_host': args['database_host'],
                'database_port': args['database_port'],
                'database_username': args['database_username'],
                'database_password': args['database_password'],
                'database_name': args['database_name'],
}
except KeyError:
return None
def _validate(self, database_arguments: Dict[str, Any]):
"""Validate the given database info by connecting to it.
Args:
database_arguments: Dictionary holding the information of database.
Raises:
ValueError: If failed to connect to the provided database.
"""
host = database_arguments.get('database_host')
port = database_arguments.get('database_port')
user = database_arguments.get('database_username')
password = database_arguments.get('database_password')
database = database_arguments.get('database_name')
conn = None
try:
conn = psycopg2.connect(host=host,
database=database,
user=user,
password=password,
port=port)
except psycopg2.Error as e:
raise ValueError(
('Error: Failed to connect to the provided database.\n'
'{}').format(e))
finally:
if conn:
conn.close()
class DatabasePrompt(TemplatePrompt):
"""Allow the user to enter the information about the db for deployment."""
PARAMETER = 'database_information'
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns:
A Copy of args + the new parameter collected.
"""
new_args = dict(args)
msg = ('{} Do you want to create a new database or use an existing '
'database for deployment? [y/N]: ').format(step)
use_existing_database = binary_prompt(msg, console, default=False)
if use_existing_database:
return ExistingDatabaseInformationPrompt().prompt(
console, step, new_args)
else:
return NewDatabaseInformationPrompt().prompt(
console, step, new_args)
class PostgresPasswordPrompt(TemplatePrompt):
"""Allow the user to enter a Django Postgres password."""
PARAMETER = 'database_password'
MESSAGE = '{} Enter a password for the default database user "postgres"'
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step, args.get(self.PARAMETER),
self._validate):
return new_args
password = _password_prompt(self.MESSAGE.format(step), console)
new_args[self.PARAMETER] = password
return new_args
def _validate(self, s: str):
_password_validate(s)
class DjangoFilesystemPath(TemplatePrompt):
"""Allow the user to indicate the file system path for their project."""
PARAMETER = 'django_directory_path'
MESSAGE = '{} Enter a new directory path to store project source'
def _ask_to_replace(self, console, directory):
        msg = (('The directory \'{}\' already exists, '
                'replace its contents? [y/N]: ').format(directory))
return binary_prompt(msg, console, default=False)
def _ask_for_directory(self, console, step, args) -> str:
home_dir = os.path.expanduser('~')
# TODO: Remove filesystem-unsafe characters. Implement a validation
# method that checks for these.
default_dir = os.path.join(
home_dir,
args.get('project_name',
'django-project').lower().replace(' ', '-'))
msg_base = self.MESSAGE.format(step)
msg_default = self.MESSAGE_DEFAULT.format(default_dir)
msg = '\n'.join([msg_base, msg_default])
return _ask_prompt(msg, console, default=default_dir)
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step,
args.get(self.PARAMETER), lambda x: x):
return new_args
while True:
directory = self._ask_for_directory(console, step, args)
if os.path.exists(directory):
replace = self._ask_to_replace(console, directory)
if not replace:
continue
break
new_args[self.PARAMETER] = directory
return new_args
class DjangoFilesystemPathUpdate(StringTemplatePrompt):
"""Allow the user to indicate the file system path for their project."""
PARAMETER = 'django_directory_path_update'
MESSAGE = '{} Enter the django project directory path'
DEFAULT_VALUE = os.path.abspath(os.path.expanduser('.'))
def _validate(self, s: str):
"""Validates that a string is a valid Django project path.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if not os.path.exists(s):
raise ValueError(('Path ["{}"] does not exist.').format(s))
if not utils.is_valid_django_project(s):
raise ValueError(
('Path ["{}"] does not contain a valid Django project.'
).format(s))
class DjangoFilesystemPathCloudify(StringTemplatePrompt):
"""Allow the user to indicate the file system path for their project."""
PARAMETER = 'django_directory_path_cloudify'
MESSAGE = ('{} Enter the directory of the Django project you want to '
'deploy')
DEFAULT_VALUE = os.path.abspath(os.path.expanduser('.'))
def _validate(self, s: str):
"""Validates that a string is a valid Django project path.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if not os.path.exists(s):
raise ValueError(('Path ["{}"] does not exist.').format(s))
if not utils.is_valid_django_project(s):
raise ValueError(
('Path ["{}"] does not contain a valid Django project.'
).format(s))
class DjangoProjectNamePrompt(StringTemplatePrompt):
"""Allow the user to enter a Django project name."""
PARAMETER = 'django_project_name'
MESSAGE = '{} Enter a Django project name'
DEFAULT_VALUE = 'mysite'
def _validate(self, s: str):
"""Validates that a string is a valid Django project name.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if not s.isidentifier():
raise ValueError(('Invalid Django project name "{}": '
'must be a valid Python identifier').format(s))
class DjangoProjectNamePromptCloudify(TemplatePrompt):
"""Allow the user to enter a Django project name.
The prompt will try guessing the Django project name first. If it failed,
ask the user to provide it.
"""
PARAMETER = 'django_project_name_cloudify'
MESSAGE = '{} Enter the Django project name'
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns:
A Copy of args + the new parameter collected.
"""
assert 'django_directory_path_cloudify' in args, (
'The absolute path of Django project must be provided')
new_args = dict(args)
django_directory_path = args['django_directory_path_cloudify']
django_project_name = utils.get_django_project_name(
django_directory_path)
new_args[self.PARAMETER] = django_project_name
if self._is_valid_passed_arg(console, step,
new_args.get(self.PARAMETER, None),
self._validate):
return new_args
msg = self.MESSAGE.format(step)
answer = _ask_prompt(msg, console, self._validate)
new_args[self.PARAMETER] = answer
return new_args
def _validate(self, s: str):
"""Validates that a string is a valid Django project name.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if not s.isidentifier():
raise ValueError(('Invalid Django project name "{}": '
'must be a valid Python identifier').format(s))
class DjangoAppNamePrompt(TemplatePrompt):
    """Allow the user to enter a Django app name.
    We do not use StringTemplatePrompt here because the validate function
    needs to be set up with an extra argument (the Django project name).
"""
PARAMETER = 'django_app_name'
MESSAGE = '{} Enter a Django app name'
DEFAULT_VALUE = 'home'
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
django_project_name = args.get('django_project_name', None)
validate = functools.partial(self._validate, django_project_name)
new_args = dict(args)
if self._is_valid_passed_arg(console, step,
args.get(self.PARAMETER, None), validate):
return new_args
base_message = self.MESSAGE.format(step)
default_message = self.MESSAGE_DEFAULT.format(self.DEFAULT_VALUE)
msg = '\n'.join([base_message, default_message])
answer = _ask_prompt(msg, console, validate, default=self.DEFAULT_VALUE)
new_args[self.PARAMETER] = answer
return new_args
def _validate(self, django_project_name: str, s: str):
"""Validates that a string is a valid Django project name.
Args:
s: The string to validate.
django_project_name: Project name must be different from app name.
Raises:
ValueError: if the input string is not valid.
"""
if not s.isidentifier():
raise ValueError(('Invalid Django app name "{}": '
'must be a valid Python identifier').format(s))
if django_project_name == s:
raise ValueError(
                ('Invalid Django app name "{}": '
                 'must be different from the Django project name').format(s))
class DjangoSuperuserLoginPrompt(StringTemplatePrompt):
"""Allow the user to enter a Django superuser login."""
PARAMETER = 'django_superuser_login'
MESSAGE = '{} Enter a Django superuser login name'
DEFAULT_VALUE = 'admin'
def _validate(self, s: str):
"""Validates that a string is a valid Django superuser login.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if not s.isalnum():
raise ValueError(('Invalid Django superuser login "{}": '
                              'must be alphanumeric').format(s))
class DjangoSuperuserPasswordPrompt(TemplatePrompt):
"""Allow the user to enter a password for the Django superuser."""
PARAMETER = 'django_superuser_password'
MESSAGE = '{} Enter a password for the Django superuser'
def _get_prompt_message(self, arguments: Dict[str, Any]) -> str:
if 'django_superuser_login' in arguments:
return 'Enter a password for the Django superuser "{}"'.format(
arguments['django_superuser_login'])
else:
return 'Enter a password for the Django superuser'
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step, args.get(self.PARAMETER),
self._validate):
return new_args
msg = self._get_prompt_message(args)
question = '{} {}'.format(step, msg)
answer = _password_prompt(question, console)
new_args[self.PARAMETER] = answer
return new_args
def _validate(self, s: str):
return _password_validate(s)
class DjangoSuperuserEmailPrompt(StringTemplatePrompt):
"""Allow the user to enter a Django email address."""
PARAMETER = 'django_superuser_email'
    MESSAGE = ('{} Enter an email address for the Django superuser')
DEFAULT_VALUE = '<EMAIL>'
def _validate(self, s: str):
"""Validates that a string is a valid Django superuser email address.
Args:
s: The string to validate.
Raises:
ValueError: if the input string is not valid.
"""
if not re.match(r'[^@]+@[^@]+\.[^@]+', s):
raise ValueError(('Invalid Django superuser email address "{}": '
'the format should be like '
'"<EMAIL>"').format(s))
class DjangoSettingsPathPrompt(StringTemplatePrompt):
"""Allow the user to enter the settings file path of a Django project.
When deploying existing Django projects, sometimes settings file of the
Django project is not at the default location. For example, by default a
settings file should be at <project_name>/settings.py, but actually the
Django project is using <project_name>/settings/prod.py. It is hard to
automatically detect the location of settings file. We need to ask user to
provide the accurate settings file path.
"""
PARAMETER = 'django_settings_path'
MESSAGE = ('{} Enter the path of the Django settings file that should be '
'used for deployment: ')
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns:
A Copy of args + the new parameter collected.
"""
new_args = dict(args)
django_directory_path = args.get('django_directory_path_cloudify', None)
validate = functools.partial(self._validate, django_directory_path)
if self._is_valid_passed_arg(console, step, args.get(self.PARAMETER),
validate):
return new_args
base_message = self.MESSAGE.format(step)
default_settings_path = utils.guess_settings_path(django_directory_path)
if default_settings_path:
default_message = '[{}]: '.format(default_settings_path)
msg = '\n'.join([base_message, default_message])
else:
msg = base_message
answer = _ask_prompt(msg,
console,
validate,
default=default_settings_path)
new_args[self.PARAMETER] = answer
return new_args
def _validate(self, project_dir: str, django_settings_path: str):
"""Validates that a string is a valid Django settings file path.
Args:
project_dir: Absolute path of the existing Django project.
django_settings_path: The string to validate.
Raises:
ValueError: if the input string is not a valid settings file path.
"""
if not os.path.exists(django_settings_path):
raise ValueError(
'Path ["{}"] does not exist.'.format(django_settings_path))
root, ext = os.path.splitext(django_settings_path)
if ext != '.py':
raise ValueError(
'["{}"] is not a .py file.'.format(django_settings_path))
module_relative_path = os.path.relpath(root, project_dir)
module_name = module_relative_path.replace('/', '.')
sys.path.append(project_dir)
spec = importlib.util.spec_from_file_location(module_name,
django_settings_path)
module = importlib.util.module_from_spec(spec)
try:
spec.loader.exec_module(module)
except Exception as e:
raise ValueError(('Not able to load Django project settings module '
'on {}'.format(django_settings_path))) from e
finally:
sys.path.pop()
class DjangoRequirementsPathPrompt(StringTemplatePrompt):
"""Allow the user to enter the path of requirements.txt of Django project."""
PARAMETER = 'django_requirements_path'
MESSAGE = '{} Enter the path of the requirements.txt'
def prompt(self, console: io.IO, step: str,
args: Dict[str, Any]) -> Dict[str, Any]:
"""Extracts user arguments through the command-line.
Args:
console: Object to use for user I/O.
step: Message to present to user regarding what step they are on.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns:
A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if self._is_valid_passed_arg(console, step, args.get(self.PARAMETER),
self._validate):
return new_args
base_message = self.MESSAGE.format(step)
django_directory_path = args.get('django_directory_path_cloudify', None)
django_project_name = utils.get_django_project_name(
django_directory_path)
default_requirements_path = utils.guess_requirements_path(
django_directory_path, django_project_name)
new_args[self.PARAMETER] = default_requirements_path
        if self._is_valid_passed_arg(console, step, new_args.get(self.PARAMETER),
self._validate):
return new_args
if default_requirements_path:
default_message = '[{}]: '.format(default_requirements_path)
msg = '\n'.join([base_message, default_message])
else:
msg = base_message
answer = _ask_prompt(msg,
console,
self._validate,
default=default_requirements_path)
new_args[self.PARAMETER] = answer
return new_args
def _validate(self, django_requirements_path: str):
"""Validates that a string is a valid path of requirements.txt.
Args:
django_requirements_path: The string to validate.
Raises:
ValueError: if the input string is not a valid requirements.txt file
path.
"""
if not os.path.exists(django_requirements_path):
raise ValueError(
'File ["{}"] does not exist.'.format(django_requirements_path))
class RootPrompt(object):
"""Class at the top level that instantiates all of the Prompts."""
NEW_PROMPT_ORDER = [
'project_id',
'project_name',
'billing_account_name',
'database_password',
'django_directory_path',
'django_project_name',
'django_app_name',
'django_superuser_login',
'django_superuser_password',
'django_superuser_email',
]
UPDATE_PROMPT_ORDER = [
'database_password',
'django_directory_path_update',
]
CLOUDIFY_PROMPT_ORDER = [
'project_id',
'project_name',
'billing_account_name',
'database_password',
'django_directory_path_cloudify',
'django_project_name_cloudify',
'django_requirements_path',
'django_settings_path',
'django_superuser_login',
'django_superuser_password',
'django_superuser_email',
]
def _get_creds(self, console: io.IO, first_step: str, args: Dict[str, Any],
auth_client: auth.AuthClient):
return CredentialsPrompt(auth_client).prompt(console, first_step,
args)['credentials']
def _setup_prompts(self, creds,
active_account: str) -> Dict[str, TemplatePrompt]:
project_client = project.ProjectClient.from_credentials(creds)
billing_client = billing.BillingClient.from_credentials(creds)
return {
'project_id': GoogleProjectId(project_client, active_account),
'project_name': GoogleProjectName(project_client),
'billing_account_name': BillingPrompt(billing_client),
            'database_password': PostgresPasswordPrompt(),
'django_directory_path': DjangoFilesystemPath(),
'django_directory_path_update': DjangoFilesystemPathUpdate(),
'django_directory_path_cloudify': DjangoFilesystemPathCloudify(),
'django_project_name': DjangoProjectNamePrompt(),
'django_project_name_cloudify': DjangoProjectNamePromptCloudify(),
'django_app_name': DjangoAppNamePrompt(),
'django_requirements_path': DjangoRequirementsPathPrompt(),
'django_settings_path': DjangoSettingsPathPrompt(),
'django_superuser_login': DjangoSuperuserLoginPrompt(),
'django_superuser_password': DjangoSuperuserPasswordPrompt(),
'django_superuser_email': DjangoSuperuserEmailPrompt()
}
    def prompt(self, command: Command, console: io.IO,
               args: Dict[str, Any]) -> Dict[str, Any]:
        """Calls all of the prompts to collect all of the parameters.
Args:
command: Flag that picks what prompts are needed.
console: Object to use for user I/O.
args: Dictionary holding prompts answered by user and set up
command-line arguments.
Returns: A Copy of args + the new parameter collected.
"""
new_args = dict(args)
if new_args.get('use_existing_project', False):
new_args['project_creation_mode'] = (
workflow.ProjectCreationMode.MUST_EXIST)
prompt_order = []
if command == Command.NEW:
prompt_order = self.NEW_PROMPT_ORDER
elif command == Command.UPDATE:
prompt_order = self.UPDATE_PROMPT_ORDER
elif command == Command.CLOUDIFY:
prompt_order = self.CLOUDIFY_PROMPT_ORDER
total_steps = len(prompt_order) + 1
step_template = '<b>[{}/{}]</b>'
first_step = step_template.format(1, total_steps)
auth_client = auth.AuthClient()
creds = self._get_creds(console, first_step, args, auth_client)
active_account = auth_client.get_active_account()
new_args['credentials'] = creds
required_parameters_to_prompt = self._setup_prompts(
creds, active_account)
for i, prompt in enumerate(prompt_order, 2):
step = step_template.format(i, total_steps)
new_args = required_parameters_to_prompt[prompt].prompt(
console, step, new_args)
return new_args
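# Hedged end-to-end sketch (added; not part of the original module). A CLI
# entry point might drive the prompts roughly like this; the concrete io
# implementation name (ConsoleIO) is an assumption:
#
#     console = io.ConsoleIO()
#     answers = RootPrompt().prompt(Command.NEW, console,
#                                   {'use_existing_project': False})
#     # `answers` then holds 'credentials', 'project_id', 'project_name',
#     # 'billing_account_name', 'database_password', and the django_* values
#     # collected for the NEW workflow.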
|
StarcoderdataPython
|
12838605
|
<reponame>SMAKSS/processout-python
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
from processout.networking.request import Request
from processout.networking.response import Response
# The content of this file was automatically generated
class Activity(object):
def __init__(self, client, prefill = None):
self._client = client
self._id = None
self._project = None
self._project_id = None
self._title = None
self._content = None
self._level = None
self._created_at = None
if prefill != None:
self.fill_with_data(prefill)
@property
def id(self):
"""Get id"""
return self._id
@id.setter
def id(self, val):
"""Set id
Keyword argument:
val -- New id value"""
self._id = val
return self
@property
def project(self):
"""Get project"""
return self._project
@project.setter
def project(self, val):
"""Set project
Keyword argument:
val -- New project value"""
if val is None:
self._project = val
return self
if isinstance(val, dict):
obj = processout.Project(self._client)
obj.fill_with_data(val)
self._project = obj
else:
self._project = val
return self
@property
def project_id(self):
"""Get project_id"""
return self._project_id
@project_id.setter
def project_id(self, val):
"""Set project_id
Keyword argument:
val -- New project_id value"""
self._project_id = val
return self
@property
def title(self):
"""Get title"""
return self._title
@title.setter
def title(self, val):
"""Set title
Keyword argument:
val -- New title value"""
self._title = val
return self
@property
def content(self):
"""Get content"""
return self._content
@content.setter
def content(self, val):
"""Set content
Keyword argument:
val -- New content value"""
self._content = val
return self
@property
def level(self):
"""Get level"""
return self._level
@level.setter
def level(self, val):
"""Set level
Keyword argument:
val -- New level value"""
self._level = val
return self
@property
def created_at(self):
"""Get created_at"""
return self._created_at
@created_at.setter
def created_at(self, val):
"""Set created_at
Keyword argument:
val -- New created_at value"""
self._created_at = val
return self
def fill_with_data(self, data):
"""Fill the current object with the new values pulled from data
Keyword argument:
data -- The data from which to pull the new values"""
if "id" in data.keys():
self.id = data["id"]
if "project" in data.keys():
self.project = data["project"]
if "project_id" in data.keys():
self.project_id = data["project_id"]
if "title" in data.keys():
self.title = data["title"]
if "content" in data.keys():
self.content = data["content"]
if "level" in data.keys():
self.level = data["level"]
if "created_at" in data.keys():
self.created_at = data["created_at"]
return self
def all(self, options = {}):
"""Get all the project activities.
Keyword argument:
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/activities"
data = {
}
response = Response(request.get(path, data, options))
return_values = []
a = []
body = response.body
for v in body['activities']:
tmp = processout.Activity(self._client)
tmp.fill_with_data(v)
a.append(tmp)
return_values.append(a)
return return_values[0]
def find(self, activity_id, options = {}):
"""Find a specific activity and fetch its data.
Keyword argument:
activity_id -- ID of the activity
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/activities/" + quote_plus(activity_id) + ""
data = {
}
response = Response(request.get(path, data, options))
return_values = []
body = response.body
body = body["activity"]
obj = processout.Activity(self._client)
return_values.append(obj.fill_with_data(body))
return return_values[0]
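# Hedged usage sketch (added; not part of the generated file). Given an
# already configured ProcessOut client object (construction not shown here),
# activities can be listed or fetched individually:
#
#     activities = Activity(client).all()
#     activity = Activity(client).find('activity-id')
#     print(activity.title, activity.level)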
|
StarcoderdataPython
|
3259759
|
import errno
import os
from pyclfsm import State, StateMachine, StateVariable, StateMachineVariable, Visitor
def main(output):
state_machine_variables = [StateMachineVariable('int', 'currentState', 'state no')]
state_machine_includes = '''#include <iostream>
#include <cmath>
#include "CLMacros.h"
'''
one_includes = '''#include <cstdlib>
#include <Whiteboard.h>
#include <WhiteboardConstants.h>
'''
one_internal = '''{
using namespace std;
/*
cerr << "Internal state executed for " << stateName << endl;
exit(EXIT_FAILURE);
*/
}
'''
one_on_entry = '''{
using namespace std;
static int count = 0;
stateName = "One";
currentState = 0;
cout << stateName << " " << (int)fmod((double)current_time_in_microseconds() / 1000000.0L, 100) << endl;
}
'''
one_variables = [StateVariable('const char *', 'stateName', 'name of the current state')]
one_expression_0 = 'after(1) && (rand() % 10 == 1)'
one_expression_1 = 'after(1)'
two_includes = '''#include <cstdlib>
'''
two_internal = '''{
using namespace std;
/*
cerr << "Internal state executed for " << stateName << endl;
exit(EXIT_FAILURE);
*/
}
'''
two_on_entry = '''{
using namespace std;
static int count = 0;
stateName = "Two";
currentState = 1;
cout << stateName << " " << fmod((double)current_time_in_microseconds() / 1000000.0L, 100) << endl;
}
'''
two_variables = [StateVariable('const char *', 'stateName', 'name of the current state')]
two_expression = 'after_ms(500)'
three_includes = '''#include <cstdlib>
'''
three_internal = '''{
using namespace std;
/*
cerr << "Internal state executed for " << stateName << endl;
exit(EXIT_FAILURE);
*/
}
'''
three_on_entry = '''{
using namespace std;
static int count = 0;
    stateName = "Three";
    currentState = 2;
cout << stateName << " " << fmod((double)current_time_in_microseconds() / 1000000.0L, 100) << endl;
}
'''
three_variables = [StateVariable('const char *', 'stateName', 'name of the current state')]
three_expression = 'after_ms(500)'
sm = StateMachine('RandomDispatcher', includes=state_machine_includes, variables=state_machine_variables)
one = State('One', includes=one_includes, variables=one_variables, internal=one_internal, on_entry=one_on_entry)
two = State('Two', includes=two_includes, variables=two_variables, internal=two_internal, on_entry=two_on_entry)
three = State('Three', includes=three_includes, variables=three_variables, internal=three_internal, on_entry=three_on_entry)
sm.set_initial_state(one)
one.transition_to(two, expression=one_expression_0)
one.transition_to(three, expression=one_expression_1)
two.transition_to(one, expression=two_expression)
three.transition_to(one, expression=three_expression)
visitor = Visitor(sm)
visitor.generate(output)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
        print("Usage: %s OUTPUT_DIRECTORY" % sys.argv[0])
sys.exit(1)
output = sys.argv[1]
main(output)
|
StarcoderdataPython
|
9665993
|
from nilearn.image import resample_img
import nibabel as nib
import os
import numpy as np
datafolder = "/Users/Joke/Desktop/validating-fmri/data"
subs = list(np.unique([x.split("_")[0] for x in os.listdir(os.path.join(datafolder,"CNP_rest"))]))
for sub in subs:
anatfile = os.path.join(datafolder,"CNP_rest/%s_T1w_space-MNI152NLin2009cAsym_preproc.nii.gz"%sub)
anatfile_reduced = os.path.join(datafolder,"CNP_rest/%s_T1w_space-MNI152NLin2009cAsym_preproc_reduced.nii.gz"%sub)
restfile = os.path.join(datafolder,"CNP_rest/%s_task-rest_bold_space-MNI152NLin2009cAsym_preproc.nii.gz"%sub)
anat = nib.load(anatfile)
rest = nib.load(restfile)
anat_resampled = resample_img(anat,target_affine=rest.affine,target_shape=rest.shape[:3])
anat_resampled.to_filename(anatfile_reduced)
atlasfile = os.path.join(datafolder,"MSDL_rois","msdl_rois.nii")
atlas = nib.load(atlasfile)
atlas_resampled = resample_img(atlas,target_affine=anat_resampled.affine,target_shape=anat_resampled.shape[:3])
data = atlas_resampled.get_data()
newdata = np.zeros(data.shape[:3])
for x in range(data.shape[0]):
    for y in range(data.shape[1]):
        for z in range(data.shape[2]):
            if np.max(data[x, y, z]) < 0.1:
                newdata[x, y, z] = 0
            else:
                newdata[x, y, z] = np.where(data[x, y, z] == np.max(data[x, y, z]))[0][0] + 1
img = nib.Nifti1Image(newdata,affine=anat_resampled.affine,header=anat_resampled.header)
atlasfile_reduced = os.path.join(datafolder,"MSDL_rois","msdl_rois_reduced.nii.gz")
img.to_filename(atlasfile_reduced)
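# Added note (hedged): the voxel-wise triple loop above can be expressed with
# plain numpy; the sketch below is an equivalent vectorized version, assuming
# the same "max probability < 0.1 means background" rule.
#
#     probs = atlas_resampled.get_data()
#     labels = np.argmax(probs, axis=3) + 1
#     labels[np.max(probs, axis=3) < 0.1] = 0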
|
StarcoderdataPython
|
6668523
|
<gh_stars>1-10
from config import Config
from dd_tensorflow_model import Code2VecModel
import sm_helper as hp
###############################################################
g_model = None
g_all_data = []
g_cnt_dict = {}
###############################################################
if __name__ == '__main__':
config = Config(set_defaults=True, load_from_args=True, verify=True)
g_model = Code2VecModel(config)
print('Done creating code2vec model')
assert g_model is not None
# read file
file_list = hp.get_file_list()
for idx, java_file in enumerate(file_list):
print("\nStart [{}]: {}\n".format(idx + 1, java_file))
g_all_data.clear()
try:
# method_name and method_body
g_all_data.append("\npath = {}".format(java_file))
method_name, method_body = hp.load_method(java_file)
assert (len(method_name) > 0) and (len(method_body) > 0)
g_cnt_dict[method_name] = g_cnt_dict.get(method_name, 0) + 1
g_all_data.append("method_name = {}".format(method_name))
g_all_data.append("method_body = {}".format(method_body))
hp.store_method(hp.g_simp_file, method_body)
# check if prediction is correct
predict, _, _ = hp.prediction_with_c2v(g_model, hp.g_root_path, hp.g_simp_file)
assert method_name == predict
# get path-context and attention
topk_attn, topk_path = hp.get_attention(g_model, hp.g_root_path, hp.g_simp_file)
topk_terminal = []
g_all_data.append("\ntopk path-contexts:")
for i in range(len(topk_path)):
path_context = topk_path[i].strip().split(',')
topk_terminal.append([path_context[0], path_context[-1]])
g_all_data.append("[{}] {}".format(f"{topk_attn[i]:.4f}", topk_path[i]))
g_all_data.append("\ntopk terminals:\n{}".format(topk_terminal))
# print/save path-context and attention
save_name = "L{}_{}_{}.txt".format(str(idx + 1), method_name, g_cnt_dict[method_name])
output_file = "attn_data/{}".format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone [{}]: {}\n".format(idx + 1, java_file))
except:
print("\nError [{}]: {}\n".format(idx + 1, java_file))
g_model.close_session()
|
StarcoderdataPython
|
1672058
|
<reponame>aleonlein/acq4
from __future__ import print_function
from mmstage import MicroManagerStage
|
StarcoderdataPython
|
323641
|
from django.apps import AppConfig
class Models3CwappConfig(AppConfig):
name = 'models3cwapp'
|
StarcoderdataPython
|
9793558
|
<filename>Extraction_quizlet3.py
from util import *
from document_reader import *
import os
# explicit imports for names referenced below; these may already be re-exported
# by the wildcard imports above
import argparse
import json
import torch
from tqdm import tqdm
folder_name = '/shared/kairos/Data/LDC2020E30_KAIROS_Quizlet_3_Source_Data_and_Graph_G/data/source/ltf/ltf/'
documents = list()
for tmp_file_name in os.listdir(folder_name):
if 'xml' in tmp_file_name:
extracted_data = ltf_reader(folder_name, tmp_file_name)
documents.append(extracted_data)
# sentences = list()
# for tmp_s in extracted_data['sentences']:
# sentences.append(tmp_s['content'])
# new_file_name = tmp_file_name.replace('.xml', '.txt')
# with open('data/quizlet3/' + new_file_name, 'w', encoding='utf-8') as f:
# for s in sentences:
# f.write(s)
# f.write('\n')
# print('done')
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", default='1', type=str, required=False,
help="choose which gpu to use")
parser.add_argument("--representation_source", default='nyt', type=str, required=False,
help="choose which gpu to use")
parser.add_argument("--model", default='bert-large', type=str, required=False,
help="choose which gpu to use")
parser.add_argument("--pooling_method", default='final', type=str, required=False,
help="choose which gpu to use")
parser.add_argument("--weight", default=100, type=float, required=False,
help="weight assigned to triggers")
parser.add_argument("--argument_matching", default='exact', type=str, required=False,
help="weight assigned to triggers")
parser.add_argument("--eval_model", default='joint', type=str, required=False,
help="weight assigned to triggers")
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('current device:', device)
test_extractor = CogcompKairosEventExtractor(device)
results = list()
for tmp_document in documents:
print('We are working on document:', tmp_document["doc_id"])
extracted_results = list()
for tmp_s in tqdm(tmp_document['sentences']):
extracted_results.append(test_extractor.extract(tmp_s['content']))
tmp_document['event_extraction_results'] = extracted_results
results.append(tmp_document)
with open('data/quizlet3/data_after_event_extraction.json', 'w') as f:
json.dump(results, f)
print('end')
|
StarcoderdataPython
|
184736
|
""" Pipe-Soil Interaction module """
from math import pi, sin, tan, exp, sqrt, radians
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from uhb import general
#########
# GENERAL
#########
def cot(a):
return 1 / tan(a)
def calculate_soil_weight(gamma, D, H):
return gamma * D * H
def depth_to_centre(D_o, h):
return 0.5 * D_o + h
#######################
# ALA BURIED STEEL PIPE
#######################
def Nch(c, H, D):
""" Horizontal bearing capacity factor for sand
"""
if c == 0:
return 0
x = H / D
return min(6.752 + 0.065 * x - 11.063 / (x + 1) ** 2 + 7.119 / (x + 1) ** 3, 9)
def Nqh(psi, H, D):
""" Horizontal bearing capacity factor
"""
if psi == 0:
return 0
if psi < 20:
psi = 20
elif psi > 45:
psi = 45
psi_range = [20, 25, 30, 35, 40, 45]
a = [2.399, 3.332, 4.565, 6.816, 10.959, 17.658]
b = [0.439, 0.839, 1.234, 2.019, 1.783, 3.309]
c = [-0.03, -0.09, -0.089, -0.146, 0.045, 0.048]
d = [
1.059 * 10 ** -3,
5.606 * 10 ** -3,
4.275 * 10 ** -3,
7.651 * 10 ** -3,
-5.425 * 10 ** -3,
-6.443 * 10 ** -3,
]
e = [
-1.754 * 10 ** -5,
-1.319 * 10 ** -4,
-9.159 * 10 ** -5,
-1.683 * 10 ** -4,
-1.153 * 10 ** -4,
-1.299 * 10 ** -4,
]
x = H / D
def par(case):
return interp1d(psi_range, case)(psi)
return (par(a) + par(b) * x + par(c) * x ** 2 + par(d) * x ** 3 + par(e) * x ** 4)
def Ncv(c, H, D):
""" Vertical uplift factor for sand
"""
if c == 0:
return 0
return min(2 * H / D, 10)
def Nqv(psi, H, D):
""" Vertical uplift factor for sand
"""
if psi == 0:
return 0
return min(psi * H / 44 / D, Nq(psi))
def Nc(psi, H, D):
""" Soil bearing capacity factor
"""
return (
cot(radians(psi + 0.001))
* (exp(pi * tan(radians(psi + 0.001)))
* tan(radians(45 + (psi + 0.001) / 2)) ** 2 - 1)
)
def Nq(psi):
""" Soil bearing capacity factor
"""
return exp(pi * tan(radians(psi))) * tan(radians(45 + psi / 2)) ** 2
def Ngamma(psi):
""" Soil bearing capacity factor
"""
return exp(0.18 * psi - 2.5)
# AXIAL
def delta_t(soil_type):
""" Displacement at Tu
"""
delta_ts = {
"dense sand": 0.003,
"loose sand": 0.005,
"stiff clay": 0.008,
"soft clay": 0.01,
}
    if soil_type not in delta_ts:
        raise ValueError("Unknown soil type.")
    return delta_ts[soil_type]
def Tu(D, H, c, f, psi, gamma):
""" Maximum axial soil force per unit length
"""
alpha = 0.608 - 0.123 * c - 0.274 / (c ** 2 + 1) + 0.695 / (c ** 3 + 1)
K0 = 1 - sin(radians(psi))
return (
pi * D * alpha * c + pi * D * H * gamma *
(1 + K0) / 2 * tan(radians(f * psi))
)
# LATERAL
def delta_p(H, D):
""" Displacement at Pu
"""
return min(0.04 * (H + D / 2), 0.1 * D)
def Pu(c, H, D, psi, gamma):
""" Maximum lateral soil force per unit length
"""
return Nch(c, H, D) * c * D + Nqh(psi, H, D) * gamma * H * D
# VERTICAL UPLIFT
def delta_qu(soil, H, D):
""" Displacement at Qu
"""
if "sand" in soil:
return min(0.01 * H, 0.1 * D)
elif "clay" in soil:
return min(0.1 * H, 0.2 * D)
else:
raise ValueError("Unknown soil type.")
def Qu(psi, c, D, gamma, H):
""" Vertical uplift soil resistance per unit length
"""
return Ncv(c, H, D) * c * D + Nqv(psi, H, D) * gamma * H * D
# VERTICAL BEARING
def delta_qd(soil, D):
""" Displacement at Qu
"""
if "sand" in soil:
return 0.1 * D
elif "clay" in soil:
return 0.2 * D
else:
raise ValueError("Unknown soil type.")
def Qd(psi, c, D, gamma, H, rho_sw):
""" Vertical bearing soil resistance per unit length
"""
return (
Nc(psi, H, D) * c * D + Nq(psi) * gamma * H * D + Ngamma(psi)
* (gamma + (rho_sw * 9.81)) * D ** 2 / 2
)
###############
# DNVGL-RP-F114
###############
def F_uplift_d(soil_type, gamma, H, D):
"""Returns drained uplift resistance.
DNVGL-RP-F114 - Equation (5.6)
:param soil_type: str
:param gamma: Submerged weight of soil [N/m^-3]
:param H: Cover height (above pipe) [m]
:param D: Outer pipe diameter [m]
"""
# TODO: interpolate f using psi_s
resistance_factors = {
"loose sand": 0.29,
"medium sand": 0.47,
"dense sand": 0.62,
}
f = resistance_factors[soil_type]
return gamma * H * D + gamma * D ** 2 * (0.5 - pi / 8) + f * gamma * (
H + 0.5 * D) ** 2
#############
# DNV-RP-F110
#############
def R_max(H, D, gamma, f):
""" Returns the uplift resistance of a pipe in sand.
DNV-RP-F110 2007 - Equation (B.3)
"""
return (1 + f * H / D) * (gamma * H * D)
##########
# OTC 6486
##########
def P_otc6486(H, D, gamma, c):
""" Returns the uplift resistance of cohesive materials.
OTC6486 - Equation (7)
"""
return gamma * H * D + 2 * H * c
# def DepthEquilibrium(psi, c, D, gamma, soil):
# R = D / 2
# widths = [w for w in np.arange(D / 6, D + 0.1 * D / 6, D / 6)]
# penetrations = [R - sqrt(R ** 2 - (w / 2) ** 2) for w in widths]
# Qds = [Qd(psi, c, w, gamma, 0) for w in widths]
# p_max = 5 * D
# F_max = p_max / delta_qd(soil, D) * Qds[-1]
# penetrations.append(p_max)
# Qds.append(F_max)
# Fd = np.stack((penetrations, Qds), axis=-1)
# return Fd
def gen_uplift_spring(data, h, model="asce"):
""" Returns vertical uplift soil spring as a tuple of displacement and
resistance based on chosen soil model.
"""
D_o = general.total_outside_diameter(data.D, data.t_coat)
H = depth_to_centre(D_o, h)
disp = delta_qu(data.soil_type, H, D_o)
springs = {
"asce": (disp, Qu(data.psi_s, data.c, D_o, data.gamma_s, H)),
"f114": (disp, F_uplift_d(data.soil_type, data.gamma_s, H, D_o)),
"f110": (disp, R_max(H, D_o, data.gamma_s, data.f)),
"otc": (disp, P_otc6486(H, D_o, data.gamma_s, data.c)),
}
return springs.get(model, ValueError("Unknown uplift soil model."))
def gen_bearing_spring(data, h, model="asce"):
""" Returns bearing soil spring as a tuple of displacement and resistance
based on chosen soil model.
"""
D_o = general.total_outside_diameter(data.D, data.t_coat)
H = depth_to_centre(D_o, h)
disp = delta_qd(data.soil_type, D_o)
springs = {
"asce": (disp,
Qd(data.psi_s, data.c, D_o, data.gamma_s, H, data.rho_sw)),
}
return springs.get(model, ValueError("Unknown bearing soil model."))
def gen_axial_spring(data, h, model="asce"):
""" Returns axial soil spring as a tuple of displacement and resistance
based on chosen soil model.
"""
D_o = general.total_outside_diameter(data.D, data.t_coat)
disp = delta_t(data.soil_type)
springs = {
"asce": (disp,
Tu(D_o, depth_to_centre(D_o, h), data.c, data.f, data.psi_s,
data.gamma_s)
),
}
return springs.get(model, ValueError("Unknown axial soil model."))
def gen_lateral_spring(data, h, model="asce"):
""" Returns lateral soil spring as a tuple of displacement and resistance
based on chosen soil model.
"""
D_o = general.total_outside_diameter(data.D, data.t_coat)
H = depth_to_centre(D_o, h)
disp = delta_p(H, D_o)
springs = {
"asce": (disp, Pu(data.c, H, D_o, data.psi_s, data.gamma_s)),
}
return springs.get(model, ValueError("Unknown lateral soil model."))
|
StarcoderdataPython
|
1627670
|
# -*- coding: utf-8 -*-
# @Time : 2021/3/13 17:25
# @Author : DannyDong
# @File : Forward.py
# @describe: Preconditions
import time
import random
import string
# Preconditions - mock data
class ForwardMock(object):
def __init__(self):
self.num = random.randint(99, 1000)
eng_list = string.ascii_letters
self.eng = random.choice(eng_list)
self.eng1 = random.choice(eng_list)
    # Generate mock data for the given type
def generate_mock_data(self, mock_type):
mock_dict = {
'studentName': '学员', 'staffName': '员工', 'courseName': '课程', 'className': '班级',
'goodsName': '物品', 'mixName': '杂费', 'courseGroups': '课时包', 'studentNotice': '学员通知',
'dimension': '评价维度', 'rules': '课酬规则', 'expenses': '支出费用', 'parameter': '字段参数',
'publicity': '机构宣传语', 'noticeWord': '学员提醒语'
}
return "AAT{}{}{}".format(mock_dict[mock_type], self.num, self.eng)
    # Generate a number of the given type
    @staticmethod
    def generate_num(num_type, up, down):
        # generate an integer
        if num_type == 1:
            result = random.randint(up, down)
        # generate a float rounded to 2 decimals
        elif num_type == 2:
            num_float = random.uniform(up, down)
            result = round(num_float, 2)
        else:
            raise Exception('Num Type Error')
        return result
@staticmethod
    # Generate a phone number
def generate_phone():
return "9{}".format(random.randint(999999999, 10000000000))
    # Generate a unit or a specification
    @staticmethod
    def generate_unit(unit_type):
        # item units
        if unit_type == 1:
            unit_list = ['个', '只', '根', '瓶', '杯', '台']
        # item specifications
        elif unit_type == 2:
            unit_list = ['S', 'L', 'M', '极简版', '家庭版', '专业版', '旗舰版']
        else:
            raise Exception('Unit Type Error')
return random.choice(unit_list)
# Preconditions - test cases (depth-first search, DFS)
class ForwardCase(object):
def __init__(self):
pass
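# A minimal usage sketch of the mock helpers above; the printed values are
# throwaway samples and the argument choices are illustrative only.
if __name__ == '__main__':
    mock = ForwardMock()
    print(mock.generate_mock_data('studentName'))
    print(ForwardMock.generate_num(1, 1, 10))       # random int in [1, 10]
    print(ForwardMock.generate_num(2, 0.5, 9.5))    # random float, 2 decimals
    print(ForwardMock.generate_phone())
    print(ForwardMock.generate_unit(1))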
|
StarcoderdataPython
|
1930174
|
from django.contrib.auth.mixins import AccessMixin
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from allauth_2fa.utils import user_has_valid_totp_device
class ValidTOTPDeviceRequiredMixin(AccessMixin):
no_valid_totp_device_url = reverse_lazy('two-factor-setup')
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return self.handle_no_permission()
if not user_has_valid_totp_device(request.user):
return self.handle_missing_totp_device()
return super(ValidTOTPDeviceRequiredMixin, self).dispatch(request, *args, **kwargs)
def handle_missing_totp_device(self):
return HttpResponseRedirect(self.no_valid_totp_device_url)
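# A hedged usage sketch (the view name and template path are illustrative
# assumptions): mixing ValidTOTPDeviceRequiredMixin into a class-based view
# redirects authenticated users without a confirmed TOTP device to the
# two-factor setup page before the view is dispatched.
#
#     from django.views.generic import TemplateView
#
#     class SensitiveDashboardView(ValidTOTPDeviceRequiredMixin, TemplateView):
#         template_name = "dashboard/sensitive.html"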
|
StarcoderdataPython
|
3293037
|
<filename>mssql_dataframe/__init__.py<gh_stars>0
from mssql_dataframe.package import SQLServer # noqa: F401
|
StarcoderdataPython
|
8189259
|
<gh_stars>1-10
from .dict_flatten_accessor import mod_config, get_config
import os
from miscellanies.yaml_ops import load_yaml
def _apply_mixin_rule(rule: dict, config, value, action=None):
query_path = rule['path']
# 'replace' action is the default action
if action is None:
if 'action' not in rule:
action = 'replace'
else:
action = rule['action']
if value is None:
# fixed static rule
value = rule['value']
if action == 'replace': # replace the config in path by given value
if isinstance(query_path, (list, tuple)): # multiple paths
[mod_config(config, sub_access_path, value) for sub_access_path in query_path]
else:
mod_config(config, query_path, value)
elif action == 'insert':
if isinstance(query_path, (list, tuple)):
for sub_access_path in query_path:
config = get_config(config, sub_access_path)
if isinstance(value, (list, tuple)):
config.extend(value)
elif isinstance(value, dict):
config.update(value)
else:
config.insert(value)
else:
config = get_config(config, query_path)
if isinstance(value, (list, tuple)):
config.extend(value)
elif isinstance(value, dict):
config.update(value)
else:
config.insert(value)
elif action == 'include':
if isinstance(query_path, (list, tuple)):
for sub_access_path in query_path:
config = get_config(config, sub_access_path)
assert isinstance(config, dict)
for key in list(config.keys()):
if key != value:
del config[key]
else:
config = get_config(config, query_path)
assert isinstance(config, dict)
for key in list(config.keys()):
if key != value:
del config[key]
else:
raise NotImplementedError(action)
def apply_mixin_rules(mixin_rules, config, dynamic_values):
if 'fixed' in mixin_rules:
for fixed_modification_rule in mixin_rules['fixed']:
_apply_mixin_rule(fixed_modification_rule, config, None)
if 'dynamic' in mixin_rules:
for dynamic_parameter_name, dynamic_modification_rule in mixin_rules['dynamic'].items():
_apply_mixin_rule(dynamic_modification_rule, config, dynamic_values[dynamic_parameter_name])
def get_mixin_config(args):
configs = []
config_path = os.path.join(args.config_path, args.method_name, args.config_name, 'mixin', args.mixin_config)
if not os.path.exists(config_path):
config_path = os.path.join(args.config_path, 'mixin', args.mixin_config)
assert os.path.exists(config_path), 'Mixin config not found: {}'.format(config_path)
configs.append(load_yaml(config_path))
# for mixin_config in args.mixin_config:
# if mixin_config.startswith('/' or '\\'):
# config_path = os.path.join(args.config_path, mixin_config[1:])
# else:
# config_path = os.path.join(args.config_path, args.method_name, args.config_name, 'mixin', mixin_config)
# if not os.path.exists(config_path):
# config_path = os.path.join(args.config_path, 'mixin', mixin_config)
# assert os.path.exists(config_path), 'Mixin config not found: {}'.format(config_path)
# configs.append(load_yaml(config_path))
return configs
def load_static_mixin_config_and_apply_rules(args, config):
mixin_configs = get_mixin_config(args)
for mixin_config in mixin_configs:
apply_mixin_rules(mixin_config['mixin'], config, None)
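# A hedged sketch of the rule structure consumed by apply_mixin_rules. The keys
# ('fixed', 'dynamic', 'path', 'action', 'value') are the ones read above; the
# dotted path syntax and the concrete values are assumptions, since the path
# format is defined by dict_flatten_accessor, which is not shown here.
#
# mixin:
#   fixed:
#     - path: train.batch_size        # 'replace' is the default action
#       value: 32
#     - path: train.augmentations
#       action: insert
#       value: [crop]
#   dynamic:
#     learning_rate:
#       path: optimizer.lr            # value supplied via dynamic_values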
|
StarcoderdataPython
|
8003437
|
<filename>cellacdc/models/YeaZ/acdcSegment.py<gh_stars>10-100
import os
import pathlib
import numpy as np
import skimage.exposure
import skimage.filters
from .unet import model
from .unet import neural_network
from .unet import segment
from tensorflow import keras
from tqdm import tqdm
from cellacdc import myutils
class progressCallback(keras.callbacks.Callback):
def __init__(self, signals):
self.signals = signals
def on_predict_begin(self, logs=None):
pass
def on_predict_batch_begin(self, batch, logs=None):
pass
def on_predict_batch_end(self, batch, logs=None):
innerPbar_available = self.signals[1]
if innerPbar_available:
self.signals[0].innerProgressBar.emit(1)
else:
self.signals[0].progressBar.emit(1)
class Model:
def __init__(self, is_phase_contrast=True):
# Initialize model
self.model = model.unet(
pretrained_weights=None,
input_size=(None,None,1)
)
# Get the path where the weights are saved.
# We suggest saving the weights files into a 'model' subfolder
user_path = pathlib.Path.home()
model_path = os.path.join(str(user_path), f'acdc-YeaZ')
if is_phase_contrast:
weights_fn = 'unet_weights_batchsize_25_Nepochs_100_SJR0_10.hdf5'
else:
weights_fn = 'weights_budding_BF_multilab_0_1.hdf5'
weights_path = os.path.join(model_path, weights_fn)
        if not os.path.exists(weights_path):
            raise FileNotFoundError(f'Weights file not found in {model_path}')
self.model.load_weights(weights_path)
def yeaz_preprocess(self, image, tqdm_pbar=None):
# image = skimage.filters.gaussian(image, sigma=1)
# image = skimage.exposure.equalize_adapthist(image)
# image = image/image.max()
image = myutils.uint_to_float(image)
image = skimage.exposure.equalize_adapthist(image)
if tqdm_pbar is not None:
tqdm_pbar.emit(1)
return image
def predict3DT(self, timelapse3D):
# pad with zeros such that is divisible by 16
(nrow, ncol) = timelapse3D[0].shape
row_add = 16-nrow%16
col_add = 16-ncol%16
pad_info = ((0, 0), (0, row_add), (0, col_add))
padded = np.pad(timelapse3D, pad_info, 'constant')
x = padded[:, :, :, np.newaxis]
prediction = self.model.predict(x, batch_size=1, verbose=1)
prediction = prediction[:, 0:-row_add, 0:-col_add, 0]
return prediction
def segment(self, image, thresh_val=0.0, min_distance=10):
# Preprocess image
image = self.yeaz_preprocess(image)
if thresh_val == 0:
thresh_val = None
# pad with zeros such that is divisible by 16
(nrow, ncol) = image.shape
row_add = 16-nrow%16
col_add = 16-ncol%16
pad_info = ((0, row_add), (0, col_add))
padded = np.pad(image, pad_info, 'constant')
x = padded[np.newaxis,:,:,np.newaxis]
prediction = self.model.predict(x, batch_size=1, verbose=1)[0,:,:,0]
# remove padding with 0s
prediction = prediction[0:-row_add, 0:-col_add]
# Label the cells
thresh = neural_network.threshold(prediction, thresh_val=thresh_val)
lab = segment.segment(thresh, prediction, min_distance=min_distance)
return lab.astype(np.uint16)
def segment3DT(self, timelapse3D, thresh_val=0.0, min_distance=10, signals=None):
sig_progress_tqdm = None
if signals is not None:
signals[0].progress.emit(f'Preprocessing images...')
signals[0].create_tqdm.emit(len(timelapse3D))
sig_progress_tqdm = signals[0].progress_tqdm
timelapse3D = np.array([
self.yeaz_preprocess(image, tqdm_pbar=sig_progress_tqdm)
for image in timelapse3D
])
if signals is not None:
signals[0].signal_close_tqdm.emit()
if thresh_val == 0:
thresh_val = None
# pad with zeros such that is divisible by 16
(nrow, ncol) = timelapse3D[0].shape
row_add = 16-nrow%16
col_add = 16-ncol%16
pad_info = ((0, 0), (0, row_add), (0, col_add))
padded = np.pad(timelapse3D, pad_info, 'constant')
x = padded[:, :, :, np.newaxis]
if signals is not None:
signals[0].progress.emit(f'Predicting (the future) with YeaZ...')
callbacks = None
if signals is not None:
callbacks = [progressCallback(signals)]
prediction = self.model.predict(
x, batch_size=1, verbose=1, callbacks=callbacks
)[:,:,:,0]
if signals is not None:
signals[0].progress.emit(f'Labelling objects with YeaZ...')
# remove padding with 0s
prediction = prediction[:, 0:-row_add, 0:-col_add]
lab_timelapse = np.zeros(prediction.shape, np.uint16)
if signals is not None:
signals[0].create_tqdm.emit(len(prediction))
for t, pred in enumerate(prediction):
thresh = neural_network.threshold(pred, thresh_val=thresh_val)
lab = segment.segment(thresh, pred, min_distance=min_distance)
lab_timelapse[t] = lab.astype(np.uint16)
if signals is not None:
signals[0].progress_tqdm.emit(1)
if signals is not None:
signals[0].signal_close_tqdm.emit()
return lab_timelapse
|
StarcoderdataPython
|
1831495
|
__author__ = '<NAME> <<EMAIL>>'
from abc import ABCMeta, abstractmethod
from prxgt.domain.filter.filter import Filter
from prxgt.domain.instance import Instance
class ProcessorBase(metaclass=ABCMeta):
"""
Processor interface to be implemented for various data structure schemas.
"""
@abstractmethod
def add_instance(self, inst: Instance):
"""
Add new instance to storage.
:param inst: new instance (contains attributes and values)
:return:
"""
@abstractmethod
def get_inst_by_id(self, instance_id):
"""
Return instance with attrs by instance_id.
:param instance_id:
:return:
"""
@abstractmethod
def get_list_by_filter(self, filter_: Filter):
"""
Return list of instances filtered by filter_data.
:param filter_:
:return:
"""
@abstractmethod
def get_list_ordered(self, filter_data, order_data):
"""
Return ordered list of instances filtered by filter_data.
:param filter_data:
:return:
"""
@abstractmethod
def get_list_paged(self, filter_data, order_data, pages_data):
"""
Return paged and ordered list of instances filtered by filter_data.
:param filter_data:
:return:
"""
|
StarcoderdataPython
|
9748646
|
from django.urls import path
from . import views
urlpatterns = [
path('drone-categories/', views.DroneCategoryList.as_view(),
name=views.DroneCategoryList.name),
path('drone-categories/<int:pk>', views.DroneCategoryDetail.as_view(),
name=views.DroneCategoryDetail.name),
path('drones/', views.DroneList.as_view(), name=views.DroneList.name),
path('drones/<int:pk>', views.DroneDetail.as_view(),
name=views.DroneDetail.name),
path('pilots/', views.PilotList.as_view(), name=views.PilotList.name),
path('pilots/<int:pk>', views.PilotDetail.as_view(),
name=views.PilotDetail.name),
path('competitions/', views.CompetitionList.as_view(),
name=views.CompetitionList.name),
path('competitions/<int:pk>', views.CompetitionDetail.as_view(),
name=views.CompetitionDetail.name),
path('', views.APIRoot.as_view(), name=views.APIRoot.name)
]
|
StarcoderdataPython
|
1912071
|
<reponame>hackerwins/polyaxon
from typing import Dict
from django.db import connection
from checks.base import Check
from checks.results import Result
class PostgresCheck(Check):
@staticmethod
def pg_health() -> Result:
try:
with connection.cursor() as cursor:
cursor.execute('SELECT 1; -- Healthcheck')
health = cursor.fetchone()[0] == 1
if health:
cursor.execute("select pg_database_size('postgres') as size")
size = cursor.fetchone()
return Result(message='Service is healthy, db size {}'.format(size))
return Result(message='Service is not working.', severity=Result.WARNING)
except Exception as e:
return Result(message='Service unable to connect, encountered "{}" error.'.format(e),
severity=Result.ERROR)
@classmethod
def run(cls) -> Dict:
result = cls.pg_health()
return {'POSTGRES': result}
|
StarcoderdataPython
|
5106162
|
<reponame>Damego/Asteroid-Discord-Bot
import datetime
from enum import IntEnum
import genshin
from discord import Embed
from discord.ext import tasks
from discord_slash import SlashContext
from discord_slash.cog_ext import cog_subcommand as slash_subcommand
from utils import AsteroidBot, Cog, SystemChannels, UIDNotBinded, get_content, is_enabled
from utils.consts import DiscordColors
from utils.errors import NoData
from utils.paginator import Paginator, PaginatorStyle
class GenshinEnums(IntEnum):
ANEMOCULUS = 66
GEOCULUS = 131
ELECTROCULUS = 181
TELEPORTS = 190
DOMAINS = 35
class GenshinStats(Cog):
def __init__(self, bot: AsteroidBot):
self.bot = bot
self.emoji = 863429526632923136
self.name = "GenshinStats"
self.genshin_client: genshin.GenshinClient = None # type: ignore
self.genshin_languages = {"ru": "ru-ru", "en-US": "en-us"}
self.cookies = None
@Cog.listener()
async def on_ready(self):
if self.bot.database.global_data is None:
await self.bot.database.init_global_data()
if self.genshin_client is not None:
# on_ready was called in the runtime
return
global_data = self.bot.database.global_data
self.cookies = global_data.main.genshin_cookies
self.genshin_client = genshin.GenshinClient(self.cookies)
self.get_genshin_daily_reward.start()
@tasks.loop(hours=24)
async def get_genshin_daily_reward(self):
try:
reward = await self.genshin_client.claim_daily_reward(lang="ru-ru")
except genshin.AlreadyClaimed:
print("Reward already claimed!")
return
embed = Embed(
title="Награда!",
description=f"Название: {reward.name}\nКоличество: {reward.amount} шт.",
timestamp=datetime.datetime.utcnow(),
color=DiscordColors.BLURPLE,
)
embed.set_thumbnail(url=reward.icon)
channel = self.bot.get_channel(SystemChannels.GENSHIN_DAILY_REWARDS)
if channel is None:
channel = await self.bot.fetch_channel(SystemChannels.GENSHIN_DAILY_REWARDS)
await channel.send(embed=embed)
@slash_subcommand(
base="genshin",
name="bind",
description="Bind Hoyolab UID to your account",
base_dm_permission=False,
)
@is_enabled()
async def genshin_bind(self, ctx: SlashContext, hoyolab_uid: int):
record_card = await self.genshin_client.get_record_card(hoyolab_uid)
if not record_card.public or not record_card.has_uid:
raise NoData
global_data = self.bot.database.global_data
user_data = await global_data.get_user(ctx.author_id)
await user_data.set_genshin_uid(hoyolab_uid=hoyolab_uid, game_uid=record_card.uid)
content = get_content(
"GENSHIN_BIND_COMMAND", await self.bot.get_guild_bot_lang(ctx.guild_id)
)
await ctx.send(content)
@slash_subcommand(
base="genshin",
name="statistics",
description="Show your statistics of Genshin Impact",
)
@is_enabled()
async def genshin_statistics(self, ctx: SlashContext, uid: int = None):
await ctx.defer()
if uid is None:
uid = await self.get_uid(ctx)
lang = await self.bot.get_guild_bot_lang(ctx.guild_id)
genshin_lang = self.genshin_languages[lang]
content = get_content("GENSHIN_STATISTICS_COMMAND", lang)
user_data = await self.genshin_client.get_user(uid, lang=genshin_lang)
user_explorations = reversed(user_data.explorations)
user_stats = user_data.stats
embed = Embed(
title=content["EMBED_WORLD_EXPLORATION_TITLE"],
color=await self.bot.get_embed_color(ctx.guild_id),
)
embed.set_footer(text=f"UID: {uid}")
for region in user_explorations:
if region.explored == 0.0:
continue
description = f'{content["EXPLORED_TEXT"]}: `{region.explored}%`'
if region.offerings:
for offering in region.offerings:
if offering.name != "Reputation":
description += f"\n{offering.name}: `{offering.level}`"
if region.type == "Reputation":
description += content["REPUTATION_LEVEL_TEXT"].format(level=region.level)
embed.add_field(name=region.name, value=description)
oculus_content = f"""
<:Item_Anemoculus:870989767960059944> {content['ANEMOCULUS']}: `{user_stats.anemoculi}/{GenshinEnums.ANEMOCULUS}`
<:Item_Geoculus:870989769570676757> {content['GEOCULUS']}: `{user_stats.geoculi}/{GenshinEnums.GEOCULUS}`
<:Item_Electroculus:870989768387878912> {content['ELECTROCULUS']}: `{user_stats.electroculi}/{GenshinEnums.ELECTROCULUS}`
"""
embed.add_field(name=content["COLLECTED_OCULUS_TEXT"], value=oculus_content, inline=False)
chests_opened = f"""
{content['COMMON_CHEST']}: `{user_stats.common_chests}`
{content['EXQUISITE_CHEST']}: `{user_stats.exquisite_chests}`
{content['PRECIOUS_CHEST']}: `{user_stats.precious_chests}`
{content['LUXURIOUS_CHEST']}: `{user_stats.luxurious_chests}`
"""
embed.add_field(name=content["CHESTS_OPENED"], value=chests_opened, inline=False)
misc_content = f"""
<:teleport:871385272376504341> {content['UNLOCKED_TELEPORTS']}: `{user_stats.unlocked_waypoints}/{GenshinEnums.TELEPORTS}`
<:domains:871370995192193034> {content['UNLOCKED_DOMAINS']}: `{user_stats.unlocked_domains}/{GenshinEnums.DOMAINS}`
"""
embed.add_field(name=content["MISC_INFO"], value=misc_content, inline=False)
await ctx.send(embed=embed)
@slash_subcommand(
base="genshin",
name="characters",
description="Show your characters of Genshin Impact",
)
@is_enabled()
async def genshin_characters(self, ctx: SlashContext, uid: int = None):
await ctx.defer()
if uid is None:
uid = await self.get_uid(ctx)
lang = await self.bot.get_guild_bot_lang(ctx.guild_id)
genshin_lang = self.genshin_languages[lang]
content = get_content("GENSHIN_CHARACTERS_COMMAND", lang)
characters = await self.genshin_client.get_characters(uid, lang=genshin_lang)
embeds = []
pages = len(characters)
for _page, character in enumerate(characters, start=1):
embed = Embed(
title=f'{character.name} {"⭐" * character.rarity}',
color=await self.bot.get_embed_color(ctx.guild.id),
)
embed.set_thumbnail(url=character.icon)
embed.set_footer(text=f"UID: {uid}. {_page}/{pages}")
embed = self.get_character_info(content, embed, character)
embeds.append(embed)
paginator = Paginator(self.bot, ctx, PaginatorStyle.FIVE_BUTTONS_WITH_COUNT, embeds)
await paginator.start()
@slash_subcommand(base="genshin", name="info", description="Show account information")
@is_enabled()
async def genshin_info(self, ctx: SlashContext, hoyolab_uid: int = None):
await ctx.defer()
if hoyolab_uid is None:
hoyolab_uid = await self.get_uid(ctx, is_game_uid=False)
lang = await self.bot.get_guild_bot_lang(ctx.guild_id)
genshin_lang = self.genshin_languages[lang]
content = get_content("GENSHIN_INFO_COMMAND", lang)
card = await self.genshin_client.get_record_card(hoyolab_uid, lang=genshin_lang)
user_data = await self.genshin_client.get_user(int(card.uid), lang=genshin_lang)
user_stats = user_data.stats
description = f"""
**{content['NICKNAME_TEXT']}: {card.nickname}**
<:adventure_exp:876142502736965672> {content['ADVENTURE_RANK_TEXT']}: `{card.level}`
<:achievements:871370992839176242> {content['ACHIEVEMENTS_TEXT']}: `{user_stats.achievements}`
:mage: {content['CHARACTERS_TEXT']}: `{user_stats.characters}`
<:spiral_abyss:871370970600968233> {content['SPIRAL_ABYSS_TEXT']}: `{user_stats.spiral_abyss}`
"""
embed = Embed(
title=content["PLAYER_INFO_TEXT"],
description=description,
color=await self.bot.get_embed_color(ctx.guild.id),
)
embed.set_footer(text=f"Hoyolab UID: {hoyolab_uid} | Game UID: {card.uid}")
await ctx.send(embed=embed)
async def get_uid(self, ctx: SlashContext, *, is_game_uid: bool = True) -> int:
global_data = self.bot.database.global_data
user_data = await global_data.get_user(ctx.author_id)
uid = user_data.genshin.game_uid if is_game_uid else user_data.genshin.hoyolab_uid
if uid is None:
raise UIDNotBinded
return uid
@staticmethod
def get_character_info(content: dict, embed: Embed, character: genshin.models.Character):
character_element = (
f"» {content['CHARACTER_VISION']}: {content['GENSHIN_CHARACTER_VISION'][character.element]}"
if character.element
else ""
)
embed.description = f"""
{content['INFORMATION_TEXT']}
» <:character_exp:871389287978008616> {content['CHARACTER_LEVEL']}: `{character.level}`
» {content['CHARACTER_CONSTELLATION']}: `C{character.constellation}`
{character_element}
» <:friendship_exp:871389291740291082> {content['CHARACTER_FRIENDSHIP']}: `{character.friendship}`
**{content['WEAPON_TEXT']}**
» {content['WEAPON_NAME']}: `{character.weapon.name}`
» {content['WEAPON_RARITY']}: `{"⭐" * character.weapon.rarity}`
» {content['WEAPON_TYPE']}: `{character.weapon.type}`
» {content['WEAPON_LEVEL']}: `{character.weapon.level}`
» {content['WEAPON_ASCENSION_LEVEL']}: `{character.weapon.ascension}`
» {content['WEAPON_REFINEMENT_LEVEL']}: `{character.weapon.refinement}`
"""
if character.artifacts:
embed.description += content["ARTIFACTS_TEXT"]
for artifact in character.artifacts:
embed.description += f"""
・*{content['GENSHIN_ARTIFACT_TYPE'][str(artifact.pos)]}*
» {content['ARTIFACT_NAME']}: `{artifact.name}`
» {content['ARTIFACT_RARITY']}: `{"⭐" * artifact.rarity}`
» {content['ARTIFACT_LEVEL']}: `{artifact.level}`
"""
return embed
def setup(bot):
bot.add_cog(GenshinStats(bot))
|
StarcoderdataPython
|
8192250
|
<reponame>jnthn/intellij-community
b'{}'.format(0)
u'{}'.format(0)
|
StarcoderdataPython
|
6502517
|
<filename>scans/Kattendijkekroniek-KB_1900A008/writexml.py<gh_stars>1-10
import os, os.path, glob
import json
from pprint import pprint
from lxml import html
import requests
def finditem(obj, key):
if key in obj: return obj[key]
for k, v in obj.items():
if isinstance(v,dict):
item = finditem(v, key)
if item is not None:
return item
#config paths/ urls
current_dir = os.path.dirname(os.path.realpath(__file__))
imagedir_hires= current_dir + "\\images\\bladerboek\\"
images_base_url_hires="https://www.kb.nl/kbhtml/kattendijkekroniek/"
homepageNL = "https://www.kb.nl/themas/middeleeuwse-handschriften/kattendijkekroniek-ca-1491"
#homepageEN = "https://www.kb.nl/en/themes/middle-ages/beatrijs"
# {{Artwork}} template on Commons, as stated in GWToolset
# accessionnumber = "" (=ppn)
# artist = ""
# author = ""
# creditline = ""
# date = ""
# demo = ""
# department = ""
# description = ""
# dimensions = ""
# exhibitionhistory = ""
# inscriptions = ""
# institution = ""
# medium = ""
# notes = ""
# objecthistory = ""
# objecttype = ""
# otherfields = ""
# otherversions = ""
# permission = ""
# placeofcreation = ""
# placeofdiscovery = ""
# references = ""
# source = ""
# strict = ""
# title = ""
# wikidata =
# wikidatacat = ""
# GWToolsettitle = ""
# URLtothemediafile =""
#----------------------
ppn = "40239772X"
signature = "KB KW 1900 A 008"
institution = "Koninklijke Bibliotheek"
medium = "Paper"
dimensions = "Folium size: {{Size|mm|200|140}} - Text block size: circa {{Size|mm|140|87}} with 1 column and 26 lines"
binding="Brown leather binding with blind embossing from ca. 1560-1580"
permission = "{{PD-art|PD-old-70-1923}}"
#wikidata = "Q1929931"
source = "{{Koninklijke Bibliotheek}}"
date = "1491-1493"
placeofcreation = "[[:en:North Holland|North Holland]], [[:en:Haarlem|Haarlem]]?"
language = "Dutch"
script = "mainly [[:nl:Littera hybrida|littera hybrida]], on occasion [[:nl:Littera_textualis|littera textualis]]"
notes= "Text in "+str(language)+ ". Script is " + str(script)
nfolios = "viii + 561 + xii (1.122 pages)"
artist = "Unknown"
author = "Unknown"
title_short = "Kattendijkekroniek"
title = "Die historie of die cronicke van Hollant, van Zeelant ende van Vrieslant ende van den stichte van Uutrecht ende van veel landen die men hierna nomen sal"
title_GWT = "Kattendijkekroniek (ca. 1491)"
objecthistory ="The Kattendijkekroniek was possibly made for [[:en:Yolande van Lalaing|Yolande van Lalaing]] (1422-1497), wife of [[:en:Reinoud II van Brederode|Reinoud II van Brederode]] (1415-1473). Former owners include [[:nl:Pieter_Cornelisz._Bockenberg|<NAME>. Bockenberg]] (1548-1617), <NAME> (1544-1593) and his son <NAME> (ca. 1565-before 1607). In 1614 it was acquired by Johan [[:nl:Huyssen_van_Kattendijke|Huyssen van Kattendijke]] (1566-1634) and it remains in this family to this day. <NAME> (1948-) gave it on permanent loan to the Koninklijke Bibliotheek in 2016."
#illuminations = "27 full-page miniatures (possibly 15 missing); 16 historiated initials; decorated initials with border decoration; penwork initials"
references = "For more elaborate background information and full-text transcriptions with annotations, see [http://resources.huygens.knaw.nl/retroboeken/kattendycke Kroniek van Kattendijke tot 1490], published by <NAME>, with cooperation from Ingrid Biesheuvel, [[:en:Huygens Institute for the History of the Netherlands|Institute for the History of the Netherlands]] (ING), The Hague, 2005. This publication is in Dutch."
#de default browse entry, for pages not part of the sections below
browse_entry = "https://galerij.kb.nl/kb.html#/nl/kattendijke/page/9/zoom/3/lat/-58.859223547066584/lng/-44.6484375"
#===================================================================
XMLoutputfile = open("kattendijke.xml", "w")
xmlstring=""
xmlstring += "<?xml version='1.0' encoding='UTF-8'?>\n"
xmlstring += "<records>\n"
for infile in glob.glob(os.path.join(imagedir_hires, '*.jpg')):
picname = infile.replace(imagedir_hires, "") #"002r.jpg"
picname_trunc=picname.replace(".jpg", "") #"002r"
# split in 3 parts: 006 + 76e5 + fl002v
#imagenumber = picname_trunc.split("_")[0] #006
#folio_left = picname_trunc.split("_")[1] # 76e5- we can further ignore this part, this is always the same
#folio = picname_trunc.split("_")[2] #fl002v
folio = picname_trunc # 002r
#print(imagenumber + " -- " + str(folio) + " -- " + str(hasIlluminations(folio)) + " -- " + str(findIlluminations(folio)))
#print(imagenumber + " -- " + str(folio) + " -- " + str(hasIlluminations(folio)))# + " -- " + str(findIlluminations(folio)))
print('"'+str(folio)+'",')# + " -- " + str(findIlluminations(folio)))
xmlstring += " <record>\n"
xmlstring += " <Institution>" + institution + "</Institution>\n"
xmlstring += " <permission>"+permission+"</permission>\n"
xmlstring += " <source>"+source+"</source>\n"
xmlstring += " <folio>"+str(picname_trunc)+"</folio>\n"
xmlstring += " <objecthistory>"+ str(objecthistory)+"</objecthistory>\n"
xmlstring += " <accessionnumber>*[" + browse_entry + " View this manuscript] on the website of the KB, including a full-text transcription of each page." + "\n*" + "Read [" + homepageNL + " backgroud information in Dutch]" + "\n*" + "Description of the manuscript in the catalogue of the KB: http://opc4.kb.nl/PPN?PPN=" +ppn + "</accessionnumber>\n"
xmlstring += " <URLtothemediafile>" + images_base_url_hires + str(picname) + "</URLtothemediafile>\n"
xmlstring += " <placeofcreation>" + str(placeofcreation) + "</placeofcreation>\n"
# xmlstring += " <artist>" + "<NAME>ère" + "</artist>\n"
xmlstring += " <references>" + str(references) + "</references>\n"
#xmlstring += " <wikidata>"+str(wikidata)+"</wikidata>\n"
#xmlstring += " <otherversions>"+ str(otherversions) +"</otherversions>\n"
if folio == "moderneband-voor":
xmlstring += ""
# xmlstring += " <description>Front of brown leather binding from 1993 made by the nun <NAME>ère from the [[:nl:Onze-Lieve-Vrouweabdij|Onze-Lieve-Vrouweabdij]] in [[:nl:Oosterhout (Noord-Brabant)|Oosterhout]]. Inside is the manuscript containing "+ title_overall + " - " + signature +"</description>\n"
# xmlstring += " <URLtothemediafile>" + images_base_url_hires + str(picname) + "</URLtothemediafile>\n"
# xmlstring += " <GWToolsettitle>"+ signature+ " - Front of brown leather binding from 1993. Inside is the manuscript containing "+ title_overall_GWT+"</GWToolsettitle>\n"
# xmlstring += " <title>Front of brown leather binding from 1993 made by the nun <NAME>ère from the [[:nl:Onze-Lieve-Vrouweabdij|Onze-Lieve-Vrouweabdij]] in [[:nl:Oosterhout (Noord-Brabant)|Oosterhout]]. Inside is the manuscript containing "+ title_overall+" - " + signature +"</title>\n"
# xmlstring += " <medium>Brown leather binding</medium>\n"
# xmlstring += " <date>" + "1993" + "</date>\n"
# xmlstring += " <artist>" + "<NAME>ère" + "</artist>\n"
# xmlstring += " <placeofcreation>" + "[[:nl:Onze-Lieve-Vrouweabdij|Onze-Lieve-Vrouweabdij]] in [[:nl:Oosterhout (Noord-Brabant)|Oosterhout]]" + "</placeofcreation>\n"
# xmlstring += " <dimensions>" + "Approx. {{Size|mm|257|190}}" + "</dimensions>\n"
else: #all the normal folio
xmlstring += " <description>Folium " + str(folio) + " from the " + str(title_GWT) + " from the collection of the [[:en:National Library of the Netherlands|National Library of the Netherlands]].</description>\n"
xmlstring += " <GWToolsettitle>" + str(title_GWT)+ " - " + signature + ", folium "+ str(folio) + "</GWToolsettitle>\n"
xmlstring += " <title>"+str(title_short)+ " - " + str(title) + " - " + signature + ", folium "+ str(folio)+"</title>\n"
xmlstring += " <medium>" + str(medium) + "</medium>\n"
xmlstring += " <date>" + str(date) + "</date>\n"
xmlstring += " <notes>" + str(notes) + "</notes>\n"
xmlstring += " <dimensions>" + str(dimensions) + "</dimensions>\n"
xmlstring += " </record>\n"
xmlstring += "</records>\n"
XMLoutputfile.write(xmlstring)
XMLoutputfile.close()
|
StarcoderdataPython
|
11211746
|
<reponame>player1537-forks/spack
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGgbio(RPackage):
"""Visualization tools for genomic data.
The ggbio package extends and specializes the grammar of graphics for
biological data. The graphics are designed to answer common scientific
questions, in particular those often asked of high throughput genomics
data. All core Bioconductor data structures are supported, where
appropriate. The package supports detailed views of particular genomic
regions, as well as genome-wide overviews. Supported overviews include
ideograms and grand linear views. High-level plots include sequence
fragment length, edge-linked interval to data view, mismatch pileup, and
several splicing summaries."""
bioc = "ggbio"
version('1.42.0', commit='3<PASSWORD>')
version('1.38.0', commit='<KEY>')
version('1.32.0', commit='<KEY>')
version('1.30.0', commit='<KEY>')
version('1.28.5', commit='594521ca556ef7d97cf4882ecfa54d22c2a2faba')
version('1.26.1', commit='<KEY>')
version('1.24.1', commit='<KEY>')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-ggplot2@1.0.0:', type=('build', 'run'))
depends_on('r-gridextra', type=('build', 'run'))
depends_on('r-scales', type=('build', 'run'))
depends_on('r-reshape2', type=('build', 'run'))
depends_on('r-gtable', type=('build', 'run'))
depends_on('r-hmisc', type=('build', 'run'))
depends_on('r-biovizbase@1.23.3:', type=('build', 'run'))
depends_on('r-biovizbase@1.28.2:', type=('build', 'run'), when='@1.28.5:')
depends_on('r-biovizbase@1.29.2:', type=('build', 'run'), when='@1.30.0:')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-s4vectors@0.13.13:', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-iranges@2.11.16:', type=('build', 'run'), when='@1.26.1:')
depends_on('r-genomeinfodb@1.1.3:', type=('build', 'run'))
depends_on('r-genomicranges@1.21.10:', type=('build', 'run'))
depends_on('r-genomicranges@1.29.14:', type=('build', 'run'), when='@1.26.1:')
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-rsamtools@1.17.28:', type=('build', 'run'))
depends_on('r-genomicalignments@1.1.16:', type=('build', 'run'))
depends_on('r-bsgenome', type=('build', 'run'))
depends_on('r-variantannotation@1.11.4:', type=('build', 'run'))
depends_on('r-rtracklayer@1.25.16:', type=('build', 'run'))
depends_on('r-genomicfeatures@1.17.13:', type=('build', 'run'))
depends_on('r-genomicfeatures@1.29.11:', type=('build', 'run'), when='@1.26.1:')
depends_on('r-organismdbi', type=('build', 'run'))
depends_on('r-ggally', type=('build', 'run'))
depends_on('r-ensembldb@1.99.13:', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-annotationfilter', type=('build', 'run'))
depends_on('r-rlang', type=('build', 'run'), when='@1.28.5:')
|
StarcoderdataPython
|
3457489
|
<gh_stars>1-10
from enum import auto, Enum
import logging
import pathlib
from .yaml import load_yaml
import tomli
from .cache import Cache
from .signals import document_loaded
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Literal,
Optional,
Type,
TypeVar,
Union,
)
import re
import dateparser
T = TypeVar('T')
class Publisher:
"""A publisher produces the final output files, applying templates etc. as
needed.
"""
def publish_document(self, document: 'DocumentNode') -> pathlib.Path:
"""Publish a document node.
:return: The path of the generated file.
"""
pass
def publish_index(self, index: 'IndexNode') -> pathlib.Path:
"""Publish an index node.
:return: The path of the generated file."""
pass
def publish_resource(self, resource: 'ResourceNode') -> pathlib.Path:
"""Publish a resource node.
:return: The path of the generated file."""
pass
def publish_static(self, static: 'StaticNode') -> pathlib.Path:
"""Publish a static node.
:return: The path of the generated file."""
pass
def publish_generated(self, generated: 'GeneratedNode') -> pathlib.Path:
"""Publish a generated node.
:return: The path of the generated file."""
pass
class NodeKind(Enum):
Resource = auto()
Index = auto()
Document = auto()
Data = auto()
Static = auto()
Generated = auto()
class Node:
kind: NodeKind
"""The node kind, must be set in the constructor."""
src: Optional[pathlib.Path]
"""The full path to the source file.
This is an OS specific path object."""
path: pathlib.PurePosixPath
"""The output path, relative to the page root.
All paths *must* start with ``/``.
"""
metadata: Dict[str, Any]
"""Metadata associated with this node."""
parent: Optional['Node']
"""The parent node, if any."""
__nodes: Dict[str, 'Node']
"""A dictionary containing all child nodes.
The key is the path to the child node relative to this node. I.e. if the
path of this node is ``/foo``, and it has a child at ``/foo/bar``, the
key for that child would be ``bar``."""
@property
def children(self):
"""A list containing all direct children of this node."""
return self.__nodes.values()
def __init__(self):
self.__nodes = {}
self.metadata = {}
self.parent = None
def add_child(self, child: 'Node') -> None:
"""Add a new child to this node.
The path of the child node must be a sub-path of the current node
path, with exactly one more component. I.e. if the current node path is
``/foo/bar``, a node with path ``/foo/bar/baz`` can be added as a
child, but ``/baz/`` or ``/foo/bar/boo/baz`` would be invalid."""
assert self.path != child.path
name = child.path.relative_to(self.path).parts[0]
self.__nodes[name] = child
child.parent = self
def __repr__(self):
return f'{self.__class__.__name__}({self.path})'
def select_children(self):
"""Select all children of this node and return them as a
:py:class:`~liara.query.Query`."""
from .query import Query
return Query(self.children)
def get_child(self, name) -> Optional['Node']:
"""Get a child of this node.
:return: The child node or ``None`` if no such child exists."""
return self.__nodes.get(name)
def get_children(self, *, recursive=False) -> Iterable['Node']:
"""Get all children of this node.
This function differs from :py:meth:`select_children` in two important
ways:
* It returns a list of :py:class:`Node` instances and does not wrap it
in a :py:class:`~liara.query.Query`
* It can enumerate all children recursively.
"""
for child in self.children:
yield child
if recursive:
yield from child.get_children(recursive=True)
def process(self, cache: Cache) -> None:
"""Some nodes -- resources, documents, etc. need to be processed. As
this can be a resource-intense process (for instance, it may require
generating images), processing can cache results and has to be
called separately instead of being executed as part of some other
operation.
By convention this method should populate ``self.content``.
"""
pass
_metadata_marker = re.compile(r'(---|\+\+\+)\n')
class MetadataKind(Enum):
Unknown = auto()
Yaml = auto()
Toml = auto()
def extract_metadata_content(text: str):
"""Extract metadata and content.
Metadata is stored at the beginning of the file, separated using a metadata
seperation marker, for instance::
+++
this_is_toml = True
+++
content
This function splits the provided text into metadata and actual content.
"""
meta_start, meta_end = 0, 0
content_start, content_end = 0, 0
metadata_kind = MetadataKind.Unknown
# If the document doesn't end with a trailing new-line, the metadata regex
# will get confused. We'll thus add a new-line to make sure this works
if text and text[-1] != '\n':
text += '\n'
for match in _metadata_marker.finditer(text):
if meta_start == 0:
if match.group() == '---\n':
metadata_kind = MetadataKind.Yaml
elif match.group() == '+++\n':
metadata_kind = MetadataKind.Toml
meta_start = match.span()[1]
elif meta_end == 0:
if match.group() == '---\n':
if metadata_kind != MetadataKind.Yaml:
raise Exception('Metadata markers mismatch -- started '
'with "---", but ended with "+++"')
elif match.group() == '+++\n':
if metadata_kind != MetadataKind.Toml:
raise Exception('Metadata markers mismatch -- started '
'with "+++", but ended with "---"')
meta_end = match.span()[0]
content_start = match.span()[1]
content_end = len(text)
break
if metadata_kind == MetadataKind.Yaml:
metadata = load_yaml(text[meta_start:meta_end])
elif metadata_kind == MetadataKind.Toml:
metadata = tomli.loads(text[meta_start:meta_end])
else:
# We didn't find any metadata here, so everything must be content
return {}, text
content = text[content_start:content_end]
return metadata, content
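# Example (illustrative, with made-up content): for the input
#
#     +++
#     title = "Hello"
#     +++
#     Body text.
#
# extract_metadata_content returns ({'title': 'Hello'}, 'Body text.\n').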
def fixup_relative_links(document: 'DocumentNode'):
"""Replace relative links in the document with links relative to the
site root."""
# early out if there's no relative link in here, as the parsing is
# very expensive
if "href=\"." not in document.content:
return
from bs4 import BeautifulSoup
soup = BeautifulSoup(document.content, 'lxml')
def is_relative_url(s):
return s and s[0] == '.'
for link in soup.find_all('a', {'href': is_relative_url}):
target = link.attrs.get('href')
link.attrs['href'] = \
str(document.path.parent / pathlib.PurePosixPath(target))
document.content = str(soup)
def fixup_date(document: 'DocumentNode'):
"""If the date in the document is a string, try to parse it to produce a
datetime object."""
if 'date' in document.metadata:
date = document.metadata['date']
if isinstance(date, str):
document.metadata['date'] = dateparser.parse(date)
class FixupDateTimezone:
"""Set the timezone of the ``metadata['date']`` field to the local timezone
if no timezone has been set."""
def __init__(self):
import tzlocal
self.__tz = tzlocal.get_localzone()
def __call__(self, document: 'DocumentNode'):
'''If the date in the document has no timezone info, set it to the local
timezone.'''
if 'date' in document.metadata:
date = document.metadata['date']
if date.tzinfo is None:
document.metadata['date'] = date.replace(tzinfo=self.__tz)
class DocumentNode(Node):
_load_fixups: List[Callable]
"""These functions are called right after the document has been loaded,
and can be used to fixup metadata, content, etc. before it gets processed
(These should be called before :py:meth:`load`/:py:meth:`reload`
returns.)"""
_process_fixups: List[Callable]
"""These functions are called after a document has been processed
(These should be called before :py:meth:`process` returns)."""
def __init__(self, src, path, metadata_path=None):
super().__init__()
self.kind = NodeKind.Document
self.src = src
self.path = path
self.metadata_path = metadata_path
self.content = None
self._load_fixups = []
self._process_fixups = []
def set_fixups(self, *, load_fixups, process_fixups) -> None:
"""Set the fixups that should be applied to this document node.
The fixups should be set *before* calling :py:meth:`load`.
:param load_fixups: These functions will be executed before
:py:meth:`load` returns.
:param process_fixups: These functions will be executed before
:py:meth:`process` returns.
"""
self._load_fixups = load_fixups
self._process_fixups = process_fixups
def load(self):
"""Load the content of this node."""
self._load()
self._apply_load_fixups()
document_loaded.send(self, document=self, content=self._raw_content)
def validate_metadata(self):
if self.metadata is None:
raise Exception(f"No metadata for document: '{self.src}'")
if 'title' not in self.metadata:
raise Exception(f"'title' missing for Document: '{self.src}'")
def _apply_load_fixups(self):
for fixup in self._load_fixups:
fixup(self)
def _apply_process_fixups(self):
for fixup in self._process_fixups:
fixup(self)
def _load(self):
if self.metadata_path:
self.metadata = load_yaml(self.metadata_path.read_text())
self._raw_content = self.src.read_text('utf-8')
else:
self.metadata, self._raw_content = \
extract_metadata_content(self.src.read_text('utf-8'))
def reload(self):
"""Reload this node from disk.
By default, this just forwards to :py:meth:`_load`.
"""
self._load()
self._apply_load_fixups()
def publish(self, publisher: Publisher) -> pathlib.Path:
"""Publish this node using the provided publisher."""
return publisher.publish_document(self)
class HtmlDocumentNode(DocumentNode):
"""A node representing a Html document."""
def process(self, cache: Cache):
self.content = self._raw_content
self._apply_process_fixups()
class MarkdownDocumentNode(DocumentNode):
"""A node representing a Markdown document."""
def process(self, cache: Cache):
import markdown
from .md import HeadingLevelFixupExtension
import hashlib
byte_content = self._raw_content.encode('utf-8')
content_hash = hashlib.sha256(byte_content).digest()
if content := cache.get(content_hash):
self.content = content
return
extensions = [
'pymdownx.arithmatex',
HeadingLevelFixupExtension(),
'fenced_code',
'codehilite',
'smarty',
'tables',
'admonition'
]
extension_configs = {
'codehilite': {
'css_class': 'code'
},
'pymdownx.arithmatex': {
'generic': True
}
}
self.content = markdown.markdown(self._raw_content,
extensions=extensions,
extension_configs=extension_configs)
self._apply_process_fixups()
cache.put(content_hash, self.content)
class DataNode(Node):
"""A data node.
Data nodes consist of a dictionary. This can be used to store arbitrary
data as part of a :py:class:`liara.site.Site`, and make it available to
templates (for instance, a menu structure could go into a data node.)
"""
def __init__(self, src, path):
super().__init__()
self.kind = NodeKind.Data
self.src = src
self.path = path
self.content = load_yaml(self.src.open('r'))
class IndexNode(Node):
"""An index node.
Index nodes are created for every folder if there is no ``_index`` node
present, and from indices. An index node can optionally contain a list of
references, in case the referenced nodes by this index are not direct
children of this node.
"""
references: List[Node]
"""Nodes referenced by this index node.
An index can not rely on using ``children`` as those have to be below the
path of the parent node. The ``references`` list allows to reference nodes
elsewhere in the site."""
def __init__(self, path, metadata: Optional[Dict] = None):
super().__init__()
self.kind = NodeKind.Index
self.src = None
self.path = path
self.metadata = metadata if metadata else {}
self.references = []
def add_reference(self, node):
"""Add a reference to an arbitrary node in the site."""
self.references.append(node)
def publish(self, publisher) -> pathlib.Path:
"""Publish this node using the provided publisher."""
return publisher.publish_index(self)
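# A hedged usage sketch (paths and the referenced node are made-up examples):
#
#     root = IndexNode(pathlib.PurePosixPath('/'))
#     blog = IndexNode(pathlib.PurePosixPath('/blog'))
#     root.add_child(blog)                 # '/blog' is one path component below '/'
#     blog.add_reference(some_document)    # reference a node living elsewhere in the site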
class GeneratedNode(Node):
def __init__(self, path, metadata: Optional[Dict] = None):
super().__init__()
self.kind = NodeKind.Generated
self.src = None
self.path = path
self.metadata = metadata if metadata else {}
self.content: Optional[Union[bytes, str]] = None
def generate(self) -> None:
"""Generate the content of this node.
After this function has finished, ``self.content`` must be populated
with the generated content."""
pass
def publish(self, publisher: Publisher):
"""Publish this node using the provided publisher."""
return publisher.publish_generated(self)
_REDIRECTION_TEMPLATE = """<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="0; url={{NEW_URL}}">
<script type="text/javascript">
window.location.href = "{{NEW_URL}}"
</script>
<title>Page Redirection</title>
</head>
<body>
<h1>Page has been moved</h1>
<p>If you are not redirected automatically, follow this
<a href='{{NEW_URL}}'>link.</a>.</p>
</body>
</html>"""
class RedirectionNode(GeneratedNode):
"""A redirection node triggers a redirection to another page.
This node gets processed into a simple web site which tries to redirect
using both ``<meta http-equiv="refresh">`` and Javascript code setting
``window.location``.
"""
def __init__(self,
path: pathlib.PurePosixPath,
dst: pathlib.PurePosixPath,
*,
base_url=''):
super().__init__(path)
self.dst = dst
self.__base_url = base_url
def generate(self):
text = _REDIRECTION_TEMPLATE.replace('{{NEW_URL}}',
self.__base_url
+ self.dst.as_posix())
self.content = text
class ResourceNode(Node):
"""A resource node applies some process when creating the output.
This is useful if you have content where the source cannot be interpreted,
and requires some process first before it becomes usable -- for instance,
``SASS`` to ``CSS`` compilation.
"""
def __init__(self, src, path, metadata_path=None):
super().__init__()
self.kind = NodeKind.Resource
self.src = src
self.path = path
self.content = None
if metadata_path:
self.metadata = load_yaml(open(metadata_path, 'r'))
def reload(self) -> None:
pass
def publish(self, publisher: Publisher) -> pathlib.Path:
"""Publish this node using the provided publisher."""
return publisher.publish_resource(self)
_SASS_COMPILER = Literal['cli', 'libsass']
class SassResourceNode(ResourceNode):
"""This resource node compiles ``.sass`` and ``.scss`` files to CSS
when built.
"""
__log = logging.getLogger(f'{__name__}.{__qualname__}')
def __init__(self, src, path, metadata_path=None):
super().__init__(src, path, metadata_path)
if src.suffix not in {'.scss', '.sass'}:
raise Exception("SassResource can be only created for a .scss or "
" .sass file")
self.path = self.path.with_suffix('.css')
self.__compiler: _SASS_COMPILER = 'cli'
def set_compiler(self, compiler: _SASS_COMPILER):
self.__compiler = compiler
def reload(self) -> None:
self.content = None
def process(self, cache: Cache):
import hashlib
if self.content is not None:
return
assert self.src
hash_key = hashlib.sha256(self.src.open('rb').read()).digest()
if (value := cache.get(hash_key)) is not None:
self.content = value
return
try:
if self.__compiler == 'cli':
self._compile_using_cli()
elif self.__compiler == 'libsass':
self._compile_using_libsass()
cache.put(hash_key, self.content)
except Exception as e:
self.__log.warning(f'Failed to compile SCSS file "{self.src}"',
exc_info=e)
def _compile_using_cli(self):
import subprocess
import sys
self.__log.debug(f'Processing "{self.src}" using "sass" binary')
result = subprocess.run(
['sass', str(self.src)],
# On Windows, we need to set shell=True, otherwise, the
# sass binary installed using npm install -g sass won't
# be found.
shell=sys.platform == 'win32',
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
result.check_returncode()
self.content = result.stdout
def _compile_using_libsass(self):
import sass
self.__log.debug(f'Processing "{self.src}" using "libsass"')
self.content = sass.compile(
filename=str(self.src)).encode('utf-8')
class NodeFactory(Generic[T]):
"""A generic factory for nodes, which builds nodes based on the file
type."""
__known_types: Dict[str, Type]
def __init__(self):
self.__known_types = {}
@property
def known_types(self):
return self.__known_types.keys()
def register_type(self,
suffixes: Union[str, Iterable[str]],
node_type: type) -> None:
"""Register a new node type.
:param suffixes: Either one suffix, or a list of suffixes to be
registered for this type. For instance, a node
representing an image could be registered to
``[.jpg, .png]``.
:param node_type: The type of the node to be created.
"""
if isinstance(suffixes, str):
suffixes = [suffixes]
for suffix in suffixes:
self.__known_types[suffix] = node_type
def _create_node(self, cls, src, path, metadata_path) -> T:
"""This is the actual creation function.
:param cls: The class of the node to instantiate.
:param src: The source file path.
:param path: The output path.
:param metadata_path: The path to a metadata file.
:return: An instance of ``cls``.
Derived classes can use this function to customize the node creation.
"""
return cls(src, path, metadata_path)
def create_node(self, suffix: str,
src: pathlib.Path,
path: pathlib.PurePosixPath,
metadata_path: Optional[pathlib.Path] = None) -> T:
"""Create a node using the provided parameters."""
cls = self.__known_types[suffix]
node = self._create_node(cls, src, path, metadata_path)
return self._on_node_created(node)
def _on_node_created(self, node: T):
"""Called after a node has been created.
This can be used to further configure the node from the factory
before returning it from the factory, for instance, to pass
configuration down into the node.
.. versionadded:: 2.3.4"""
return node
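# Illustrative sketch (added; not part of the original module -- the file paths
# are made up): registering a suffix with a factory and creating a node from it.
#
#   >>> factory = NodeFactory()
#   >>> factory.register_type(['.scss', '.sass'], SassResourceNode)
#   >>> node = factory.create_node('.scss',
#   ...                            pathlib.Path('styles/site.scss'),
#   ...                            pathlib.PurePosixPath('/styles/site.scss'))
#   >>> node.path   # SassResourceNode rewrites the suffix
#   PurePosixPath('/styles/site.css')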
class ResourceNodeFactory(NodeFactory[ResourceNode]):
"""A factory for resource nodes."""
__log = logging.getLogger(f'{__name__}.{__qualname__}')
def __init__(self, configuration):
super().__init__()
self.register_type(['.sass', '.scss'], SassResourceNode)
self.__sass_compiler = configuration['build.resource.sass.compiler']
if self.__sass_compiler == 'libsass':
self.__log.info(
'Support for "libsass" as the compiler for SASS '
'files is deprecated and will be removed in a future release. '
'Please check the documentation for how to use the SASS '
'command line compiler.')
def _on_node_created(self, node: T):
if isinstance(node, SassResourceNode):
node.set_compiler(self.__sass_compiler)
return node
class DocumentNodeFactory(NodeFactory[DocumentNode]):
"""A factory for document nodes."""
def __setup_fixups(self, configuration):
if configuration['relaxed_date_parsing']:
# This is tricky, as fixup_date depends on this running
# first. We thus prepend this before any other fixup and hope this
# is the only one with ordering issues.
self.__load_fixups.insert(0, fixup_date)
if configuration['allow_relative_links']:
self.__process_fixups.append(fixup_relative_links)
def __init__(self, configuration):
super().__init__()
self.__load_fixups = [FixupDateTimezone()]
self.__process_fixups = []
self.__setup_fixups(configuration)
self.register_type(['.md'], MarkdownDocumentNode)
self.register_type(['.html'], HtmlDocumentNode)
def _on_node_created(self, node):
node.set_fixups(
load_fixups=self.__load_fixups,
process_fixups=self.__process_fixups)
node.load()
return node
class StaticNode(Node):
"""A static data node.
Static nodes are suitable for large static data which never changes, for
instance, binary files, videos, images etc.
"""
def __init__(self, src, path, metadata_path=None):
super().__init__()
self.kind = NodeKind.Static
self.src = src
self.path = path
if metadata_path:
self.metadata = load_yaml(open(metadata_path, 'r'))
def update_metadata(self) -> None:
"""Update metadata by deriving some metadata from the source file,
if possible.
For static nodes pointing to images, this will create a new metadata
field ``image_size`` and populate it with the image resolution."""
from PIL import Image
if self.is_image:
image = Image.open(self.src)
self.metadata.update({
'image_size': image.size
})
@property
def is_image(self):
"""Return ``True`` if this static file is pointing to an image."""
return self.src.suffix in {'.jpg', '.png'}
def publish(self, publisher: Publisher) -> pathlib.Path:
"""Publish this node using the provided publisher."""
return publisher.publish_static(self)
class ThumbnailNode(ResourceNode):
def __init__(self, src, path, size):
super().__init__(src, path)
self.__size = size
def __get_hash_key(self) -> bytes:
import hashlib
assert self.src
hash_key = hashlib.sha256(self.src.open('rb').read()).digest()
if 'height' in self.__size:
hash_key += self.__size['height'].to_bytes(4, 'little')
else:
hash_key += bytes([0, 0, 0, 0])
if 'width' in self.__size:
hash_key += self.__size['width'].to_bytes(4, 'little')
else:
hash_key += bytes([0, 0, 0, 0])
return hash_key
def process(self, cache: Cache):
from PIL import Image
import io
hash_key = self.__get_hash_key()
if content := cache.get(hash_key):
self.content = content
return
image = Image.open(self.src)
width, height = image.size
scale = 1
if 'height' in self.__size:
scale = min(self.__size['height'] / height, scale)
if 'width' in self.__size:
scale = min(self.__size['width'] / width, scale)
width *= scale
height *= scale
image.thumbnail((width, height,))
storage = io.BytesIO()
assert self.src
if self.src.suffix == '.jpg':
image.save(storage, 'JPEG')
self.content = storage.getbuffer()
elif self.src.suffix == '.png':
image.save(storage, 'PNG')
self.content = storage.getbuffer()
else:
raise Exception("Unsupported image type for thumbnails")
cache.put(hash_key, bytes(self.content))
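# Note (added for clarity, not in the original): the ``size`` dictionary may
# contain 'width', 'height', or both; the image is only ever scaled down, using
# the more restrictive of the two scale factors, e.g.
#
#   >>> thumb = ThumbnailNode(pathlib.Path('images/photo.jpg'),
#   ...                       pathlib.PurePosixPath('/images/photo.jpg'),
#   ...                       {'width': 640})
#   >>> thumb.process(cache)   # 'cache' is whatever Cache instance the build uses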
|
StarcoderdataPython
|
5027036
|
#!/usr/bin/env python
# Copyright 2013 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# """
# @author: <NAME> <<EMAIL>>
# """
#
# """A simple little script to exemplify/test ipmi.console module
# """
import fcntl
import os
import select
import sys
import termios
import tty
from pyghmi.ipmi import console
import threading
def _doinput(sol):
while True:
select.select((sys.stdin,), (), (), 600)
try:
data = sys.stdin.read()
except (IOError, OSError) as e:
if e.errno == 11:
continue
raise
sol.send_data(data)
def _print(data):
bailout = False
if type(data) not in (str, unicode):
bailout = True
data = repr(data)
sys.stdout.write(data)
sys.stdout.flush()
if bailout:
raise Exception(data)
def main():
tcattr = termios.tcgetattr(sys.stdin)
newtcattr = tcattr
# TODO(jbjohnso): add our exit handler
newtcattr[-1][termios.VINTR] = 0
newtcattr[-1][termios.VSUSP] = 0
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, newtcattr)
tty.setraw(sys.stdin.fileno())
currfl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, currfl | os.O_NONBLOCK)
try:
if len(sys.argv) < 4:
passwd = os.environ['IPMIPASSWORD']
else:
passwd_file = sys.argv[3]
with open(passwd_file, "r") as f:
passwd = f.read()
sol = console.Console(bmc=sys.argv[1], userid=sys.argv[2], password=<PASSWORD>,
iohandler=_print, force=True)
inputthread = threading.Thread(target=_doinput, args=(sol,))
inputthread.daemon = True
inputthread.start()
sol.main_loop()
except Exception:
currfl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, currfl ^ os.O_NONBLOCK)
termios.tcsetattr(sys.stdin, termios.TCSANOW, tcattr)
return 0
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
1668802
|
<reponame>xram64/AdventOfCode2021<filename>day03/day03.py<gh_stars>1-10
## Advent of Code 2021: Day 3
## https://adventofcode.com/2021/day/3
## <NAME> | github.com/xram64
## Answers: [Part 1]: 3633500, [Part 2]: 4550283
import sys
# Return most commonly-found bit, breaking ties in favor of '1'
def get_most_common_bit(numbers, pos):
frequency_of = [0, 0] # frequencies for each bit [<0's>, <1's>]
for num in numbers:
if num[pos] == 0: frequency_of[0] += 1
elif num[pos] == 1: frequency_of[1] += 1
most_common_bit = 0 if (frequency_of[0] > frequency_of[1]) else 1
return most_common_bit
# Return least commonly-found bit, breaking ties in favor of '0'
def get_least_common_bit(numbers, pos):
return (get_most_common_bit(numbers, pos) ^ 1)
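# Worked example (added for illustration): for numbers = [[1,0],[1,1],[0,1]]
# and pos = 0 the bits at that position are 1, 1, 0, so
# get_most_common_bit(...) returns 1 and get_least_common_bit(...) returns 0;
# ties break to 1 and 0 respectively.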
if __name__ == '__main__':
with open('day03_input.txt', 'r') as f:
def parse(line): return [int(bit) for bit in list(line) if (bit != '\n')]
diag_numbers = list( map(parse, f.readlines()) )
diag_numbers_len = len(diag_numbers[0])
##############
### Part 1 ###
most_common_bits = ''
for pos in range(diag_numbers_len):
most_common_bits += str( get_most_common_bit(diag_numbers, pos) )
# Gamma rate: Most common bits from input
γ = int(most_common_bits, 2)
# Epsilon rate: Least common bits from input
ε = γ ^ int('1'*diag_numbers_len, 2) # invert bits in γ
print(f"[Part 1] Gamma rate: γ = {γ}. Epsilon rate: ε = {ε}. Power consumption: {ε*γ}.")
##############
### Part 2 ###
oxy_rating_filtered_nums = diag_numbers
co2_rating_filtered_nums = diag_numbers
## Oxygen generator rating
for pos in range(diag_numbers_len):
# Find most common bit (MCB) and filter numbers not matching bit criteria
mcb = get_most_common_bit(oxy_rating_filtered_nums, pos)
oxy_rating_filtered_nums = list( filter(lambda num: num[pos] == mcb, oxy_rating_filtered_nums) )
if len(oxy_rating_filtered_nums) == 1:
# Convert final element matching bit criteria to a decimal integer
oxy_rating = int(''.join( [str(n) for n in oxy_rating_filtered_nums[0]] ), 2)
break
elif len(oxy_rating_filtered_nums) <= 0:
print('Error: Oxygen rating list empty.')
sys.exit()
else:
print('Error: Multiple numbers found matching bit criteria for oxygen rating.')
sys.exit()
## CO2 scrubber rating
for pos in range(diag_numbers_len):
# Find least common bit (LCB) and filter numbers not matching bit criteria
lcb = get_least_common_bit(co2_rating_filtered_nums, pos)
co2_rating_filtered_nums = list( filter(lambda num: num[pos] == lcb, co2_rating_filtered_nums) )
if len(co2_rating_filtered_nums) == 1:
# Convert final element matching bit criteria to a decimal integer
co2_rating = int(''.join( [str(n) for n in co2_rating_filtered_nums[0]] ), 2)
break
elif len(co2_rating_filtered_nums) <= 0:
print('Error: CO2 rating list empty.')
sys.exit()
else:
print('Error: Multiple numbers found matching bit criteria for CO2 rating.')
sys.exit()
print(f"[Part 2] Oxygen generator rating: {oxy_rating}. CO2 scrubber rating: {co2_rating}. Life support rating: {oxy_rating*co2_rating}.")
|
StarcoderdataPython
|
1950605
|
class LimitOffsetPagination(object):
limit = 10
offset = 0
def __init__(self, req):
self.req = req
self.count = None
def paginate_queryset(self, queryset):
self.count = queryset.count()
self.limit = self.req.get_param_as_int('limit', default=self.limit)
self.offset = self.req.get_param_as_int('offset', default=self.offset)
return queryset.limit(self.limit).offset(self.offset)
def get_paginated_response(self, data):
return {
'count': self.count,
'next': self.get_next_link(),
'previous': self.get_prev_link(),
'results': data
}
def get_link(self, limit, offset):
absolute_url = '{scheme}://{netloc}'.format(
scheme=self.req.scheme,
netloc=self.req.netloc
)
querystring = '?limit=%s&offset=%s' % (limit, offset)
return '%s%s%s' % (absolute_url, self.req.path, querystring)
def get_next_link(self):
return self.get_link(
limit=self.limit,
offset=self.offset + self.limit
) if self.count > self.offset + self.limit else None
def get_prev_link(self):
return self.get_link(
limit=self.limit,
offset=max(0, self.offset - self.limit)
) if self.offset > 0 else None
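# Usage sketch (illustrative only -- the resource and query below are made up;
# assumes a Falcon-style request object with get_param_as_int() and a
# SQLAlchemy-style query exposing count()/limit()/offset(), as used above):
#
#   class ItemsResource:
#       def on_get(self, req, resp):
#           paginator = LimitOffsetPagination(req)
#           page = paginator.paginate_queryset(session.query(Item))
#           resp.media = paginator.get_paginated_response(
#               [item.to_dict() for item in page])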
|
StarcoderdataPython
|
220067
|
<gh_stars>0
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : security-auditing
Case Name : Enable auditing of user login/logout; audit_login_logout defaults to 7
Description :
Step 1. Set gs_guc reload -N all -I all -c "audit_login_logout=7"
Step 2. Log in to the database with a correct user and password;
Step 3. Log in to the database with a wrong user or password
Step 4. Create a user: create user user03 with password "{<PASSWORD>}";
Step 5. Drop the user: drop user user03 ;
Step 6. Check the audit log: select * from pg_query_audit('{start_time}','{end_time}')
where type like 'login%' or type like '%logout%' and username = 'user03';
with the time window set as close as possible to the database login time
Expect :
Step 1. Setting succeeds; show audit_login_logout; returns 7
Step 2. Login succeeds
Step 3. Login fails
Step 4. User is created successfully
Step 5. User is dropped successfully
Step 6. Only records of login success, login failure and logout are returned
History :
"""
import unittest
from time import sleep
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
Logger = Logger()
class Auditing(unittest.TestCase):
def setUp(self):
Logger.info('Opengauss_Function_Security_Auditing_Case0003 start')
self.sh_primy = CommonSH('PrimaryDbUser')
self.userNode = Node('PrimaryDbUser')
self.common = Common()
self.DB_ENV_PATH = macro.DB_ENV_PATH
def test_security(self):
start_time_msg = self.sh_primy.execut_db_sql('SELECT sysdate;')
start_time = start_time_msg.splitlines()[2].strip()
sql_cmd1 = f'show audit_login_logout;' \
f'create user user03 with password ' \
f'\'{macro.COMMON_PASSWD}\';'
msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
msg1_list = msg1.splitlines()
Logger.info(msg1_list)
if msg1_list[0].strip() == "audit_login_logout" and msg1_list[
2].strip() != '7':
excute_set1 = f'source {self.DB_ENV_PATH};' \
f'gs_guc reload -N all -I all -c ' \
f'"audit_login_logout=7"'
set_msg1 = self.userNode.sh(excute_set1).result()
Logger.info(set_msg1)
sql_set1 = f'show audit_login_logout;'
sql_msg1 = self.sh_primy.execut_db_sql(sql_set1)
self.common.equal_sql_mdg(sql_msg1, 'audit_login_logout', '7',
'(1 row)', flag='1')
new_password2 = macro.COMMON_PASSWD.lower() + "<PASSWORD>"
sleep(30)
login_success_cmd = f'source {self.DB_ENV_PATH};' \
f'gsql -d {self.userNode.db_name} -p ' \
f'{self.userNode.db_port} -U user03 -W ' \
f'"{macro.COMMON_PASSWD}" -c "\\q";'
Logger.info(login_success_cmd)
login_success_msg = self.userNode.sh(login_success_cmd).result()
self.assertTrue(login_success_msg == '')
sleep(3)
login_fail_cmd = f'source {self.DB_ENV_PATH};' \
f'gsql -d {self.userNode.db_name} -p ' \
f'{self.userNode.db_port} -U user03 -W ' \
f'"{<PASSWORD>_password2}" -c "\\q"'
Logger.info(login_fail_cmd)
login_fail_msg = self.userNode.sh(login_fail_cmd).result()
Logger.info(login_fail_msg)
self.assertIn('Invalid username/password,login denied', login_fail_msg)
sleep(30)
end_time_msg = self.sh_primy.execut_db_sql('SELECT sysdate;')
end_time = end_time_msg.splitlines()[2].strip()
sql_cmd3 = f'select * from pg_query_audit(\'{start_time}\',' \
f'\'{end_time}\');'
excute_cmd3 = f'source {self.DB_ENV_PATH};' \
f'gsql -d {self.userNode.db_name} -p ' \
f'{self.userNode.db_port} -c "{sql_cmd3}"'
Logger.info(excute_cmd3)
msg3 = self.userNode.sh(excute_cmd3).result()
Logger.info(msg3)
self.assertTrue(
msg3.find('failed,authentication for user(user03)failed') > -1)
self.assertTrue(msg3.find('success,the current user is:user03') > -1)
self.assertTrue(msg3.find('user_logout') > -1)
def tearDown(self):
sql_cmd1 = 'drop user user03;'
msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
Logger.info(msg1)
Logger.info('Opengauss_Function_Security_Auditing_Case0003 end')
|
StarcoderdataPython
|
58075
|
<reponame>manojgupta3051994/ga-learner-dsmp-repo<gh_stars>0
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data = np.genfromtxt(path,delimiter=',',skip_header=1)
print("\nData: \n\n",data)
print("\nType of data: \n\n",type(data))
census = np.concatenate((new_record,data),axis=0)
print (census)
# --------------
#Code starts here
age = np.array(census)[:,0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
print (max_age)
print (min_age)
print (age_mean)
print (round(age_std,2))
# --------------
#Code starts here
race_0 = census[census[:,2] == 0]
race_1 = census[census[:,2] == 1]
race_2 = census[census[:,2] == 2]
race_3 = census[census[:,2] == 3]
race_4 = census[census[:,2] == 4]
minority_race = 3
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
print (len_0)
print (len_1)
print (len_2)
print (len_3)
print (len_4)
# --------------
#Code starts here
senior_citizens = census[census[:,0] > 60]
working_hours_sum = np.sum(senior_citizens[:,6],axis = 0)
print (working_hours_sum)
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum/senior_citizens_len
print(round(avg_working_hours,2))
# --------------
#Code starts here
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = np.mean(high[:,7])
avg_pay_low = np.mean(low[:,7])
print(avg_pay_high == avg_pay_low)
|
StarcoderdataPython
|
8151631
|
import gensim as gs
import os
import numpy as np
import codecs
import re
import logging # Log the data given
import sys
import ast
import ConfigParser
from langdetect import detect
from nltk.corpus import stopwords
stopword = set(stopwords.words("english"))
#for LDA
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models, similarities
root = os.path.dirname(os.path.abspath(__file__))
utils = os.path.join(os.path.split(root)[0], 'utils')
resource = os.path.join((os.path.split(root)[0]), 'resources')
config_file = os.path.join(resource, 'config.properties')
# Insert at the front of sys.path, ensuring that our utils module is found first
sys.path.insert(0, utils)
# To find files with a particular substring
from find_files import findFiles
config = ConfigParser.SafeConfigParser()
config.read(config_file)
# op_dir = config.get('FilePath', 'corpus_path')
# log_dir = config.get('FilePath', 'log_path')
# model_loc = config.get('FilePath', 'model_path')
# get parameters from config file
language_model = config.get('Training', 'language_model')
tags_model = config.get('Training', 'tags_model')
# pvdm params
pvdm_size = int(config.get('pvdm', 'size'))
pvdm_min_count = int(config.get('pvdm', 'min_count'))
pvdm_window = int(config.get('pvdm', 'window'))
pvdm_negative = int(config.get('pvdm', 'negative'))
pvdm_workers = int(config.get('pvdm', 'workers'))
pvdm_sample = float(config.get('pvdm', 'sample'))
# pvdbow params
pvdbow_size = int(config.get('pvdbow', 'size'))
pvdbow_min_count = int(config.get('pvdbow', 'min_count'))
pvdbow_window = int(config.get('pvdbow', 'window'))
pvdbow_negative = int(config.get('pvdbow', 'negative'))
pvdbow_workers = int(config.get('pvdbow', 'workers'))
pvdbow_sample = float(config.get('pvdbow', 'sample'))
pvdbow_dm = int(config.get('pvdbow', 'dm'))
# LDA params
lda_topics = int(config.get('LDA', 'topics'))
lda_passes = int(config.get('LDA', 'passes'))
# LSA params
lsa_topics = int(config.get('LSA', 'topics'))
lsa_passes = int(config.get('LSA', 'passes'))
def is_ascii(s):
return all(ord(c) < 128 for c in s)
def process_text(lines, language): # Text processing
word_list = []
for line in lines:
if language == 'tags':
pre_query = line.split(",")
word_list = []
for str_word in pre_query:
word_list.append("".join(str_word.split()).lower())
word_list = uniqfy_list(word_list)
else:
try:
line = unicode(line, "UTF-8")
line = line.replace(u"\u00A0", " ")
except:
line = line
if is_ascii(line):# Any language using ASCII characters goes here
line = re.sub("[^a-zA-Z]", " ", line)
for word in line.split(' '):
if word not in stopword and len(word) > 1:
word_list.append(word.lower())
else:
for word in line.split(' '):
word_list.append(word)
return word_list
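# Example (added for illustration): process_text(["Deep Learning, NLP , NLP"], "tags")
# returns ["deeplearning", "nlp"] -- whitespace is stripped, tags are lower-cased
# and duplicates are removed; for other languages the text is tokenized instead.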
def process_file(filename, language): # File processing
try:
with codecs.open(filename, 'r', encoding='utf-8') as f:
lines = f.readlines()
f.close()
return process_text(lines, language)
except:
return []
def load_documents(filenames, language): # Creating TaggedDocuments
doc = []
for filename in filenames:
word_list = process_file(filename, language)
if word_list:
doc.append(gs.models.doc2vec.TaggedDocument(
words=word_list, tags=[filename]))
else:
logging.warning(filename + " failed to load in load_documents")
return doc
def load_documents_LDA(filenames, language):
flag = 0
texts = []
for filename in filenames:
word_list = process_file(filename, language)
if word_list:
# tokens = word_list[0].split()
texts.append(word_list)
flag = 1
else:
logging.warning(filename + " failed to load in load_documents")
return texts
def train_model_pvdm(directory, language):
if language == ['tags']:
doc = load_documents(findFiles(directory, ['tag']), "en-text")
else:
doc = load_documents(findFiles(directory, [language]), language)
if not doc:
return 0
model = gs.models.doc2vec.Doc2Vec(doc, size=pvdm_size, min_count=pvdm_min_count,
window=pvdm_window, negative=pvdm_negative, workers=pvdm_workers, sample=pvdm_sample)
return model
def train_model_pvdbow(directory, language):
if language == ['tags']:
doc = load_documents(findFiles(directory, ['tag']), "en-text")
else:
doc = load_documents(findFiles(directory, [language]), language)
if not doc:
return 0
model = gs.models.doc2vec.Doc2Vec(doc, size=pvdbow_size, min_count=pvdbow_min_count, window=pvdbow_window,
negative=pvdbow_negative, workers=pvdbow_workers, sample=pvdbow_sample, dm=pvdbow_dm) # Apply PV-DBOW
return model
def train_model_LDA(directory, language):
# print 'in LDA func'
if language == ['tags']:
texts = load_documents_LDA(findFiles(directory, ['tag']), "en-text")
else:
texts = load_documents_LDA(findFiles(directory, [language]), language)
if not texts:
return 0
# turn our tokenized documents into an id <-> term dictionary
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# generate LDA model
ldamodel = gs.models.ldamodel.LdaModel(corpus, num_topics=lda_topics, id2word = dictionary, passes=lda_passes)
return ldamodel
def train_model_LSA(directory, language):
if language == ['tags']:
texts = load_documents_LDA(findFiles(directory, ['tag']), "en-text")
else:
texts = load_documents_LDA(findFiles(directory, [language]), language)
if not texts:
return 0
# turn our tokenized documents into an id <-> term dictionary
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# generate LSA model
tfidf = gs.models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
lsamodel = gs.models.lsimodel.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=lsa_topics)
return lsamodel
def train_model_TFIDF(directory, language):
if language == ['tags']:
texts = load_documents_LDA(findFiles(directory, ['tag']), "en-text")
else:
texts = load_documents_LDA(findFiles(directory, [language]), language)
if not texts:
return 0
# turn our tokenized documents into an id <-> term dictionary
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# generate TF-IDF model
# lsamodel = gs.models.ldamodel.LdaModel(corpus, num_topics=lda_topics, id2word = dictionary, passes=lda_passes)
tfidf = gs.models.TfidfModel(corpus)
return tfidf
# lsi = gensim.models.lsimodel.LsiModel(corpus=mm, id2word=id2word, num_topics=400, chunksize=20000, distributed=True)
def uniqfy_list(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def get_all_lang(directory, string):
lst_lang = [name
for root, dirs, files in os.walk(directory)
for name in files
if name.endswith((string))]
lst_lang = uniqfy_list(lst_lang)
return lst_lang
|
StarcoderdataPython
|
4882260
|
"""
factor.py
Defines variables, variable sets, and dense factors over discrete variables (tables) for graphical models
Version 0.1.0 (2021-03-25)
(c) 2015-2021 <NAME> under the FreeBSD license; see license.txt for details.
"""
import numpy as np
#import autograd.numpy as np
from sortedcontainers import SortedSet as sset
## Under testing: cython-compiled variable sets for faster operations
try:
from pyGMs.varset_c import Var,VarSet
except ImportError:
#print "Compiled version not loaded; importing python version"
from pyGMs.varset_py import Var,VarSet # sortedcontainers version
#from .varset_py2 import Var,VarSet # numpy array version
inf = float('inf')
orderMethod = 'F' # TODO: currently stores in fortran order (as Matlab); should be trivially changable
#orderMethod = 'C' # Can we make this "seamless" to the user, and/or force them to do something consistent?
# Notes: column-major (order=F) puts last index sequentially ("big endian"): t[0 0 0], t[0 0 1], t[0 1 0] ...
# row major (order=C) puts 1st index sequentially ("little endian"): t[0 0 0], t[1 0 0], t[0 1 0], ...
class Factor(object):
"""A basic factor<float> class
Factors are the basic building block of our graphical model representations. In general, a factor
consists of a set of variables (its "scope"), and a table of values indicating f(x) for each
joint configuration x (a tuple of values) of its variables.
Variables are stored in sorted order; most of the time, factors are constructed by reading from files,
but if built by hand it is safest to use indexing to set the values, e.g.,
>>> f = Factor( [X,Y,Z], 0.0 ) # builds a factor over X,Y,Z filled with zeros
>>> f[0,1,0] = 1.5 # set f(X=0,Y=1,Z=0) to 1.5
Useful attributes are f.vars (the scope) and f.table (the table, a numpy array).
Factors are imbued with many basic operations for manipulation:
Operators: *, +, /, -, **, exp, log, abs, etc.
In-place versions: *=, +=, /=, -=, **=, expIP, logIP, etc.
Elimination: max, min, sum, lse (log-sum-exp), etc.
Conditioning: return a factor defined by a sub-table, assigning some variables to values
Other: argmax, argmin, sample, etc., return configurations of X (tuples)
"""
#v = VarSet([]) # internal storage for variable set (VarSet)
#t = np.ndarray([]) # internal storage for table (numpy array)
def __init__(self,vars=VarSet(),vals=1.0):
"""Constructor for Factor class
>>> f = Factor( [X,Y,Z],[vals] ) # creates factor over [X,Y,Z] with table [vals]
[vals] should be a correctly sized numpy array, or something that can be cast to the same.
"""
# TODO: add user-specified order method for values (order=)
# TODO: accept out-of-order vars list (=> permute vals as req'd)
try:
self.v = VarSet(vars) # try building varset with args
except TypeError: # if not iterable (e.g. single variable)
self.v = VarSet() # try just adding it
self.v.add(vars)
#assert( self.v.nrStates() > 0)
#if self.v.nrStatesDouble() > 1e8: raise ValueError("Too big!");
try:
self.t = np.empty(self.v.dims(), float, orderMethod);
self.t[:] = vals # try filling factor with "vals"
except ValueError: # if it's an incompatible shape,
self.t = np.reshape(np.array(vals, float), self.v.dims(), orderMethod) # try again using reshape
def __build(self,vs,ndarray):
"""Internal build function from numpy ndarray"""
self.v = vs
self.t = ndarray
return self
#TODO: def assign(self, F) : set self equal to rhs F, e.g., *this = F
def copy(self):
"""Copy constructor; make a copy of a factor"""
return Factor().__build(self.v.copy(),self.t.copy('K')) # order=orderMethod?
def changeVars(self, vars, copy=True):
"""Copy a factor but change its arguments (scope).
>>> f = Factor([X0,X1], table)
>>> g = f.changeVars([X7,X5])   # now, g(X5=b,X7=a) = f(X0=a,X1=b)
"""
v = VarSet(vars)
newOrder = [vars.index(x) for x in v]
if copy: ret = Factor(v, self.t.transpose(newOrder))
else: ret = Factor().__build(v, self.t.transpose(newOrder)) # try not to copy if possible
return ret
def __repr__(self):
"""Detailed representation: scope (varset) + table memory location"""
return 'Factor({:s},[0x{:x}])'.format(str(self.v),self.t.ctypes.data)
def __str__(self):
"""Basic string representation: scope (varset) only"""
return 'Factor({:s})'.format(str(self.v))
def latex(self, valueformat="0.4f", factorname="$f(x)$", varnames=None):
"""Return string containing latex code for table values.
Arguments:
valueformat : string formatter for values in value column; default "0.4f"
factorname : string for header of value column
varnames : dict mapping variable ID to string for that column (defaults to $x_i$ if None)
"""
tex = "\\begin{tabular}[t]{" + "".join(["c" for v in self.v]) + "|c}\n"
#tex += " & ".join(["$x"+str(int(v))+"$" for v in self.v]) + " & $f_{"+"".join([str(int(v)) for v in self.v])+"}$ \\\\ \\hline \n"
if varnames is None: varnames = {v:"$x_{"+str(int(v))+"}$" for v in self.v}
tex += " & ".join([varnames[v] for v in self.v]) + " & "+factorname+" \\\\ \\hline \n"
for s in range(self.numel()):
tex += " & ".join([str(si) for si in self.v.ind2sub(s)]) + " & " + ("{:"+valueformat+"}").format(self[s]) + "\\\\ \n"
tex += "\\end{tabular} \n"
return tex
@property
def vars(self):
"""Variables (scope) of the factor; read-only"""
return self.v
@vars.setter
def vars(self,value):
raise AttributeError("Read-only attribute")
@property
def table(self):
"""Table (values, as numpy array) of the factor"""
return self.t
@table.setter
def table(self,values):
try:
self.t[:] = values # try filling factor with "values"
except ValueError: # if it's an incompatible shape,
self.t = np.array(values,dtype=float).reshape(self.v.dims(),order=orderMethod) # try again using reshape
@property
def nvar(self):
"""Number of arguments (variables, scope size) for the factor"""
return len(self.v)
#@property
def dims(self):
"""Dimensions (table shape) of the tabular factor"""
return self.t.shape
#@property # TODO: make property?
def numel(self):
"""Number of elements (size) of the tabular factor"""
return self.t.size
################## METHODS ##########################################
def __getitem__(self,loc):
"""Accessor: F[x1,x2] = F[sub2ind(x1,x2)] = F(X1=x1,X2=x2)"""
if isinstance(loc, dict): return self.valueMap(loc)
if self.t.ndim == 1 or isinstance(loc, (tuple, list)):
return self.t[loc]
else:
try:
return self.t[self.v.ind2sub(loc)]
except ValueError:
raise IndexError("Index {} invalid for table with size {}".format(loc,self.t.shape))
def __setitem__(self,loc,val):
"""Assign values of the factor: F[i,j,k] = F[idx] = val if idx=sub2ind(i,j,k)"""
if isinstance(loc, dict): return self.setValueMap(loc,val)
if self.t.ndim == 1 or isinstance(loc, (tuple, list)):
self.t[loc] = val
else:
try:
self.t[self.v.ind2sub(loc)] = val
#self.t.flat[loc] = val # uses c-contiguous order...
except ValueError:
raise IndexError("Index {} invalid for table with size {}".format(loc,self.t.shape))
#value = __getitem__ # def f.value(loc): Alternate name for __getitem__
def value(self,x):
"""Type-safe version of __getitem__: returns scalar float entry of table at tuple x, or exception"""
if self.nvar == 0: return self.t[0]
return self.t.item(x)
def setValue(self,x,val):
"""Type-safe version of __setitem__: sets a scalar float entry of table at tuple x, or exception"""
self.t.itemset(x,val)
def valueMap(self,x):
"""Accessor: F[x[i],x[j]] where i,j = F.vars, i.e, x is a map from variables to their state values"""
if self.nvar == 0: return self.t[0] # if a scalar f'n, nothing to index
return self.t[tuple(x[v] for v in self.v)] # otherwise, find entry of table
def setValueMap(self,x,val):
"""Set F[x[i],x[j]] = val, where i,j = F.vars, i.e, x is a map from variables to their state values"""
self.t[tuple(x[v] for v in self.v) if len(self.v) else 0] = val # lookup location to set, or 0 if scalar f'n
def __float__(self):
"""Convert factor F to scalar float if possible; otherwise raises ValueError"""
if (self.nvar == 0): return self.t[0]
else: raise ValueError("Factor is not a scalar; scope {}".format(self.v))
# TODO missing comparator functions?
def isnan(self):
"""Check for NaN (not-a-number) entries in the factor's values; true if any NaN present"""
return self.isAny( (lambda x: np.isnan(x)) )
def isfinite(self):
"""Check for infinite (-inf, inf) or NaN values in the factor; false if any present"""
return not self.isAny( (lambda x: not np.isfinite(x)) )
def isAny(self,test):
"""Generic check for any entries satisfying lambda-expression "test" in the factor"""
for x in np.nditer(self.t, op_flags=['readonly']):
if test(x):
return True
return False
#### UNARY OPERATIONS ####
def __abs__(self):
"""Return the absolute value of F: G = F.abs() => G(x) = | F(x) | for all x"""
return Factor().__build( self.v.copy() , np.fabs(self.t) )
abs = __abs__
def __neg__(self):
"""Return the negative of F: G = -F => G(x) = -F(x) for all x"""
return Factor().__build( self.v.copy() , np.negative(self.t) )
def exp(self):
"""Return the exponential of F: G = F.exp() => G(x) = exp(F(x)) for all x"""
return Factor().__build( self.v.copy() , np.exp(self.t) )
def __pow__(self,power):
"""Return F raised to a power: G = F.power(p) => G(x) = ( F(x) )^p for all x"""
return Factor().__build( self.v.copy() , np.power(self.t,power) )
power = __pow__
def log(self): # just use base?
"""Return the natural log of F: G = F.log() => G(x) = log( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log(self.t) )
def log2(self):
"""Return the log base 2 of F: G = F.log2() => G(x) = log2( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log2(self.t) )
def log10(self):
"""Return the log base 10 of F: G = F.log10() => G(x) = log10( F(x) ) for all x"""
with np.errstate(divide='ignore'):
return Factor().__build( self.v.copy() , np.log10(self.t) )
#### IN-PLACE UNARY OPERATIONS ####
# always return "self" for chaining: f.negIP().expIP() = exp(-f(x)) in-place
def absIP(self):
"""Take the absolute value of F: F.absIP() => F(x) <- |F(x)| (in-place)"""
np.fabs(self.t, out=self.t)
return self
def expIP(self):
"""Take the exponential of F: F.expIP() => F(x) <- exp(F(x)) (in-place)"""
np.exp(self.t, out=self.t)
return self
def powerIP(self,power):
"""Raise F to a power: F.powerIP(p) => F(x) <- ( F(x) )^p (in-place)"""
np.power(self.t, power, out=self.t)
return self
__ipow__ = powerIP
def logIP(self): # just use base?
"""Take the natural log of F: F.logIP() => F(x) <- log( F(x) ) (in-place)"""
with np.errstate(divide='ignore'):
np.log(self.t, out=self.t)
return self
def log2IP(self):
"""Take the log base 2 of F: F.log2IP() => F(x) <- log2( F(x) ) (in-place)"""
with np.errstate(divide='ignore'):
np.log2(self.t, out=self.t)
return self
def log10IP(self):
"""Take the log base 10 of F: F.log10IP() => F(x) <- log10( F(x) ) (in-place)"""
with np.errstate(divide='ignore'):
np.log10(self.t, out=self.t)
return self
def negIP(self):
"""Take the negation of F: F.negIP() => F(x) <- (-F(x)) (in-place)"""
np.negative(self.t, out=self.t)
return self
#### BINARY OPERATIONS ####
# TODO: add boundary cases: 0/0 = ? inf - inf = ?
def __add__(self,that):
"""Addition of factors, e.g., G(x_1,x_2) = F1(x_1) + F2(x_2)"""
return self.__opExpand2(that,np.add)
def __radd__(self,that):
"""Right-addition, e.g. G(x) = 3.0 + F(x)"""
return self.__opExpand2(that,np.add)
def __iadd__(self,that):
"""In-place addition, F1 += F2. Most efficient if F2.vars <= F1.vars"""
return self.__opExpand2(that,np.add, out=self)
def __sub__(self,that):
"""Subtraction of factors, e.g., G(x_1,x_2) = F1(x_1) - F2(x_2)"""
return self.__opExpand2(that,np.subtract)
def __rsub__(self,that):
"""Right-subtraction, e.g. G(x) = 3.0 - F(x)"""
B = that if isinstance(that,Factor) else Factor([],that)
return B.__opExpand2(self, np.subtract)
def __isub__(self,that):
"""In-place subtraction, F1 -= F2. Most efficient if F2.vars <= F1.vars"""
return self.__opExpand2(that,np.subtract, out=self)
def __mul__(self,that):
"""Multiplication of factors, e.g., G(x_1,x_2) = F1(x_1) * F2(x_2)"""
return self.__opExpand2(that, np.multiply)
def __rmul__(self,that):
"""Right-multiplication, e.g. G(x) = 3.0 * F(x)"""
return self.__opExpand2(that, np.multiply)
def __imul__(self,that):
"""In-place multiplication, F1 *= F2. Most efficient if F2.vars <= F1.vars"""
return self.__opExpand2(that,np.multiply, out=self)
def __div__(self,that):
"""Division of factors, e.g., G(x_1,x_2) = F1(x_1) / F2(x_2)"""
with np.errstate(divide='ignore'):
return self.__opExpand2(that, np.divide)
__truediv__ = __div__
def __rdiv__(self,that):
"""Right-divide, e.g. G(x) = 3.0 / F(x)"""
B = that if isinstance(that,Factor) else Factor([],that)
with np.errstate(divide='ignore'):
return B.__opExpand2(self, np.divide)
__rtruediv__ = __rdiv__
def __idiv__(self,that):
"""In-place divide, F1 /= F2. Most efficient if F2.vars <= F1.vars"""
with np.errstate(divide='ignore'):
return self.__opExpand2(that,np.divide, out=self)
__itruediv__ = __idiv__
#### ELIMINATION OPERATIONS ####
def sum(self, elim=None, out=None):
"""Eliminate via sum on F, e.g., f(X_2) = \sum_{x_1} F(x_1,X_2) = F.sum([X1])"""
if (elim is None): elim = self.v
return self.__opReduce2(self.v & elim,np.sum, out=out)
def marginal(self, target, out=None):
"""Compute the marginal of F, e.g., f(X_2) = \sum_{x_1} F(x_1,X_2) = F.marginal([X2])"""
return self.__opReduce2(self.v - target,np.sum, out=out)
def sumPower(self, elim=None, power=1.0, out=None):
"""Eliminate via powered sum, e.g., f(X_2) = \\root^{1/p}{ sum_{x_1} F(x_1,X_2)^p } = F.sumPower([X1],p)"""
if (elim is None): elim = self.v
tmp = (self ** power).sum(elim)
tmp **= (1.0/power)
return tmp
def lse(self, elim=None, out=None):
"""Eliminate via log-sum-exp on F, e.g., f(X_2) = log \sum_{x_1} exp F(x_1,X_2) = F.lse([X1])"""
if (elim is None): elim = self.v
return self.__opReduce3(self.v & elim, np.logaddexp.reduce, out=out)
def lsePower(self, elim=None, power=1.0, out=None):
"""Eliminate via powered log-sum-exp, e.g., f(X_2) = 1/p log \sum_{x_1} exp F(x_1,X_2)*p = F.lsePower([X_1],p)"""
if (elim is None): elim = self.v
if power == inf: return self.max(elim)
elif power == -inf: return self.min(elim)
elif power == 1.0: return self.lse(elim)
else:
tmp = (self*power).lse(elim)
tmp *= (1.0/power)
return tmp
def max(self, elim=None, out=None):
"""Eliminate via max on F, e.g., f(X_2) = \max_{x_1} F(x_1,X_2) = F.max([X1])"""
if (elim is None): elim = self.v
return self.__opReduce2(self.v & elim,np.max, out=out)
def maxmarginal(self, target, out=None):
"""Compute the max-marginal of F, e.g., f(X_2) = \max_{x_1} F(x_1,X_2) = F.maxmarginal([X2])"""
return self.__opReduce2(self.v - target,np.max, out=out)
def min(self, elim=None, out=None):
"""Eliminate via min on F, e.g., f(X_2) = \min_{x_1} F(x_1,X_2) = F.min([X1])"""
if (elim is None): elim = self.v
return self.__opReduce2(self.v & elim,np.min, out=out)
def minmarginal(self, target, out=None):
"""Compute the min-marginal of F, e.g., f(X_2) = \min_{x_1} F(x_1,X_2) = F.minmarginal([X2])"""
return self.__opReduce2(self.v - target,np.min, out=out)
# use ufunc.reduceat? reduce etc seem not good?
# frompyfunc to make ufunc from python function?
# use "externalloop" flag?
#return t.max(axis=None,out=None) # use axis to specific dimensions to eliminate; out for IP version
#### TUPLE OPERATIONS ####
def argmax2(self, cvars=None, ctuple=None):
"""Find the argmax of the factor, with partial conditioning (as var list + value list) if desired.
Returns a maximizing configuration of f(X|Xc=xc) as a tuple of states
"""
if (cvars is None):
return self.v.ind2sub(self.t.argmax())
ax = tuple(ctuple[cvars.index(x)] if x in cvars else slice(None) for x in self.v)
return self.v.ind2sub(self.t[ax].argmax())
def argmax(self, evidence={}):
"""Find the argmax of the factor, with partial conditioning (as dict evidence[v]) if desired
Returns a maximizing configuration of f(X|Xc=xc) as a tuple of states
"""
if len(evidence)==0:
return self.v.ind2sub(self.t.argmax())
ax = tuple(evidence[v] if v in evidence else slice(None) for v in self.v)
return self.v.ind2sub( self.t[ax].argmax() )
def argmin2(self, cvars=None, ctuple=None):
"""Find the argmin of the factor, with partial conditioning if desired (list+list version)"""
if (cvars is None):
return self.v.ind2sub(self.t.argmin())
ax = tuple(ctuple[cvars.index(x)] if x in cvars else slice(None) for x in self.v)
return self.v.ind2sub(self.t[ax].argmin())
def argmin(self, evidence={}):
"""Find the argmin of the factor, with partial conditioning if desired (dict version)"""
if len(evidence)==0:
return self.v.ind2sub(self.t.argmin())
ax = tuple(evidence[v] if v in evidence else slice(None) for v in self.v)
return self.v.ind2sub( self.t[ax].argmin() )
def sample(self, Z=None):
"""Draw a random sample (as a tuple of states) from the factor; assumes positivity.
If option Z=<float> set, the function will assume normalization factor Z
"""
Z = Z if Z is not None else self.sum() # normalize if desired / by default
assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?
pSoFar = 0.0
pDraw = Z * np.random.random_sample()
it = np.nditer(self.t, op_flags=['readonly'], flags=['multi_index']) # for tuple return
#it = np.nditer(self.t, op_flags=['readonly'], flags=[orderMethod+'_index']) # for index return
while not it.finished:
pSoFar += it[0]
if ( pSoFar > pDraw ):
return it.multi_index # multi_index for tuple return
#return it.index # index for index return
it.iternext()
return self.v.ind2sub(self.numel()-1) # if numerical issue: return final state
def condition2(self, cvars=[],ctuple=[]):
"""Create a clamped (or "sliced") factor using partial conditioning (list+list version)
>>> F.condition2([0,2],[a,b]) # returns f(X_1,X_3) = F(X_0=a, X_1, X_2=b, X_3)
"""
ax = tuple(ctuple[cvars.index(x)] if x in cvars else slice(None) for x in self.v)
return Factor(self.v - cvars, self.t[ax]) # forces table copy in constructor
def condition(self, evidence):
"""Create a clamped (or "sliced") factor using partial conditioning (dict version)
>>> F.condition({0:a,2:b}) # returns f(X_1,X_3) = F(X_0=a, X_1, X_2=b, X_3)
"""
ax = tuple( evidence[v] if v in evidence else slice(None) for v in self.v )
cvars = [ v for j,v in enumerate(self.v) if ax[j] != slice(None) ]
return Factor(self.v - cvars, self.t[ax]) # forces table copy in constructor
slice2 = condition2 # alternate, libDAI-like names
slice = condition
# TODO: assign_slice( evidence, conditional_table )
# create ax = tuple( ... ); self.table(ax) = conditional_table
# TODO: assign_slice2( vars, vals, conditional_table )
def entropy(self):
"""Compute the entropy of the factor (normalizes, assumes positive)"""
Z = self.sum()
if not (Z > 0): raise ValueError('Non-normalizable factor (perhaps log factor?)')
tmp = np.ravel(self.t)
H = -np.dot( tmp, np.log(tmp.clip(min=1e-300)) )/Z + np.log(Z) # entropy of tmp/Z
return H
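# Illustrative check (added; assumes the Var(label, states) constructor from
# pyGMs.varset): a uniform factor over a k-state variable has entropy log(k),
# e.g. Factor([Var(0, 4)], 1.0).entropy() equals np.log(4) up to round-off.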
def norm(self, distance):
"""Compute any of several norm-like functions on F(x).
'distance' can be any of:
'L1' : L1 or manhattan distance, sum of absolute values
'L2' : L2 or Euclidean distance, sum of squares
'LInf' : L-Infinity distance, maximum value
'KL' : Shannon entropy (KL = Kullback Leibler)
'HPM' : Hilbert's projective metric
"""
distance = distance.lower()
if distance == 'l1': return self.abs().sum()
elif distance == 'l2': return (self*self).sum()
elif distance == 'linf': return self.abs().max()
elif distance == 'kl': return self.entropy()
elif distance == 'hpm': F = self.log(); return F.max() - F.min();
else: raise ValueError("Unrecognized norm type {}; 'L1','L2','LInf','KL','HPM'".format(distance));
def distance(self, that, distance):
"""Compute any of several norm-like functions on F(x).
'distance' can be any of:
'L1' : L1 or manhattan distance, sum of absolute values
'L2' : L2 or Euclidean distance, sum of squares
'LInf' : L-Infinity distance, maximum value
'KL' : Shannon entropy (KL = Kullback Leibler)
'HPM' : Hilbert's projective metric
"""
distance = distance.lower()
tmp = self.copy()
if distance == 'l1': tmp -= that; tmp.absIP(); return tmp.sum()
elif distance == 'l2': tmp -= that; tmp *= tmp; return tmp.sum()
elif distance == 'linf': tmp -= that; tmp.absIP(); return tmp.max()
elif distance == 'kl': Z=tmp.sum(); tmp/=that; tmp*=that.sum()/Z; tmp.logIP(); tmp*=self; return tmp.sum()/Z;
elif distance == 'hpm': tmp /= that; tmp.logIP(); return tmp.max() - tmp.min();
else: raise ValueError("Unrecognized norm type {}; 'L1','L2','LInf','KL','HPM'".format(distance));
#useful things:
# np.ndindex(shape) : iterate over tuples consistent with shape
# for index, x in np.ndenumerate(a): iterate over tuples, values
#def mean(factorList):
# return
#def geomean(factorList):
# return
############################ INTERNAL ##############################################
# slow version with arbitrary operator
def __opUnaryIP(self,op):
for x in np.nditer(self.t, op_flags=['readwrite']):
x[...] = op(x)
return self
def __opUnary(self,op):
return Factor( self.v.copy() , self.t.copy(order=orderMethod) ).__opUnaryIP(op)
#def __opAccumulate(self,r,op):
# for x in np.nditer(self.t, op_flags=['readonly']):
# r = op(r,x)
# return r
# TODO: at least use numpy "broadcast" / "external_loop" etc ; maybe define ufuncs or compile them?
#
def __opExpand1(self,that,op, out=None):
"""Internal combination function; brute force application of arbitrary function "op"; slow """
A = self
B = that if isinstance(that,Factor) else Factor([],that)
vall = A.v | B.v
axA = list(A.v.index(x) if x in A.v else -1 for x in vall)
axB = list(B.v.index(x) if x in B.v else -1 for x in vall)
if ( (not (out is None)) and (out.v == vall) ):
f = out
else:
f = Factor(vall) # TODO: should also change "out" if specified!
it = np.nditer([A.t, B.t, f.t],
op_axes = [ axA, axB, None ],
op_flags=[['readonly'], ['readonly'], ['writeonly']])
for (i,j,k) in it:
op(i,j,out=k)
return f
def __opExpand2(self,that,op, out=None):
"""Internal combination function; assumes "op" is a numpy build-in (using a ufunc)"""
if not isinstance(that,Factor): # if not a Factor, must be a scalar; use scalar version:
if out is None: return Factor(self.v, op(self.t,that)) # with constructor
else: op(self.t, that, out=out.t); return out # or direct write
# Non-scalar 2nd argument version:
A = self
B = that if isinstance(that,Factor) else Factor([],that)
vall = A.v | B.v
dA,dB = vall.expand_dims(A.v, B.v);
#dA = tuple(x.states if x in A.v else 1 for x in vall);
#dB = tuple(x.states if x in B.v else 1 for x in vall);
if ( (out is not None) and (out.v == vall) ): # if out can be written to directly, do so
op( A.t.reshape(dA,order='A') , B.t.reshape(dB,order='A'), out=out.t ) # TODO: order=A necessary?
else:
t = op( A.t.reshape(dA,order='A') , B.t.reshape(dB,order='A') ) # TODO: order=A necessary?
if (out is None): out = Factor()
if (len(vall)==0): t = np.asarray([t],dtype=float)
out.__build(vall,t)
return out
def __opReduce1(self,elim,op,init): # TODO: change to IP; caller initializes?
"""Internal reduce / eliminate function; brute force application of arbitrary f'n "op"; slow """
A = self.t
f = Factor( self.v - elim , init) # TODO: fill with ??? (0.0 for sum, -inf for lse, etc)
axA = list(range(len(self.v)))
axC = list(map(lambda x:f.v.index(x) if x in f.v else -1 ,self.v))
C = f.t
it = np.nditer([A, C], op_axes = [ axA, axC ], flags=['reduce_ok'], op_flags=[['readonly'], ['readwrite']])
for (i,j) in it:
op(i,j,out=j)
return f
def __opReduce2(self, elim, op, out=None): # assumes elim <= self.v
"""Internal reduce / eliminate function; assumes "op" is a numpy build-in (using a ufunc)"""
if ((elim is None) or (len(elim)==len(self.v))):
return op(self.t)
elif (out is None): # non-in-place version
ax = tuple(self.v.index(x) for x in elim)
out = Factor(self.v - elim, op(self.t, axis=ax))
else: # in-place version
assert (out.v == (self.v-elim) ), "Cannot eliminate into an existing factor with incorrect scope"
ax = tuple(self.v.index(x) for x in elim)
op(self.t, axis=ax, out=out.t)
return out
def __opReduce3(self, elim, op, out=None): # assumes elim <= self.v
"""Internal reduce / eliminate function; assumes "op" is a numpy build-in (using a ufunc)
works with numpy reduce ops that require single axes at a time
"""
if ((elim is None) or (len(elim)==len(self.v))):
return op(np.ravel(self.t))
else:
if (out is None):
out = Factor(self.v - elim)
else:
assert (out.v == (self.v-elim) ), "Cannot eliminate into an existing factor with incorrect scope"
ax = tuple(self.v.index(x) for x in elim)
src = self.t
while len(ax) > 1:
src = op(src,axis=ax[-1])
ax = ax[:-1]
op(src, axis=ax, out=out.t)
return out
""" NumPy reduce example:
>>> a = np.arange(24).reshape(2,3,4)
>>> b = np.array(0)
>>> for x, y in np.nditer([a, b], flags=['reduce_ok', 'external_loop'],
... op_flags=[['readonly'], ['readwrite']]):
... y[...] += x
...
Notes
xhat[ [v.label for v in f.var] ] = list(f.argmax())
"""
|
StarcoderdataPython
|
4999815
|
import logging
from spaceone.core.manager import BaseManager
from spaceone.monitoring.model.event_model import Event
_LOGGER = logging.getLogger(__name__)
class EventManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.event_model: Event = self.locator.get_model('Event')
def create_event(self, params):
def _rollback(event_vo):
_LOGGER.info(f'[create_event._rollback] '
f'Delete event : {event_vo.event_id}')
event_vo.delete()
event_vo: Event = self.event_model.create(params)
self.transaction.add_rollback(_rollback, event_vo)
return event_vo
def update_event(self, params):
event_vo: Event = self.get_event(params['event_id'], params['domain_id'])
return self.update_event_by_vo(params, event_vo)
def update_event_by_vo(self, params, event_vo):
def _rollback(old_data):
_LOGGER.info(f'[update_event_by_vo._rollback] Revert Data : '
f'{old_data["event_id"]}')
event_vo.update(old_data)
self.transaction.add_rollback(_rollback, event_vo.to_dict())
return event_vo.update(params)
def delete_event(self, event_id, domain_id):
event_vo: Event = self.get_event(event_id, domain_id)
event_vo.delete()
def get_event(self, event_id, domain_id, only=None):
return self.event_model.get(event_id=event_id, domain_id=domain_id, only=only)
def get_event_by_key(self, event_key):
event_vos = self.event_model.filter(event_key=event_key)
if event_vos.count() > 0:
return event_vos[0]
else:
return None
def list_events(self, query={}):
return self.event_model.query(**query)
def stat_events(self, query):
return self.event_model.stat(**query)
|
StarcoderdataPython
|
11353625
|
from .Error import *
from .tools import *
|
StarcoderdataPython
|
5175236
|
<filename>app/utils/mathLib.py
import numpy as np
import scipy.linalg as la
def constructor_matrix(M):
"""
Build a numpy matrix from M and return its transpose.
"""
return np.matrix(M).transpose()
def minimum_squares(X, Y):
"""
Fit a least-squares line y = m*x + c to the data and return the intermediate quantities.
"""
media_X = np.mean(X)
media_Y = np.mean(Y)
erro_x = X-media_X
erro_y = Y-media_Y
soma_erro_xy = np.sum(erro_x*erro_y)
erro_x_quadratico = (X-media_X)**2.0
soma_erro_x_quadratico = np.sum(erro_x_quadratico)
m = soma_erro_xy / soma_erro_x_quadratico
c = media_Y - m*media_X
reta = m*X+c
return {
'media_X': media_X,
'media_Y': media_Y,
'erro_x': erro_x,
'erro_y': erro_y,
'soma_erro_xy': soma_erro_xy,
'erro_x_quadratico': erro_x_quadratico,
'soma_erro_x_quadratico': soma_erro_x_quadratico,
'm': m,
'c': c,
'reta': reta
}
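# Quick check (added for illustration): for X = np.array([1., 2., 3.]) and
# Y = np.array([2., 4., 6.]) the slope m is 2.0, the intercept c is 0.0 and
# minimum_squares(X, Y)['reta'] reproduces Y exactly.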
def plu(A):
"""
Return the PLU decomposition of A
(permutation, lower-triangular and upper-triangular matrices)
"""
(P, L, U) = la.lu(A)
return {
'P': P,
'L': L,
'U': U
}
def autovalores_autovetores(A):
"""
Return the eigenvalues and eigenvectors of A,
used to build the spectral decomposition
"""
autovalores, autovetores = np.linalg.eig(A)
return {
'autovalores': autovalores,
'autovetores': autovetores
}
def espectral(autovetores, matrizDiagonal):
"""
Spectral decomposition: reconstruct A = V * D * V^-1 from eigenvectors V and diagonal matrix D
"""
return np.matmul(np.matmul(autovetores,matrizDiagonal),np.linalg.inv(autovetores))
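# Round-trip sketch (added for illustration): for a diagonalizable A,
#   eig = autovalores_autovetores(A)
#   espectral(eig['autovetores'], np.diag(eig['autovalores']))
# reconstructs A up to numerical error.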
def pvd(A):
"""
Return the singular value decomposition (SVD) of A
"""
(U,s,V) = np.linalg.svd(A)
return {
'U': U,
's': s,
'V': V
}
def back_substitution(A, x, n):
"""
Recover x by back substitution from b = A.x, assuming A is upper triangular
"""
b = np.dot(A, x)
xcomp = np.zeros(n)
for i in range(n-1, -1, -1):
tmp = b[i]
for j in range(n-1, i, -1):
tmp -= xcomp[j]*A[i,j]
xcomp[i] = tmp/A[i,i]
return xcomp
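# Worked example (added for illustration): with an upper-triangular A the
# function recovers x from b = A.x, e.g.
#   A = np.array([[2., 1.], [0., 3.]]); x = np.array([1., 2.])
#   back_substitution(A, x, 2)   # -> array([1., 2.])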
|
StarcoderdataPython
|
396539
|
<gh_stars>0
from fintech_ibkr.synchronous_functions import *
|
StarcoderdataPython
|
300994
|
import os
import models
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField, BooleanField, FileField
from wtforms.validators import ValidationError, DataRequired, regexp, Email, EqualTo, Length
from flask_bcrypt import check_password_hash
if 'HEROKU' in os.environ:
AUTH_PASS = os.environ['auth_pass']
else:
AUTH_PASS = '<PASSWORD>'
def username_exists(form, field):
print(form)
try:
models.User.get(models.User.username ** field.data)
except models.DoesNotExist:
pass
else:
raise ValidationError('User with that username already exists')
def email_exists(form, field):
print(form)
try:
models.User.get(models.User.email ** field.data)
except models.DoesNotExist:
pass
else:
raise ValidationError('User with that email already exists')
def auth_matches(form, field):
print(form)
if check_password_hash(AUTH_PASS, field.data):
pass
else:
raise ValidationError('Special password incorrect')
class SignUpForm(Form):
username = StringField(
'Username',
validators=[
DataRequired(),
username_exists,
regexp(r'^[a-z0-9]{3,10}$',
message='Username can only be lowercase letters & numbers, '
'and length can only be 3-10 characters long')
]
)
email = StringField(
'Email',
validators=[
DataRequired(),
email_exists,
Email()
]
)
first_name = StringField(
'First Name',
validators=[
DataRequired(),
regexp(r'[A-Z][a-z]+', message='Name can only be uppercase first letter and lowercase proceeding letters')
]
)
last_name = StringField(
'Last Name',
validators=[
DataRequired(),
regexp(r'[A-Z][a-z]+', message='Name can only be uppercase first letter and lowercase proceeding letters')
]
)
password = PasswordField(
'Password',
validators=[
DataRequired(),
EqualTo('password2', message='Passwords must match'),
]
)
password2 = PasswordField(
'Confirm Password',
validators=[DataRequired()]
)
auth = PasswordField(
'Special Password',
validators=[
DataRequired(),
auth_matches
]
)
class SignInForm(Form):
name_email = StringField('Username or Email', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
class PostForm(Form):
content = FileField()
|
StarcoderdataPython
|
9661229
|
#!/usr/bin/env python
""" This is basically scratchpad code for playing with the AVISA system. """
import argparse
import json
import pprint
import time
import uuid
import requests
AVISA = 'http://10.22.237.210:8080'
AVISA_STATUSES = {1: "New / Not Started",
2: "Started / In Progress",
3: "Completed",
-1: "Error",
0: "Stop test",
4: "No Tests"}
DEBUG = False
VERBOSE = False
QUIET = True
FAILSAFE_TIMEOUTS = {1: 15 * 60, # 15m - to start the test
2: 60 * 60} # 60m - to run the test
FAILURE_THRESHOLD = 0
PP = pprint.PrettyPrinter(indent=4, width=120)
#TODO: Need a logger. :)
class TestManager(object):
def __init__(self, playback_url, duration=120, device_type=None, device_id=None, deployment_id=None):
self.deployment_id = deployment_id
self.device_id = device_id
self.device_type = device_type
self.duration = duration
self.playback_url = playback_url
self.test_case_payload_file = 'android-demo.json'
self.test_id = None
self.test_results = None
self.test_status = None
if not device_type and not device_id:
raise Exception("TestManager requires either a device_type or device_id be specified.")
def __enter__(self):
self.reserve()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
def _call_avisa(self, url, payload, method, debug=DEBUG):
r = None
status_code = None
response = None
if debug:
print("=== AVISA CALL START ===")
print("URL: {}".format(url))
print("PAYLOAD: \n{}".format(PP.pformat(payload)))
        if method == 'post':
            r = requests.post(url, json=payload)
        elif method == 'put':
            r = requests.put(url, json=payload)
        elif method == 'get':
            r = requests.get(url, json=payload)
        elif method == 'delete':
            r = requests.delete(url, json=payload)
status_code = r.status_code
if debug:
print("RESPONSE: {}".format(PP.pformat(r.content.decode())))
print("STATUS: {}".format(r.status_code))
if status_code != 200:
raise Exception("AVISA CALL FAILED!\nMESSAGE:{}\nSTATUS: {}".format(status_code, r.content.decode()))
else:
response = json.loads(r.content.decode())
if debug:
print("=== AVISA CALL END ===")
return status_code, response
def run(self):
with open(self.test_case_payload_file) as f:
test_payload = json.load(f)
test_payload["deployment_id"] = self.deployment_id
test_payload["tests"][0]["steps"][2]["duration"] = self.duration
test_payload["tests"][0]["steps"][2]["data"] = self.playback_url
test_url = '{}/api/tests/'.format(AVISA)
_, content = self._call_avisa(test_url, test_payload, 'post', debug=VERBOSE)
self.test_id = content['tests'][0]['test_id']
if not QUIET:
print("initiating test - test_id: {}".format(self.test_id))
def reserve(self):
if self.deployment_id is None:
self.deployment_id = str(uuid.uuid4())
if self.device_id:
reserve_url = "{}/api/reservations/device/".format(AVISA)
reserve_payload = {"deployment_id": self.deployment_id, "device_id": self.device_id}
if self.device_type:
raise Exception("Running tests by device type is not yet implemented.")
self._call_avisa(reserve_url, reserve_payload, 'post', debug=VERBOSE)
if not QUIET:
print("reservation - deployment_id: {} | device_id: {}".format(self.deployment_id, self.device_id))
def release(self):
release_url = "{}/api/reservations/{}".format(AVISA, self.deployment_id)
release_payload = {}
if not QUIET:
print("releasing device")
self._call_avisa(release_url, release_payload, 'delete', debug=DEBUG)
def status(self):
status_url = "{}/api/tests/status//{}".format(AVISA, self.test_id)
status_payload = {}
_, response = self._call_avisa(status_url, status_payload, 'get', debug=DEBUG)
self.test_status = response['status']
if not QUIET:
print("self.test_status: {} ({})".format(self.test_status, AVISA_STATUSES[self.test_status]))
return self.test_status
def summary_results(self):
results_url = "{}/api/results/{}".format(AVISA, self.test_id)
results_payload = {}
_, response = self._call_avisa(results_url, results_payload, 'get', debug=DEBUG)
self.test_results = response
if not QUIET:
print("self.test_results: {}".format(PP.pformat(self.test_results)))
return self.test_results
def detailed_results(self, rtype, count=None):
results_url = "{}/api/results/{}/{}".format(AVISA, rtype, self.test_id)
results_payload = {}
if count is not None:
results_url = "{}/api/results/{}/{}?count={}".format(AVISA, rtype, self.test_id, count)
results_payload = {'count': count}
_, response = self._call_avisa(results_url, results_payload, 'get', debug=DEBUG)
return response
def get_latest_results(self):
results = self.detailed_results('video', count=1)
results.update(self.detailed_results('audio', count=1))
return results
def set_log_level(debug=DEBUG, verbose=VERBOSE, quiet=QUIET):
global DEBUG
global VERBOSE
global QUIET
if debug:
verbose = True
quiet = False
elif verbose:
debug = False
quiet = False
elif quiet:
debug = False
verbose = False
DEBUG = debug
VERBOSE = verbose
QUIET = quiet
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run a basic playback test with AVISA.')
parser.add_argument('--url', type=str, default='http://10.22.244.94/BBC_WORLD_HD_TVE.m3u8', action='store',
help='The .m3u8 stream url from which to test playback.')
parser.add_argument('--device_id', type=int, default=None, action='store',
help='The specific device_id of the device with which you want to test playback.')
parser.add_argument('--duration', type=int, default=120, action='store',
help='The number of seconds to run the playback test.')
parser.add_argument('--failure_threshold', type=int, default=FAILURE_THRESHOLD, action='store',
help='The number of failures to tolerate before declaring failure and ending the playback.')
parser.add_argument('--device_type', type=str, default=None, action='store',
help='The type of device with which you want to test playback. (Not Implemented)')
parser.add_argument('--deployment_id', type=str, default=None, action='store',
help='The name of this test, as it will be registered in AVISA.')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--quiet', action='store_true')
args = parser.parse_args()
set_log_level(args.debug, args.verbose, args.quiet)
tm = TestManager(playback_url=args.url,
deployment_id=args.deployment_id,
duration=args.duration,
device_type=args.device_type,
device_id=args.device_id)
failures = {'audio': {}, 'video': {}}
with tm:
tm.run()
total_failures = 0
while tm.status() in [1, 2]:
            if tm.test_status == 2:
latest_results = tm.get_latest_results()
print(PP.pformat(latest_results))
if 'audio' in latest_results.keys() and latest_results['audio']:
if latest_results['audio'][0]['audio_loudness'] == 0:
print("audio freeze failure detected")
failures['audio'].update({time.time(): "Audio Loudness: 0 - Frozen"})
if 'video' in latest_results.keys() and latest_results['video']:
if latest_results['video'][0]['video_motion'] == 0:
print("video freeze failure detected")
failures['video'].update({time.time(): "Video Motion: 0 - Frozen"})
total_failures = len(failures['audio'].keys()) + len(failures['video'].keys())
if total_failures > args.failure_threshold:
print('Exiting - Too many failures encountered.')
print("failures:\n{}".format(PP.pformat(failures)))
print("total failures: {}".format(total_failures))
exit(1)
time.sleep(5)
print("results: {}".format(PP.pformat(tm.summary_results())))
print("failures:\n{}".format(PP.pformat(failures)))
print("total failures: {}".format(total_failures))
exit(0)
|
StarcoderdataPython
|
12822199
|
<reponame>alisiahkoohi/survae_flows
from .mlp import *
from .autoregressive import *
from .matching import *
|
StarcoderdataPython
|
3297796
|
<filename>synoptic/accessors.py
## <NAME>
## April 27, 2021
"""
=====================================
Custom Pandas Accessor for SynopticPy
=====================================
So, I recently learned a cool trick--using Pandas custom accessors
to extend Pandas DataFrames with custom methods. Look more about them
here:
https://pandas.pydata.org/pandas-docs/stable/development/extending.html
This is work in progress, but will be useful for making quick map and
timeseries plots of data returned from the API.
"""
import warnings
import pandas as pd
import matplotlib.pyplot as plt
try:
from toolbox.cartopy_tools import common_features, pc
from paint.standard2 import cm_tmp
except:
warnings.warn("map making not available with cartopy_tools")
@pd.api.extensions.register_dataframe_accessor("synoptic")
class SynopticAccessor:
def __init__(self, pandas_obj):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
def _validate(obj):
if obj.attrs["service"] in ["stations_latest", "stations_nearesttime"]:
# verify there is a latitude and a longitude index.
if "latitude" not in obj.index or "longitude" not in obj.index:
raise AttributeError("Must have 'latitude' and 'longitude'.")
else:
# verify there is a column latitude and a column longitude
if "latitude" not in obj.columns or "longitude" not in obj.columns:
raise AttributeError("Must have 'latitude' and 'longitude'.")
@property
def center(self):
# return the geographic center point of this DataFrame
lat = self._obj.latitude
lon = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def get_stn_column(self):
"""Get df as just STATIONS columns"""
        return self._obj[self._obj.attrs["STATIONS"]]
def get_dt_column(self):
"""Get df as just DATETIME columns"""
        return self._obj[self._obj.attrs["DATETIMES"]]
def plot_map(
self,
ax=None,
color_by=None,
show_label="STID",
cbar_kw={},
common_features_kw={},
**kw,
):
"""
Parameters
----------
show_label : {None, 'STID', 'NAME', 'ELEVATION', etc.}
What value to show for the label.
"""
# plot this array's data on a map, e.g., using Cartopy
df = self._obj
if ax is None:
ax = common_features(**common_features_kw)
stations = df.attrs["STATIONS"]
kw.setdefault("transform", pc)
kw.setdefault("edgecolor", "k")
kw.setdefault("linewidth", 0.5)
cbar_kw.setdefault("fraction", 0.046)
cbar_kw.setdefault("pad", 0.01)
if color_by is not None:
kw["c"] = df[stations].loc[color_by]
else:
kw["c"] = "tab:blue"
if color_by == "air_temp":
kw = {**cm_tmp().cmap_kwargs, **kw}
cbar_kw = {**cm_tmp().cbar_kwargs, **cbar_kw}
for stid, info in df[stations].iteritems():
if color_by is not None:
kw["c"] = info[color_by]
art = ax.scatter(info.longitude, info.latitude, **kw)
if show_label or show_label is not None:
ax.text(
info.longitude,
info.latitude,
f" {info[show_label]}",
va="center",
ha="left",
transform=pc,
fontfamily="monospace",
clip_on=True,
)
if color_by is not None:
plt.colorbar(art, ax=ax, **cbar_kw)
ax.adjust_extent()
return ax
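# A hedged usage sketch (not part of SynopticPy): it shows how the accessor is
# reached once a DataFrame with station coordinates exists. The frame layout,
# the "timeseries" service name and the coordinates below are illustrative
# assumptions; real frames come from the SynopticPy service functions.
if __name__ == "__main__":
    df = pd.DataFrame({
        "latitude": [40.77, 40.09],
        "longitude": [-111.85, -111.63],
        "STID": ["WBB", "UKBKB"],
    })
    df.attrs["service"] = "timeseries"      # any non "stations_*" service
    print(df.synoptic.center)               # geographic center (lon, lat)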
|
StarcoderdataPython
|
23538
|
<filename>tests/mock_dbcli_config.py
mock_dbcli_config = {
'exports_from': {
'lpass': {
'pull_lastpass_from': "{{ lastpass_entry }}",
},
'lpass_user_and_pass_only': {
'pull_lastpass_username_password_from': "{{ lastpass_entry }}",
},
'my-json-script': {
'json_script': [
'some-custom-json-script'
]
},
'invalid-method': {
},
},
'dbs': {
'baz': {
'exports_from': 'my-json-script',
},
'bing': {
'exports_from': 'invalid-method',
},
'bazzle': {
'exports_from': 'lpass',
'lastpass_entry': 'lpass entry name'
},
'bazzle-bing': {
'exports_from': 'lpass',
'lastpass_entry': 'different lpass entry name'
},
'frazzle': {
'exports_from': 'lpass',
'lastpass_entry': 'lpass entry name'
},
'frink': {
'exports_from': 'lpass_user_and_pass_only',
'lastpass_entry': 'lpass entry name',
'jinja_context_name': 'standard',
'exports': {
'some_additional': 'export',
'a_numbered_export': 123
},
},
'gaggle': {
'jinja_context_name': [
'env',
'base64',
],
'exports': {
'type': 'bigquery',
'protocol': 'bigquery',
'bq_account': 'bq_itest',
'bq_service_account_json':
"{{ env('ITEST_BIGQUERY_SERVICE_ACCOUNT_JSON_BASE64') | b64decode }}",
'bq_default_project_id': 'bluelabs-tools-dev',
'bq_default_dataset_id': 'bq_itest',
},
},
},
'orgs': {
'myorg': {
'full_name': 'MyOrg',
},
},
}
|
StarcoderdataPython
|
1844418
|
<filename>sciibo/bot/ai.py
from __future__ import division
import collections
import random
import time
from sciibo.core.helpers import nextcard, fitson
class CalculationTimeout(Exception):
pass
def enumerate_unique(cards):
"""
Enumerate but eliminate duplicates.
"""
seen = set()
for n, card in enumerate(cards):
if card in seen:
continue
seen.add(card)
yield n, card
# for card in set(cards):
# yield cards.index(card), card
def top_cards(piles):
"""
Returns the pile number and top card of each non-empty pile.
Example:
> top_cards([[1,2,3], [], [4,5,6], [7,8,9]])
[(0, 3), (2, 6), (3, 9)]
"""
for n, pile in enumerate(piles):
if not pile:
continue
yield n, pile[-1]
def pull(cards, card):
"""
Pulls the a card from a hand and returns the new hand.
Example:
> pull([4,5,6,7,8], 6)
[4,5,7,8]
"""
rest = cards[:]
rest.remove(card)
return rest
def pull_top(piles, pos):
"""
Pulls the top card of pile number `pos`,
and returns the new set of piles.
Example:
> pull_top([[1,2,3], [], [4,5,6], [7,8,9]], 2)
[[1,2,3], [], [4,5], [7,8,9]]
"""
rest = piles[:]
rest[pos] = piles[pos][:-1]
return rest
def place(cards, pos, card):
"""
Replaces the card at a given position in a list.
Example:
> place([1,4,7,10,12], 2, 9)
[1,4,9,10,12]
"""
result = cards[:]
result[pos] = card
return result
def best_moves(result):
"""
For each list of moves, count the number of hand cards and the
number of SB cards that are played, and return the set of
moves with minimum SB cards and maximum hand cards.
"""
result = [
(
sum(1 for value, source, target in moves if source == 'hand'),
sum(1 for value, source, target in moves if value == 'SB'),
moves
) for moves in result
]
# Get the least number of SB cards for moves that play the maximum number of hand cards
minsb = min(sb for hands, sb, moves in result)
# Get the maximum number of hand cards that can be played
maxhands = max(hands for hands, sb, moves in result if sb == minsb)
# Return first set of moves with maximum hand cards and minimum SB cards
for hands, sb, moves in result:
if hands == maxhands and sb == minsb:
return moves
def stock_moves(stock, discards, hand, builds, timeout=None):
"""
Returns the shortest list of cards to move
to get rid of the stock card.
Note: Five SB cards will be considered 'better'
than six non-SB cards, because it uses fewer cards.
"""
if not stock:
return
# Stock card is SB, can be placed on any pile
if stock == 'SB':
# Calculate number of possible subsequent moves per build pile.
result = []
builds_unique = list(enumerate_unique(builds))
for pos, card in builds_unique:
new_builds = place(builds, pos, nextcard(card))
limit = timeout / len(builds_unique) if timeout else None
try:
moves = most_moves(discards, hand, new_builds, timeout=limit)
result.append((pos, len(moves) if moves else 0))
except CalculationTimeout:
pass
# Get build pile number that gives us the most
# subsequent moves (might be zero)
maxmoves = max(moves for pos, moves in result)
result = [pos for pos, moves in result if moves == maxmoves]
# Choose random build pile when multiple equal most subsequent moves
pos = random.choice(result)
return [(stock, 'stock', 'build:%d' % pos)]
# Keep time to enforce calculation time limit
start_time = time.time()
# Keep moves of same length to calculate best move later
result = []
# Start with no moves
queue = collections.deque()
queue.append((stock, discards, hand, builds, []))
# Let queue empty out before moving on to results with one card more
# Prevent duplicate moves because of SB cards
queueable = collections.deque()
while queue:
# Enforce calculation time limit
if timeout and time.time() - start_time > timeout:
            # Abort without results by signalling a timeout
raise CalculationTimeout
stock, discards, hand, builds, moves = queue.popleft()
unique_builds = list(enumerate_unique(builds))
# Build pile numbers the stock card can be placed upon
finalmoves = [pos for pos, card in unique_builds if fitson(card, stock)]
# Stock card can be played, store results and wait for queue to empty
if finalmoves:
for pos in finalmoves:
result.append(moves + [(stock, 'stock', 'build:%d' % pos)])
# Don't look for result with more cards if stock can be played
else:
# Hand cards take precedence over discard cards
for hand_card in set(hand):
for pos, card in unique_builds:
if fitson(card, hand_card):
new_hand = pull(hand, hand_card)
new_builds = place(builds, pos, nextcard(card))
new_moves = moves + [(hand_card, 'hand', 'build:%d' % pos)]
queueable.append((stock, discards, new_hand, new_builds, new_moves))
for discard_pos, discard_card in top_cards(discards):
for pos, card in unique_builds:
if fitson(card, discard_card):
new_discards = pull_top(discards, discard_pos)
new_builds = place(builds, pos, nextcard(card))
new_moves = moves + [(discard_card, 'discard:%d' % discard_pos, 'build:%d' % pos)]
queueable.append((stock, new_discards, hand, new_builds, new_moves))
# Queue has been emptied
if not queue:
# There are results (of equal length)
if result:
# Select result with most hand cards and least SB cards
return best_moves(result)
# No results, continue with next queue (one extra card played)
queue = queueable
queueable = collections.deque()
def most_moves(discards, hand, builds, timeout=None):
"""
Returns the list of cards to move
to get rid of as many cards as possible.
Note: Six SB cards will be considered 'better'
than five non-SB cards, because it plays more cards.
"""
# Keep time to enforce calculation time limit
start_time = time.time()
# Start with no moves
queue = collections.deque()
queue.append((discards, hand, builds, [], [None, None, None, None]))
# Keep moves of same length to calculate best move later
result = []
length = 0
while queue:
# Enforce calculation time limit
if timeout and time.time() - start_time > timeout:
            # Abort without results by signalling a timeout
raise CalculationTimeout
discards, hand, builds, moves, top = queue.popleft()
unique_builds = list(enumerate_unique(builds))
# Store result if SB card was not placed on top of build pile [1]
if moves and 'SB' not in top:
# More moves than previously found, discard other results
if len(moves) > length:
result = []
length = len(moves)
result.append(moves)
# Hand cards take precedence over discard cards
for hand_card in set(hand):
for pos, card in unique_builds:
if fitson(card, hand_card):
new_hand = pull(hand, hand_card)
new_builds = place(builds, pos, nextcard(card))
new_moves = moves + [(hand_card, 'hand', 'build:%d' % pos)]
# Last hand card is an SB, allow it to pass through check 1 above
                    # Does not work if the last *two* hand cards are SB
if not new_hand and hand_card == 'SB' and top[pos] != 'SB':
new_top = place(top, pos, nextcard(card))
else:
new_top = place(top, pos, hand_card)
queue.append((discards, new_hand, new_builds, new_moves, new_top))
for discard_pos, discard_card in top_cards(discards):
for pos, card in unique_builds:
if fitson(card, discard_card):
new_discards = pull_top(discards, discard_pos)
new_builds = place(builds, pos, nextcard(card))
new_moves = moves + [(discard_card, 'discard:%d' % discard_pos, 'build:%d' % pos)]
new_top = place(top, pos, discard_card)
queue.append((new_discards, hand, new_builds, new_moves, new_top))
# Select result with most hand cards and least SB cards
if result:
return best_moves(result)
def lucky_move(stock, discards, hand, builds):
"""
Returns if any non-SB card can be played to a build pile.
"""
unique_builds = list(enumerate_unique(builds))
# Stock card
for pos, card in unique_builds:
if fitson(card, stock):
return [(stock, 'stock', 'build:%d' % pos)]
# Non-SB hand cards
for hand_card in set(hand):
if hand_card == 'SB':
continue
for pos, card in unique_builds:
if fitson(card, hand_card):
return [(hand_card, 'hand', 'build:%d' % pos)]
# Non-SB discards
for discard_pos, discard_card in top_cards(discards):
if discard_card == 'SB':
continue
for pos, card in unique_builds:
if fitson(card, discard_card):
return [(discard_card, 'discard:%d' % discard_pos, 'build:%d' % pos)]
# SB hand cards
for hand_card in set(hand):
if hand_card != 'SB':
continue
for pos, card in unique_builds:
if fitson(card, hand_card):
return [(hand_card, 'hand', 'build:%d' % pos)]
# SB discards
for discard_pos, discard_card in top_cards(discards):
if discard_card != 'SB':
continue
for pos, card in unique_builds:
if fitson(card, discard_card):
return [(discard_card, 'discard:%d' % discard_pos, 'build:%d' % pos)]
def any_move(stock, discards, hand, builds):
"""
Returns if any card can be played to a build pile.
"""
for pile in builds:
if fitson(pile, stock):
return True
if any(fitson(pile, card) for card in hand):
return True
if any(fitson(pile, card) for pos, card in top_cards(discards)):
return True
return False
def discard_move(discards, hand):
"""
Determines which card to discard to which pile.
"""
# Same card already in discards
for discard_pos, discard_card in top_cards(discards):
for card in set(hand):
if card == discard_card:
return [(card, 'hand', 'discard:%d' % discard_pos)]
# Look for empty discard pile
for discard_pos, discard_pile in enumerate(discards):
if not discard_pile:
# Choose random non-SB card
normal_cards = [card for card in hand if card != 'SB']
if normal_cards:
card = random.choice(normal_cards)
return [(card, 'hand', 'discard:%d' % discard_pos)]
# Look for next card to 'count down'
for discard_pos, discard_card in top_cards(discards):
# Don't count down SB cards
if discard_card == 'SB':
continue
for card in set(hand):
if card == 'SB':
continue
if card + 1 == discard_card:
return [(card, 'hand', 'discard:%d' % discard_pos)]
# Choose random hand card and random discard pile
card = random.choice(hand)
discard_pos = random.randrange(4)
return [(card, 'hand', 'discard:%d' % discard_pos)]
def calculate_move(stock, discards, hand, builds, timeout=None):
"""
Calculates the next moves to make.
May take up to a fixed number of seconds, otherwise
player waits too long.
"""
if timeout:
start_time = time.time()
# Find moves that get rid of the stock card
try:
moves = stock_moves(stock, discards, hand, builds, timeout=timeout)
if moves:
return moves
except CalculationTimeout:
# There might be subsequent moves we didn't have time to calculate.
# Perform any move possible or discard
return lucky_move(stock, discards, hand, builds) or discard_move(discards, hand)
# Find moves that play the most number of cards
remaining = start_time + timeout - time.time()
try:
moves = most_moves(discards, hand, builds, timeout=remaining)
if moves:
return moves
except CalculationTimeout:
# There might be subsequent moves we didn't have time to calculate.
# Perform any move possible or discard
return lucky_move(stock, discards, hand, builds) or discard_move(discards, hand)
# Don't perform lucky_move as it will play SB cards that most_moves deemed bad to play
return discard_move(discards, hand)
return (
stock_moves(stock, discards, hand, builds) or
most_moves(discards, hand, builds) or
discard_move(discards, hand)
)
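# A minimal usage sketch (not part of the module): it exercises the pure pile
# helpers defined above; they do not depend on sciibo.core.helpers, so the
# values are simply the ones from the docstrings.
if __name__ == '__main__':
    piles = [[1, 2, 3], [], [4, 5, 6], [7, 8, 9]]
    print(list(top_cards(piles)))           # [(0, 3), (2, 6), (3, 9)]
    print(pull_top(piles, 2))               # [[1, 2, 3], [], [4, 5], [7, 8, 9]]
    print(pull([4, 5, 6, 7, 8], 6))         # [4, 5, 7, 8]
    print(place([1, 4, 7, 10, 12], 2, 9))   # [1, 4, 9, 10, 12]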
|
StarcoderdataPython
|
1842552
|
from termcolor import cprint, colored
from random import randint
INDENT = ' ' * 2
def pad(str, length, padder=' '):
while len(str) < length:
str = padder + str
return str
def print_rank(list, color, head=''):
if head != '':
cprint(head, color, attrs=['bold'])
for i, item in enumerate(list):
cprint(f'{INDENT}{pad(str(i + 1), len(str(len(list))))}:{INDENT}{item}', color)
def insert_elems(start, list, index):
list = list[::-1]
for item in list:
start.insert(index, item)
return start
def is_better(a, b):
reversed = True if randint(0, 1) == 1 else False
while True:
cprint('Which is better? ', 'cyan', end='')
if not reversed:
cprint(str(a), 'yellow', end='')
cprint(' or ', 'cyan', end='')
cprint(str(b), 'yellow', end='')
else:
cprint(str(b), 'yellow', end='')
cprint(' or ', 'cyan', end='')
cprint(str(a), 'yellow', end='')
cprint(': ', 'cyan', end='')
user_input = input()
if user_input == str(a):
return True
elif user_input == str(b):
return False
else:
cprint('Invalid choice. Choose again.', 'red')
def rank(elems):
if len(elems) <= 1:
return elems
elif len(elems) == 2:
if is_better(elems[0], elems[1]):
return elems
else:
return elems[::-1]
middle = elems[0]
less = []
more = []
for item in elems[1:]:
if is_better(middle, item):
less.append(item)
else:
more.append(item)
less = rank(less)
more = rank(more)
return more + [middle] + less
cprint('Item ranker by comparison\n', 'yellow', attrs=['bold'])
start = int(input(colored('Enter the first integer of the list: ', 'yellow')))
end = int(input(colored('Enter the last integer of the list: ', 'yellow')))
print()
list = [n for n in range(start, end + 1)]
print_rank(rank(list), 'green', head='\nRanking Results:')
|
StarcoderdataPython
|
11214779
|
import bpy
import pyblish.api
from pype.api import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid objects in Blender when a publish plug-in failed."""
label = "Select Invalid"
on = "failed"
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning(
"Failed plug-in doens't have any selectable objects."
)
bpy.ops.object.select_all(action='DESELECT')
# Make sure every node is only processed once
invalid = list(set(invalid))
if not invalid:
self.log.info("No invalid nodes found.")
return
invalid_names = [obj.name for obj in invalid]
self.log.info(
"Selecting invalid objects: %s", ", ".join(invalid_names)
)
# Select the objects and also make the last one the active object.
for obj in invalid:
obj.select_set(True)
bpy.context.view_layer.objects.active = invalid[-1]
|
StarcoderdataPython
|
4950266
|
<filename>examples/simple/discover_devices.py
from pupil_labs.realtime_api.simple import discover_devices, discover_one_device
# Look for devices. Returns as soon as it has found the first device.
print("Looking for the next best device...\n\t", end="")
print(discover_one_device(max_search_duration_seconds=10.0))
# List all devices that could be found within 10 seconds
print("Starting 10 second search...\n\t", end="")
print(discover_devices(search_duration_seconds=10.0))
|
StarcoderdataPython
|
3373933
|
"""
Test CBlocks functionality.
"""
# pylint: disable=missing-docstring, no-self-use, protected-access
# pylint: disable=invalid-name, redefined-outer-name, unused-argument, unused-variable
# pylint: disable=wildcard-import, unused-wildcard-import
import pytest
import edzed
from .utils import *
def test_connect_only_once(circuit):
"""Connect my be called only once."""
blk = Noop('test')
blk.connect(True)
with pytest.raises(edzed.EdzedInvalidState):
blk.connect(True)
def test_unnamed_inputs(circuit):
"""
Unnamed inputs are processed in order as they are connected.
Const blocks are created on the fly.
"""
d4 = edzed.FuncBlock(
'4 digits', func=lambda a, b, c, d: 1000*a + 100*b + 10*c + d
).connect(1, 9, 8, edzed.Const(4))
init(circuit)
d4.eval_block()
assert d4.output == 1984
def test_named_inputs(circuit):
"""
Named inputs are ordered by name.
    This test also covers the case of no unnamed inputs, which FuncBlock handles specially.
"""
d4 = edzed.FuncBlock(
'4 digits', func=lambda a, b, c, d: 1000*a + 100*b + 10*c + d
).connect(d=9, c=edzed.Const(1), a=2, b=edzed.Const(0))
init(circuit)
d4.eval_block()
assert d4.output == 2019
def test_input_groups(circuit):
"""Input group is a sequence of input values."""
d4 = edzed.FuncBlock(
'4 digits', func=lambda f4: 1000*f4[0] + 100*f4[1] + 10*f4[2] + f4[3]
).connect(f4=[edzed.Const(3), 5, 7, 9])
init(circuit)
d4.eval_block()
assert d4.output == 3579
def test_missing_funcblock_inputs(circuit):
"""Incorrect number of inputs are an error."""
errblks = [
edzed.FuncBlock(
'test1', comment='2 expected, 3 given', func=lambda a1, a2: None
).connect(1, 2, 3),
edzed.FuncBlock(
'test2', comment='2 expected, 1 given', func=lambda a1, a2: None
).connect(1),
edzed.FuncBlock(
'test3', comment='a1 name missing', func=lambda a1, a2: None
).connect(a2=2),
edzed.FuncBlock(
'test4', comment='a3 name unexpected', func=lambda a1, a2: None
).connect(a1=1, a2=2, a3=3),
]
circuit.finalize()
for blk in errblks:
with pytest.raises(TypeError, match="does not match the connected inputs"):
blk.start()
def test_input_by_name(circuit):
"""
Input block may be specified by name.
This implies that string constants require explicit Const('...')
"""
Noop('blk_X').connect('blk_Y', edzed.Const('blk_Z')) # blk_Z is just a string
Noop('blk_Y').connect(a=('blk_X', None))
init(circuit)
def test_no_unknown_inputs(circuit):
"""Test unknown block error. """
Noop('blk_X').connect('blk_Y', edzed.Const('blk_Z'))
Noop('blk_Y').connect(a=('blk_X', 'blk_Z')) # there is no blk_Z
with pytest.raises(Exception, match="not found"):
init(circuit)
def test_no_cross_circuit_inputs(circuit):
"""No cross circuit connections. Constants are an exception."""
noop1 = Noop('noop')
msg1 = edzed.Const("hello!")
init(circuit)
edzed.reset_circuit()
noop2 = Noop('noop').connect(noop1)
circuit2 = edzed.get_circuit()
with pytest.raises(ValueError, match="not in the current circuit"):
init(circuit2)
# Const objects do not belong to specific circuit
edzed.reset_circuit()
noop3 = Noop('noop').connect(msg1)
circuit3 = edzed.get_circuit()
init(circuit3)
def test_input_access(circuit):
"""Test various methods to access input values."""
class MyBlock(edzed.CBlock):
def calc_output(self):
return [
                self._in['_'],      # tuple of all unnamed inputs
self._in['ctrl'], # input name as a key
self._in.ctrl, # input name as an attr
]
blk = MyBlock('test').connect(50, 51, 52, 53, 54, ctrl=edzed.Const('CTRL'))
init(circuit)
blk.eval_block()
assert blk.output == [
(50, 51, 52, 53, 54),
'CTRL',
'CTRL',
]
def test_connection_attrs(circuit):
"""Check the iconnections and oconnections."""
inp0 = Noop('inp0', comment="not connected to main").connect(-1)
inp1 = Noop('inp1').connect('inp0', loopback='inp1')
inp2 = Noop('inp2')
inp3 = Noop('inp3').connect(x=(inp1, 'inp0'))
inp4 = Noop('inp4', comment="not connected to anything").connect(x=inp3)
main = Noop('main').connect(True, inp1, a=False, b=inp2, c=(10, 20, 'inp3'))
init(circuit)
assert not inp0.iconnections # Const excl.
assert inp1.iconnections == {inp0, inp1}
assert not inp2.iconnections
assert inp3.iconnections == {inp0, inp1}
assert inp4.iconnections == {inp3}
assert main.iconnections == {inp1, inp2, inp3}
assert inp0.oconnections == {inp1, inp3}
assert inp1.oconnections == {inp1, inp3, main}
assert inp2.oconnections == {main}
assert inp3.oconnections == {inp4, main}
assert not inp4.oconnections
assert not main.oconnections
def test_signature_exc(circuit):
"""Test exception getting a signature of an unconnected block."""
blk = Noop('noname')
with pytest.raises(edzed.EdzedInvalidState):
blk.input_signature()
blk.connect(foo=(0,0,0))
assert blk.input_signature() == {'foo': 3}
def test_signature_and_get_conf(circuit):
"""Test input signature related functions."""
blk1 = Noop('test1', comment=' without unnamed inputs').connect(
inp2=20, inp3=30, inp1=10)
blk2 = Noop('test2', comment='with unnamed inputs', ).connect(
100, 101, 102, 103, # unnamed (group '_')
inpA=edzed.Const('A2'), # named single input
inpB=[edzed.Const('B2')], # named sequence
inpC=range(5), # named iterator
)
assert 'inputs' not in blk1.get_conf() # no data before finalization
init(circuit)
assert blk1.input_signature() == ({'inp1': None, 'inp2': None, 'inp3': None,})
conf1 = blk1.get_conf()
conf1ok = {
# future versions may add additional keys to get_conf()
'class': 'Noop',
'debug': False,
'comment': ' without unnamed inputs',
'inputs': {
'inp1': "<Const 10>",
'inp2': "<Const 20>",
'inp3': "<Const 30>",
},
'name': 'test1',
'type': 'combinational',
}
assert all(conf1[key] == value for key, value in conf1ok.items())
assert blk2.input_signature() == ({'inpA': None, '_': 4, 'inpB': 1, 'inpC': 5})
assert blk2.get_conf()['inputs'] == {
'_': ('<Const 100>', '<Const 101>', '<Const 102>', '<Const 103>'),
'inpA': "<Const 'A2'>",
'inpB': ("<Const 'B2'>",), # a 1-tuple
'inpC': ('<Const 0>', '<Const 1>', '<Const 2>', '<Const 3>', '<Const 4>'),
}
blk2.check_signature({'inpA': None, '_': 4, 'inpB': 1, 'inpC': 5})
blk2.check_signature({'inpA': None, '_': [4, 5], 'inpB': [None, None], 'inpC': [0, 5]})
with pytest.raises(ValueError, match="missing: 'extra'"):
blk2.check_signature({'inpA': None, 'extra': None, '_': 4, 'inpB': 1, 'inpC': 5})
with pytest.raises(ValueError, match="unexpected: 'inpA'"):
blk2.check_signature({'_': 4, 'inpB': 1, 'inpC': 5})
with pytest.raises(ValueError, match="count is 5, expected was 10"):
blk2.check_signature({'inpA': None, '_': 4, 'inpB': 1, 'inpC': 10})
with pytest.raises(ValueError, match="did you mean 'inp_A'"):
blk2.check_signature({'inp_A': None, '_': 4, 'inpB': 1, 'inpC': 5})
with pytest.raises(ValueError, match="count is 1, minimum is 2"):
blk2.check_signature({'inpA': None, '_': 4, 'inpB': [2, None], 'inpC': 5})
with pytest.raises(ValueError, match="count is 4, maximum is 3"):
blk2.check_signature({'inpA': None, '_': (2, 3), 'inpB': [0, 1], 'inpC': 5})
with pytest.raises(ValueError, match="invalid"):
blk2.check_signature({'inpA': None, '_': 4.5, 'inpB': 1, 'inpC': 5})
with pytest.raises(ValueError, match="invalid"):
blk2.check_signature({'inpA': None, '_': [4], 'inpB': 1, 'inpC': 5})
with pytest.raises(ValueError, match="invalid"):
blk2.check_signature({'inpA': None, '_': [0, 1, 2, 3], 'inpB': 1, 'inpC': 5})
def test_override(circuit):
"""Test the override block."""
SENTINEL = 999
inp = edzed.Input('inp', initdef=None)
override = edzed.Input('ctrl', initdef=SENTINEL)
out = edzed.Override('test', null_value=SENTINEL).connect(input=inp, override=override)
init(circuit)
TEST_VALUES = (17, 3.14, SENTINEL, True, False, None, "LAST")
for value in TEST_VALUES:
inp.event('put', value=value)
out.eval_block()
assert out.output == value
for value in TEST_VALUES:
override.event('put', value=value)
out.eval_block()
        assert out.output == (value if value != SENTINEL else "LAST")  # parentheses required
def test_compare(circuit):
"""Test the compare block."""
with pytest.raises(ValueError, match="threshold"):
edzed.Compare(None, low=10, high=5)
inp = edzed.Input('inp', initdef=8.1)
cmp1 = edzed.Compare('cmp1', low=7.0, high=9.0).connect(inp) # 8.0 +- 1
cmp2 = edzed.Compare('cmp2', low=8.0, high=9.0).connect(inp) # 8.5 +- 0.5
cmp3 = edzed.Compare('cmp3', low=8.0, high=8.0).connect(inp) # 8.0 no hysteresis
init(circuit)
cmp1.eval_block()
cmp2.eval_block()
cmp3.eval_block()
assert cmp1.output
assert not cmp2.output
assert cmp3.output
TEST1 = (8.1, 7.5, 6.0, 7.5, 100, 7.5, 7.0, 6.999, 7.0, 7.5, 9.0, 777)
CMP1 = (True, True, False, False, True, True, True, False, False, False, True, True)
for t, c in zip(TEST1, CMP1):
inp.put(t)
cmp1.eval_block()
assert cmp1.output == c
assert inp.output == 777 # tested with all values?
TEST3 = (8.1, 7.9, 8.0, 100, 8.0, 7.0, 777)
CMP3 = (True, False, True, True, True, False, True)
for t, c in zip(TEST3, CMP3):
inp.put(t)
cmp3.eval_block()
assert cmp3.output == c
assert inp.output == 777
def test_and_or(circuit):
"""Test unpack=False on AND/OR logical gates."""
inp0 = edzed.Input('inp0', initdef=False)
inp1 = edzed.Input('inp1', initdef=False)
and_gate = edzed.And('AND').connect(inp0, inp1, True)
or_gate = edzed.Or('OR').connect(inp0, inp1, False)
init(circuit)
for v0, v1 in ((0, 0), (0, 1), (1, 0), (1, 1)):
inp0.put(v0)
inp1.put(v1)
and_gate.eval_block()
or_gate.eval_block()
assert and_gate.output == bool(v0 and v1)
assert or_gate.output == bool(v0 or v1)
def test_and_or_empty(circuit):
"""Test unpack=False with no inputs."""
and_gate = edzed.And('AND')
or_gate = edzed.Or('OR')
init(circuit)
and_gate.eval_block()
or_gate.eval_block()
assert and_gate.output
assert not or_gate.output
def test_invert(circuit):
"""Test explicitly created inverter (Not) blocks."""
src = edzed.Input('src', allowed=(True, False), initdef=True)
notsrc = edzed.Not('notsrc').connect(src)
src2 = edzed.Not('src2').connect(notsrc)
init(circuit)
# inverter blocks are a special case in finalize()
assert notsrc.iconnections == {src}
assert notsrc.oconnections == {src2}
for value in (True, False, True, False):
src.put(value)
notsrc.eval_block()
src2.eval_block()
assert notsrc.output is not value
assert src2.output is value
|
StarcoderdataPython
|
3207361
|
<reponame>vsiddhu/qinfpy<gh_stars>0
#zerOut(mt) : Removes small entries in array
import copy
import numpy as np
__all__ = ['zerOut']
def zerOut(array, tol = 1e-15):
r"""Takes as input an array and tolerance, copies it, in this copy,
nulls out real and complex part of each entry smaller than the tolerance,
returns the copy.
Parameters
----------
array : numpy.ndarray
tol : float
Optional, 1e-15 by default
Returns
----------
arr : numpy.ndarray
Identical to input array, except each entry smaller than tol is set
to zero
"""
arr = copy.deepcopy(array)
for index, val in np.ndenumerate(arr):
if (np.abs(val.real) < tol):
arr[index] = (arr[index] - arr[index].conj())/2.
if (np.abs(val.imag) < tol):
arr[index] = (arr[index] + arr[index].conj())/2.
return arr
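# A minimal usage sketch (not part of the module): zerOut() strips the
# numerical noise that complex arithmetic typically leaves behind.
if __name__ == '__main__':
    noisy = np.array([[1.0 + 1e-17j, 3e-16 + 0.5j],
                      [2.0 + 0.0j, 1e-18 + 1e-16j]])
    print(zerOut(noisy))    # real/imaginary parts below 1e-15 are zeroed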
|
StarcoderdataPython
|
3481407
|
<reponame>dimka665/dropbox
#!/usr/bin/env python
"""Desktop Tasks app using Tkinter.
This uses a background thread to be notified of incoming changes.
It demonstrates, among others:
- How to call await() in a loop in a background thread efficiently:
Use make_cursor_map() to feed the 'deltamap' return value back
into the 'datastores' parameter for the next await() call.
- How to communicate in a safe way between the background thread and
the Tk main loop: Use a Queue plus a virtual event.
- How to avoid duplicate screen updates: Keep track of the revision
that was displayed.
- How to save and restore a datastore to/from a disk file.
- How to detect whether the network goes offline or comes back online
(in approximation).
"""
import json
import os
import sys
import time
import random
from threading import Thread
from Queue import Queue, Empty
from Tkinter import Tk, Frame, Button, Checkbutton, Entry, Label, BooleanVar
from Tkconstants import W, E, BOTH, END
# We use HTTPError as an approximation for "no network", even though
# this isn't always true -- sometimes it means "bad request" and
# sometimes we may get other exceptions.
from urllib3.exceptions import HTTPError
from dropbox.client import (
DropboxClient,
ErrorResponse,
)
from dropbox.datastore import (
DatastoreManager, Date,
DatastoreError, DatastoreNotFoundError,
)
# Virtual event to wake up the Tk main loop.
REFRESH_EVENT = '<<refresh-datastore>>'
# Filename where to store the data.
SERIALIZED_DATASTORE = 'my_tasks.json'
class TaskList(Frame):
def __init__(self, master, client):
Frame.__init__(self, master)
# Connect to Dropbox and open datastore.
self.manager = DatastoreManager(client)
# Try to load serialized datastore first.
datastore = self.load_serialized_datastore(SERIALIZED_DATASTORE)
if datastore is not None:
try:
datastore.load_deltas()
except DatastoreNotFoundError:
print 'This datastore has been deleted. Exiting now.'
sys.exit(1)
except HTTPError:
print 'We are offline. Proceed with caution.'
else:
datastore = self.manager.open_default_datastore()
self.datastore = datastore
self.table = self.datastore.get_table('tasks')
# Set up communication with background thread.
self.queue = Queue() # Holds deltas sent from background thread.
self.display_rev = 0 # Last revision displayed.
self.refresh() # Initial display update.
self.bind(REFRESH_EVENT, self.refresh) # Respond to background thread.
# Create, configure and start background thread.
self.bg_thread = Thread(name='bgthread', target=self.bg_run)
self.bg_thread.setDaemon(True)
self.bg_thread.start()
def load_serialized_datastore(self, filename):
try:
f = open(filename, 'rb')
except IOError as exc:
# Don't print an error if the file doesn't exist.
if os.path.exists(filename):
print 'Cannot load saved datastore:', exc
return None
with f:
try:
data = json.load(f)
id, handle, rev, snapshot = data
except ValueError as exc:
print 'Bad JSON on %s: %s' % (filename, exc)
return None
datastore = self.manager.open_raw_datastore(id, handle)
# If this fails, the save file is bad -- you must manually delete it.
datastore.apply_snapshot(rev, snapshot)
print 'Loaded datastore from', filename
return datastore
def save_serialized_datastore(self, datastore, filename):
id = datastore.get_id()
handle = datastore.get_handle()
rev = datastore.get_rev()
snapshot = datastore.get_snapshot()
data = [id, handle, rev, snapshot]
try:
f = open(filename, 'wb')
except IOError as exc:
print 'Cannot save datastore:', exc
return
with f:
json.dump(data, f)
print 'Saved datastore to', filename
def bg_run(self):
# This code runs in a background thread. No other code does.
deltamap = None
backoff = 0
while True:
cursor_map = DatastoreManager.make_cursor_map([self.datastore], deltamap)
try:
_, _, deltamap = self.manager.await(datastores=cursor_map)
except Exception as exc:
if isinstance(exc, HTTPError):
if not backoff:
print 'We have gone offline.'
else:
print 'We are still offline.'
else:
print 'bg_run():', repr(exc), str(exc)
# Randomized exponential backoff, clipped to 5 minutes.
backoff = min(backoff*2, 300) + random.random()
time.sleep(backoff)
continue
else:
if backoff:
print 'We have come back online.'
backoff = 0
if deltamap and self.datastore in deltamap:
deltas = deltamap[self.datastore]
if deltas is None:
# Stop the bg thread.
print 'This datastore has been deleted.'
print 'Please exit.'
break
if deltas:
self.queue.put(deltas)
self.event_generate(REFRESH_EVENT, when='tail')
def save(self, event=None):
self.save_serialized_datastore(self.datastore, SERIALIZED_DATASTORE)
def refresh(self, event=None):
# This is called directly when we have made a change,
# and when the background thread sends a REFRESH_EVENT.
self.load_queue() # Update the datastore.
if self.datastore.get_rev() == self.display_rev:
return # Nothing to do.
self.forget() # Hide the frame to reduce flashing.
for w in self.winfo_children():
w.destroy() # Delete the old widgets.
self.redraw() # Create new widgets.
self.pack(fill=BOTH, expand=1) # Show the frame.
self.display_rev = self.datastore.get_rev()
title = self.datastore.get_title()
mtime = self.datastore.get_mtime()
if not title:
title = 'My Tasks'
if mtime:
fmtime = mtime.to_datetime_local().strftime('%H:%M, %d %b %Y')
title = '%s (%s)' % (title, fmtime)
self.master.title(title)
self.input.focus_set()
def load_queue(self):
# Incorporate queued deltas into the datastore.
while True:
try:
deltas = self.queue.get_nowait()
except Empty:
break
else:
self.datastore.apply_deltas(deltas)
def redraw(self):
# Even though there are never more than three widgets per row,
# we have four columns, to allow the taskname label and the
# input widget to stretch.
self.grid_columnconfigure(2, weight=1)
row = 0
# Add a new row of widgets for each task.
for rec in sorted(self.table.query(), key=lambda rec: rec.get('created')):
# Extract the fields we need.
completed = rec.get('completed')
taskname = rec.get('taskname')
# Create a button with an 'X' in it, to delete the task.
close_btn = Button(self, text='X',
command=lambda rec=rec: self.delete_rec(rec))
close_btn.grid(row=row, column=0)
# Create a checkbox, to mark it completed (or not).
var = BooleanVar(self, value=completed)
completed_btn = Checkbutton(self, variable=var,
command=lambda rec=rec, var=var:
self.toggle_rec(rec, var))
completed_btn.grid(row=row, column=1)
# Create a label showing the task name.
taskname_lbl = Label(self, text=taskname, anchor=W)
taskname_lbl.grid(row=row, column=2, columnspan=2, sticky=W)
row += 1 # Bump row index.
# Add a final row with the input and button to add new tasks.
self.input = Entry(self)
self.input.bind('<Return>', self.add_rec)
self.input.grid(row=row, column=0, columnspan=3, sticky=W+E)
add_btn = Button(self, text='Add Task', command=self.add_rec)
add_btn.grid(row=row, column=3)
# Add save button. (Auto-save is left as an exercise.)
save_btn = Button(self, text='Save local snapshot', command=self.save)
save_btn.grid(row=row+1, column=0, columnspan=3, sticky=W)
def add_rec(self, event=None):
# Callback to add a new task.
self.do_transaction(self.table.insert,
completed=False, taskname=self.input.get(), created=Date())
def delete_rec(self, rec):
# Callback to delete a task.
self.do_transaction(rec.delete_record)
def toggle_rec(self, rec, var):
# Callback to toggle a task's completed flag.
try:
self.do_transaction(rec.set, 'completed', var.get())
finally:
# In case the transaction failed, flip the variable back.
var.set(rec.get('completed'))
def do_transaction(self, func, *args, **kwds):
self.update_idletasks() # Refresh screen without handling more input.
def call_func():
func(*args, **kwds)
try:
self.datastore.transaction(call_func, max_tries=4)
except Exception as exc:
# Maybe the server is down, or we experience extreme conflicts.
# NOTE: A more user-friendly way would be to show an error dialog.
print 'do_transaction():', repr(exc)
else:
self.refresh()
def main():
if not sys.argv[1:]:
print >>sys.stderr, 'Usage: tktasks.py ACCESS_TOKEN'
print >>sys.stderr, 'You can use shtasks.py to get an access token.'
sys.exit(2)
access_token = sys.argv[1]
client = DropboxClient(access_token)
root = Tk()
root.title('My Tasks')
root.geometry('250x300+10+10')
task_list = TaskList(root, client)
root.mainloop()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3513805
|
import torch
def encode_data(dataset, tokenizer, max_seq_length=128):
"""Featurizes the dataset into input IDs and attention masks for input into a
transformer-style model.
NOTE: This method should featurize the entire dataset simultaneously,
rather than row-by-row.
Args:
dataset: A Pandas dataframe containing the data to be encoded.
tokenizer: A transformers.PreTrainedTokenizerFast object that is used to
tokenize the data.
max_seq_length: Maximum sequence length to either pad or truncate every
input example to.
Returns:
input_ids: A PyTorch.Tensor (with dimensions [len(dataset), max_seq_length])
containing token IDs for the data.
attention_mask: A PyTorch.Tensor (with dimensions [len(dataset), max_seq_length])
containing attention masks for the data.
"""
## TODO: Tokenize the questions and passages using both truncation and padding.
## Use the tokenizer provided in the argument and see the code comments above for
## more details.
encoded_data = tokenizer(dataset['question'].tolist(), dataset['passage'].tolist(), truncation = True, padding = "max_length", max_length = max_seq_length)
return torch.LongTensor(encoded_data["input_ids"]), torch.LongTensor(encoded_data["attention_mask"])
def extract_labels(dataset):
"""Converts labels into numerical labels.
Args:
dataset: A Pandas dataframe containing the labels in the column 'label'.
Returns:
labels: A list of integers corresponding to the labels for each example,
where 0 is False and 1 is True.
"""
## TODO: Convert the labels to a numeric format and return as a list.
label = []
for i in dataset["label"]:
if i == True:
label.append(1)
elif i == False:
label.append(0)
return label
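# A hedged usage sketch (not part of the assignment code): it runs both helpers
# on a tiny hand-made BoolQ-style frame. The "bert-base-uncased" checkpoint is
# an assumption; any transformers fast tokenizer should behave the same way.
if __name__ == "__main__":
    import pandas as pd
    from transformers import AutoTokenizer

    df = pd.DataFrame({
        "question": ["is the sky blue", "do fish fly"],
        "passage": ["The sky appears blue on clear days.", "Most fish swim rather than fly."],
        "label": [True, False],
    })
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    input_ids, attention_mask = encode_data(df, tokenizer, max_seq_length=32)
    print(input_ids.shape, attention_mask.shape)    # torch.Size([2, 32]) twice
    print(extract_labels(df))                       # [1, 0]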
|
StarcoderdataPython
|
11393047
|
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import silhouette_samples, silhouette_score
def vectorize(list_of_docs, model):
"""Generate vectors for list of documents using a Word Embedding
Args:
list_of_docs: List of documents
model: Gensim's Word Embedding
Returns:
List of document vectors
"""
features = []
for tokens in list_of_docs:
zero_vector = np.zeros(model.vector_size)
vectors = []
for token in tokens:
if token in model.wv:
try:
vectors.append(model.wv[token])
except KeyError:
continue
if vectors:
vectors = np.asarray(vectors)
avg_vec = vectors.mean(axis=0)
features.append(avg_vec)
else:
features.append(zero_vector)
return features
def mbkmeans_clusters(
X,
k,
mb,
print_silhouette_values,
):
"""Generate clusters and print Silhouette metrics using MBKmeans
Args:
X: Matrix of features.
k: Number of clusters.
mb: Size of mini-batches.
print_silhouette_values: Print silhouette values per cluster.
Returns:
Trained clustering model and labels based on X.
"""
km = MiniBatchKMeans(n_clusters=k, batch_size=mb).fit(X)
print(f"For n_clusters = {k}")
print(f"Silhouette coefficient: {silhouette_score(X, km.labels_):0.2f}")
print(f"Inertia:{km.inertia_}")
if print_silhouette_values:
sample_silhouette_values = silhouette_samples(X, km.labels_)
print(f"Silhouette values:")
silhouette_values = []
for i in range(k):
cluster_silhouette_values = sample_silhouette_values[km.labels_ == i]
silhouette_values.append(
(
i,
cluster_silhouette_values.shape[0],
cluster_silhouette_values.mean(),
cluster_silhouette_values.min(),
cluster_silhouette_values.max(),
)
)
silhouette_values = sorted(
silhouette_values, key=lambda tup: tup[2], reverse=True
)
for s in silhouette_values:
print(
f" Cluster {s[0]}: Size:{s[1]} | Avg:{s[2]:.2f} | Min:{s[3]:.2f} | Max: {s[4]:.2f}"
)
return km, km.labels_
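# A hedged usage sketch (not part of the module): it trains a throwaway
# Word2Vec model (gensim >= 4 is assumed for the vector_size argument) on toy
# token lists, averages the word vectors per document with vectorize(), and
# clusters the result. Corpus and parameters are illustrative only.
if __name__ == "__main__":
    from gensim.models import Word2Vec

    docs = [["cheap", "flights", "to", "paris"],
            ["paris", "hotel", "deals"],
            ["weekend", "in", "paris"],
            ["python", "pandas", "tutorial"],
            ["learn", "python", "fast"],
            ["python", "typing", "basics"]]
    w2v = Word2Vec(sentences=docs, vector_size=50, min_count=1, seed=1)
    X = vectorize(docs, model=w2v)
    clustering, cluster_labels = mbkmeans_clusters(
        X, k=2, mb=6, print_silhouette_values=True)
    print(cluster_labels)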
|
StarcoderdataPython
|
5190152
|
#############################################################################
##
## Copyright (C) 2019 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
import os, glob, re, sys
from distutils import sysconfig
generic_error = (' Did you forget to activate your virtualenv? Or perhaps'
' you forgot to build / install PySide2 into your currently active Python'
' environment?')
pyside2_error = 'Unable to locate PySide2.' + generic_error
shiboken2_module_error = 'Unable to locate shiboken2-module.' + generic_error
shiboken2_generator_error = 'Unable to locate shiboken2-generator.' + generic_error
pyside2_libs_error = 'Unable to locate the PySide2 shared libraries.' + generic_error
python_link_error = 'Unable to locate the Python library for linking.'
python_include_error = 'Unable to locate the Python include headers directory.'
options = []
# option, function, error, description
options.append(("--shiboken2-module-path",
lambda: find_shiboken2_module(),
shiboken2_module_error,
"Print shiboken2 module location"))
options.append(("--shiboken2-generator-path",
lambda: find_shiboken2_generator(),
shiboken2_generator_error,
"Print shiboken2 generator location"))
options.append(("--pyside2-path", lambda: find_pyside2(), pyside2_error,
"Print PySide2 location"))
options.append(("--python-include-path",
lambda: get_python_include_path(),
python_include_error,
"Print Python include path"))
options.append(("--shiboken2-generator-include-path",
lambda: get_package_include_path(Package.shiboken2_generator),
pyside2_error,
"Print shiboken2 generator include paths"))
options.append(("--pyside2-include-path",
lambda: get_package_include_path(Package.pyside2),
pyside2_error,
"Print PySide2 include paths"))
options.append(("--python-link-flags-qmake", lambda: python_link_flags_qmake(), python_link_error,
"Print python link flags for qmake"))
options.append(("--python-link-flags-cmake", lambda: python_link_flags_cmake(), python_link_error,
"Print python link flags for cmake"))
options.append(("--shiboken2-module-qmake-lflags",
lambda: get_package_qmake_lflags(Package.shiboken2_module), pyside2_error,
"Print shiboken2 shared library link flags for qmake"))
options.append(("--pyside2-qmake-lflags",
lambda: get_package_qmake_lflags(Package.pyside2), pyside2_error,
"Print PySide2 shared library link flags for qmake"))
options.append(("--shiboken2-module-shared-libraries-qmake",
lambda: get_shared_libraries_qmake(Package.shiboken2_module), pyside2_libs_error,
"Print paths of shiboken2 shared libraries (.so's, .dylib's, .dll's) for qmake"))
options.append(("--shiboken2-module-shared-libraries-cmake",
lambda: get_shared_libraries_cmake(Package.shiboken2_module), pyside2_libs_error,
"Print paths of shiboken2 shared libraries (.so's, .dylib's, .dll's) for cmake"))
options.append(("--pyside2-shared-libraries-qmake",
lambda: get_shared_libraries_qmake(Package.pyside2), pyside2_libs_error,
"Print paths of PySide2 shared libraries (.so's, .dylib's, .dll's) for qmake"))
options.append(("--pyside2-shared-libraries-cmake",
lambda: get_shared_libraries_cmake(Package.pyside2), pyside2_libs_error,
"Print paths of PySide2 shared libraries (.so's, .dylib's, .dll's) for cmake"))
options_usage = ''
for i, (flag, _, _, description) in enumerate(options):
options_usage += ' {:<45} {}'.format(flag, description)
if i < len(options) - 1:
options_usage += '\n'
usage = """
Utility to determine include/link options of shiboken2/PySide2 and Python for qmake/CMake projects
that would like to embed or build custom shiboken2/PySide2 bindings.
Usage: pyside2_config.py [option]
Options:
{}
-a Print all options and their values
--help/-h Print this help
""".format(options_usage)
option = sys.argv[1] if len(sys.argv) == 2 else '-a'
if option == '-h' or option == '--help':
print(usage)
sys.exit(0)
class Package(object):
shiboken2_module = 1
shiboken2_generator = 2
pyside2 = 3
def clean_path(path):
return path if sys.platform != 'win32' else path.replace('\\', '/')
def shared_library_suffix():
if sys.platform == 'win32':
return 'lib'
elif sys.platform == 'darwin':
return 'dylib'
# Linux
else:
return 'so.*'
def import_suffixes():
if (sys.version_info >= (3, 4)):
import importlib.machinery
return importlib.machinery.EXTENSION_SUFFIXES
else:
import imp
result = []
for t in imp.get_suffixes():
result.append(t[0])
return result
def is_debug():
debug_suffix = '_d.pyd' if sys.platform == 'win32' else '_d.so'
return any([s.endswith(debug_suffix) for s in import_suffixes()])
def shared_library_glob_pattern():
glob = '*.' + shared_library_suffix()
return glob if sys.platform == 'win32' else 'lib' + glob
def filter_shared_libraries(libs_list):
def predicate(lib_name):
basename = os.path.basename(lib_name)
if 'shiboken' in basename or 'pyside2' in basename:
return True
return False
result = [lib for lib in libs_list if predicate(lib)]
return result
# Return qmake link option for a library file name
def link_option(lib):
# On Linux:
# Since we cannot include symlinks with wheel packages
# we are using an absolute path for the libpyside and libshiboken
# libraries when compiling the project
baseName = os.path.basename(lib)
link = ' -l'
if sys.platform in ['linux', 'linux2']: # Linux: 'libfoo.so' -> '/absolute/path/libfoo.so'
link = lib
elif sys.platform in ['darwin']: # Darwin: 'libfoo.so' -> '-lfoo'
link += os.path.splitext(baseName[3:])[0]
else: # Windows: 'libfoo.dll' -> 'libfoo.dll'
link += os.path.splitext(baseName)[0]
return link
# Locate PySide2 via sys.path package path.
def find_pyside2():
return find_package_path("PySide2")
def find_shiboken2_module():
return find_package_path("shiboken2")
def find_shiboken2_generator():
return find_package_path("shiboken2_generator")
def find_package(which_package):
if which_package == Package.shiboken2_module:
return find_shiboken2_module()
if which_package == Package.shiboken2_generator:
return find_shiboken2_generator()
if which_package == Package.pyside2:
return find_pyside2()
return None
def find_package_path(dir_name):
for p in sys.path:
if 'site-' in p:
package = os.path.join(p, dir_name)
if os.path.exists(package):
return clean_path(os.path.realpath(package))
return None
# Return version as "3.5"
def python_version():
return str(sys.version_info[0]) + '.' + str(sys.version_info[1])
def get_python_include_path():
return sysconfig.get_python_inc()
def python_link_flags_qmake():
flags = python_link_data()
if sys.platform == 'win32':
libdir = flags['libdir']
# This will add the "~1" shortcut for directories that
# contain white spaces
# e.g.: "Program Files" to "Progra~1"
for d in libdir.split("\\"):
if " " in d:
libdir = libdir.replace(d, d.split(" ")[0][:-1]+"~1")
return '-L{} -l{}'.format(libdir, flags['lib'])
elif sys.platform == 'darwin':
return '-L{} -l{}'.format(flags['libdir'], flags['lib'])
else:
# Linux and anything else
return '-L{} -l{}'.format(flags['libdir'], flags['lib'])
def python_link_flags_cmake():
flags = python_link_data()
libdir = flags['libdir']
lib = re.sub(r'.dll$', '.lib', flags['lib'])
return '{};{}'.format(libdir, lib)
def python_link_data():
# @TODO Fix to work with static builds of Python
libdir = sysconfig.get_config_var('LIBDIR')
if libdir is None:
libdir = os.path.abspath(os.path.join(
sysconfig.get_config_var('LIBDEST'), "..", "libs"))
version = python_version()
version_no_dots = version.replace('.', '')
flags = {}
flags['libdir'] = libdir
if sys.platform == 'win32':
suffix = '_d' if is_debug() else ''
flags['lib'] = 'python{}{}'.format(version_no_dots, suffix)
elif sys.platform == 'darwin':
flags['lib'] = 'python{}'.format(version)
# Linux and anything else
else:
if sys.version_info[0] < 3:
suffix = '_d' if is_debug() else ''
flags['lib'] = 'python{}{}'.format(version, suffix)
else:
flags['lib'] = 'python{}{}'.format(version, sys.abiflags)
return flags
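# Example return value on a typical Linux install (illustrative only):
#   {'libdir': '/usr/lib/x86_64-linux-gnu', 'lib': 'python3.8'}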
def get_package_include_path(which_package):
package_path = find_package(which_package)
if package_path is None:
return None
includes = "{0}/include".format(package_path)
return includes
def get_package_qmake_lflags(which_package):
package_path = find_package(which_package)
if package_path is None:
return None
link = "-L{}".format(package_path)
glob_result = glob.glob(os.path.join(package_path, shared_library_glob_pattern()))
for lib in filter_shared_libraries(glob_result):
link += ' '
link += link_option(lib)
return link
def get_shared_libraries_data(which_package):
package_path = find_package(which_package)
if package_path is None:
return None
glob_result = glob.glob(os.path.join(package_path, shared_library_glob_pattern()))
filtered_libs = filter_shared_libraries(glob_result)
libs = []
if sys.platform == 'win32':
for lib in filtered_libs:
libs.append(os.path.realpath(lib))
else:
for lib in filtered_libs:
libs.append(lib)
return libs
def get_shared_libraries_qmake(which_package):
libs = get_shared_libraries_data(which_package)
if libs is None:
return None
if sys.platform == 'win32':
if not libs:
return ''
dlls = ''
for lib in libs:
dll = os.path.splitext(lib)[0] + '.dll'
dlls += dll + ' '
return dlls
else:
libs_string = ''
for lib in libs:
libs_string += lib + ' '
return libs_string
def get_shared_libraries_cmake(which_package):
libs = get_shared_libraries_data(which_package)
result = ';'.join(libs)
return result
print_all = option == "-a"
for argument, handler, error, _ in options:
if option == argument or print_all:
handler_result = handler()
if handler_result is None:
sys.exit(error)
line = handler_result
if print_all:
line = "{:<40}: ".format(argument) + line
print(line)
|
StarcoderdataPython
|
1700544
|
<gh_stars>1-10
#import fire
from pprint import pprint
from copy import deepcopy
from .framework import Framework
from .dataset import TACRED
class GridSearch(object):
""" Grid Search algorithm implementation to search for optimal
hyperparameter setup.
TODO: Finish the implementation
Usage:
```
>>> search = GridSearch(Framework)
>>> dataset = TACRED('path/to/dataset')
>>> configurations = {
...
}
>>> best_solution = search(configurations, dataset)
```
"""
def __init__(self, framework):
super().__init__()
self.framework = framework
    def _generate_possible_configurations(self, configurations):
        possible_configurations = [{}]
        for key, values in configurations.items():
            if isinstance(values, list):
                new_dicts = []
                for value in values:
                    for conf in possible_configurations:
                        new_d = deepcopy(conf)
                        new_d[key] = value
                        new_dicts.append(new_d)
                possible_configurations = new_dicts
            else:
                for conf in possible_configurations:
                    conf[key] = values
        return possible_configurations
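    # Example of the expansion performed above (illustrative sketch only):
    #   {'lr': [1e-3, 1e-4], 'epochs': 10}
    #   -> [{'lr': 1e-3, 'epochs': 10}, {'lr': 1e-4, 'epochs': 10}]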
def __call__(self, configurations, dataset):
        possible_configurations = self._generate_possible_configurations(configurations)
best_conf = None
best_score = (0., 0., 0.)
scores = []
        for configuration in possible_configurations:
rgem = self.framework(**configuration)
rgem.fit(dataset)
_, prec, rec, f1 = rgem.evaluate(dataset)
scores.append(
(configuration, prec, rec, f1)
)
if f1 > best_score[-1]:
best_score = (prec, rec, f1)
best_conf = configuration
return best_conf, best_score, scores
def test():
search = GridSearch(Framework)
dataset = TACRED('data/tacred')
config = {
'n_rel' : 42, # Number of relations
'hidden_size' : [200, 300], # Heads hidden size
'dropout_p' : .2, # Dropout p
'device': "cuda", # Device
'epochs': 10, # Epochs
'lr': [2e-5, 2e-4, 2e-3], # Learning rate
'l2': [0.01, 0.02, .001], # L2 normalization
        'lambda': [2e-2, 2e-3, .1] # No-Relation class weight
}
best_conf, best_score, scores = search(config, dataset)
print(f"Best configuration with scores: {best_score}")
pprint(best_conf)
for conf, pre, rec, f1 in scores:
print("Configuration:")
pprint(conf)
print(f"Precision: {pre}\tRecall: {rec}\tF1-Score: {f1}")
if __name__ == "__main__":
#fire.Fire(test)
test()
|
StarcoderdataPython
|
6444966
|
<reponame>twaddle-dev/CoVid-19
from Crypto.Hash import (
keccak,
)
from eth_hash.preimage import (
BasePreImage,
)
def keccak256(prehash: bytes) -> bytes:
hasher = keccak.new(data=prehash, digest_bits=256)
return hasher.digest()
class preimage(BasePreImage):
_hash = None
def __init__(self, prehash) -> None:
self._hash = keccak.new(data=prehash, digest_bits=256, update_after_digest=True)
        # pycryptodome doesn't expose a `copy` mechanism for its hash objects
# so we keep a record of all of the parts for when/if we need to copy
# them.
self._parts = [prehash]
def update(self, prehash) -> None:
self._hash.update(prehash)
self._parts.append(prehash)
def digest(self) -> bytes:
return self._hash.digest()
def copy(self) -> 'preimage':
return preimage(b''.join(self._parts))
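if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); assumes
    # pycryptodome and eth-hash are installed, as the imports above require.
    h = preimage(b'hello')
    h.update(b' world')
    # A copy re-hashes the recorded parts, so the digests agree.
    assert h.copy().digest() == h.digest()
    # The incremental digest matches the one-shot helper.
    assert h.digest() == keccak256(b'hello world')
    print(keccak256(b'hello world').hex())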
|
StarcoderdataPython
|
1814924
|
import json
import os
from os import makedirs
from os.path import exists, getmtime, join, splitext
import time
def jsonize(dct):
""" Remvoes non-json elements from a dictionary.
Converts VirtualFile objects to the integral value 1.
Content stored in VirtualFile is lost. Write to file to avoid loss.
Returns a dict in json format.
"""
result = dict()
for k, v in dct.items():
#! all keys should be strings
v_t = type(v)
        # VirtualFile is expected to be defined/imported elsewhere in the project.
        if v_t == VirtualFile:
            result[k] = 1
        elif v_t == dict:
            result[k] = jsonize(v)
        elif v_t == list or v_t == tuple:
            result[k] = [jsonize(item) for item in v]
        elif v is None or v_t in [str, int, float, bool]:
            result[k] = v
        else:
raise Exception("ERROR: {} to json dict is not supported".format(v_t))
return result
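# Example (illustrative): jsonize({'a': some_virtual_file, 'b': {'c': 1}})
# would yield {'a': 1, 'b': {'c': 1}}, dropping the file's content.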
class Project:
def __init__(self, path=os.curdir):
self.main_path = path
self.config_path = join(path, "project.json")
self.is_fresh = not exists(self.config_path)
self.ran_subs = False
self.on_setup = []
self.build_rules = dict()
self.autos = dict()
self.perm = {
"name": "Project",
"version": "0.0.0",
"author": "",
"license": "",
"structure": {},
"targets": [],
"tests": [],
"dependencies": {},
"includes": [],
"build_scripts": [],
}
def json_data(self):
return jsonize(self.perm)
def str_data(self):
return json.dumps(self.json_data())
def save(self):
        # Use a context manager so the file is flushed and closed reliably
        # (save() is also called from __del__).
        with open(self.config_path, "w+") as cfg_file:
            json.dump(self.json_data(), cfg_file)
        self.is_fresh = False
def __del__(self):
self.save()
def setup(self):
for func in self.on_setup:
func(self)
def add_setup(self, func):
""" This method decorates a function that will only run
when the project is fresh.
"""
self.on_setup.append(func)
if self.is_fresh:
func(self)
return func
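    # Illustrative usage sketch (names and paths are assumptions, not from the original):
    #   project = Project("path/to/project")
    #   @project.add_setup
    #   def initial_setup(p):
    #       p.add_targets("build/bin/programs/app")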
def add_structure(self, dct, path=None):
if path == None:
path = self.main_path
# Internal function
def merge_structs(dct1, dct2, pth):
for k, v in dct1.items():
v_t = type(v)
inner_pth = join(pth, k)
if v_t == dict:
if k in dct2.keys():
v2_t = type(dct2[k])
if v2_t == dict:
# Merge items from dct1/$k into dct2/$k
merge_structs(dct1[k], dct2[k], inner_pth)
else:
raise Exception("ERROR: File and directory with same name")
else:
dct2[k] = v
else:
if k in dct2.keys():
print("(!) Structure already exists here, ignoring {}.".format(inner_path))
else:
dct2[k] = v
# / Internal Function
merge_structs(self.perm["structure"], path)
def make_structure(self, path=None, struct=None):
if path == None:
path = self.main_path
if struct == None:
struct = self.perm["structure"]
for k, v in struct.items():
v_t = type(v)
inner_path = join(path, k)
if v_t == dict:
makedirs(inner_path)
self.make_structure(inner_path, struct[k])
else:
if not exists(inner_path):
open(inner_path, "a+").close()
def add_targets(self, *targets):
self.perm["targets"] += targets
def add_includes(self, *includes):
self.perm["includes"] += includes
def add_rule(self, target, dependencies):
def decorator(func):
            deps = self.perm["dependencies"].setdefault(target, [])
            for dep in dependencies:
                if dep not in deps:
                    deps.append(dep)
self.build_rules[target] = func
return func
return decorator
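    # Illustrative usage sketch (paths and names are assumptions):
    #   @project.add_rule("build/bin/programs/app", ["programs/app.c"])
    #   def build_app():
    #       ...  # invoke the compiler here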
def add_auto(self, name, **kwargs):
def decorator(func):
self.autos[name] = (func, kwargs)
return func
return decorator
def set_xpath(self, path):
self.perm["xpath"] = path
def set_opath(self, path):
self.perm["opath"] = path
def set_test_xpath(self, path):
self.perm["test_xpath"] = path
def set_test_opath(self, path):
self.perm["test_opath"] = path
def add_subscript(self, path):
if not (path in self.perm["build_scripts"]):
self.perm["build_scripts"].append(path)
def run_subscripts(self):
for path in self.perm["build_scripts"]:
            # execfile() was removed in Python 3; exec the script contents instead.
            with open(path) as script:
                exec(script.read())
self.ran_subs = True
def do_auto(self, name, **kwargs):
#! This might be fronked up
auto = self.autos[name]
func = auto[0]
prekwargs = auto[1]
func(**prekwargs, **kwargs)
def build(self, target):
if not self.ran_subs:
self.run_subscripts()
# Build dependencies recursively
for dep in self.perm["dependencies"][target]:
            # Rebuild if the target is missing or older than the dependency.
            if not exists(target) or getmtime(target) < getmtime(dep):
self.build(dep)
# call registered build funcion
self.build_rules[target]()
def default_setup(project):
project.add_structure({
"build": {
"bin": {
"tests": {
},
"programs": {
}
},
"parts": {
"libraries": {
},
"tests": {
},
"programs": {
},
}
},
"programs": {},
"libraries": {},
"docs": {},
"tests": {},
})
project.set_xpath("build/bin/programs/")
project.set_opath("build/parts/programs/")
project.set_test_xpath("build/bin/tests/")
project.set_test_opath("build/parts/tests/")
def default_addlib(project, name):
    project.add_structure({
name : {
"include": {
name : {}
},
"src": {},
"test": {},
}
}, join(project.main_path, "libraries"))
def interactive_build(project):
print(" ----- Interactive Build Textual Interface ----- ")
done = False
while not done:
resp = input("!> ").split()
# Check if resp matches builds
if resp[0] == "build":
project.build(resp[1])
continue
        # Check if resp matches automations
        for auto_name in project.autos.keys():
            if resp[0] == auto_name:
                print("Running automation {}".format(auto_name))
                # do_auto() only accepts keyword arguments, so the remaining
                # CLI tokens cannot be forwarded positionally.
                project.do_auto(auto_name)
print(" ----------------------------------------------- ")
|
StarcoderdataPython
|
11273810
|
<reponame>linz/topo-processor<filename>topo_processor/cli/validate.py
import os
from functools import wraps
import click
import linz_logger
from linz_logger import LogLevel, get_log, logger, set_level
from topo_processor.file_system.get_fs import is_s3_path
from topo_processor.stac import DataType, collection_store, process_directory
from topo_processor.stac.item_factory import process_metadata
from topo_processor.util import time_in_ms
from topo_processor.util.transfer_collection import transfer_collection
@click.command()
@click.option(
"-s",
"--source",
required=True,
help="The name of the metadata csv file to import",
)
@click.option(
"-v",
"--verbose",
is_flag=True,
help="Use verbose to display trace logs",
)
def main(source, verbose):
if verbose:
set_level(LogLevel.trace)
else:
set_level(LogLevel.info)
start_time = time_in_ms()
if not is_s3_path(source):
source = os.path.abspath(source)
process_metadata(source)
get_log().debug(
"validate completed",
file=source,
duration=time_in_ms() - start_time,
)
|
StarcoderdataPython
|
1939460
|
import argparse
import pegasusio as io
import numpy as np
import pandas as pd
from collections import namedtuple
from typing import List, Dict, Tuple
demux_type_dict = {'SNG': 'singlet', 'DBL': 'doublet', 'AMB': 'unknown'}
SNP = namedtuple('SNP', ['CHROM', 'POS', 'REF', 'ALT'])
def check_colnames(fields: List[str]) -> bool:
template = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT']
for i, key in enumerate(template):
if fields[i] != key:
return False
return True
def parse_denovo_vcf(input_vcf: str) -> Tuple[List[str], Dict[object, object]]:
sample_names = None
snp2geno = {}
file_ext = input_vcf.split('.')[-1]
if file_ext == 'gz':
import gzip
fin = gzip.open(input_vcf, 'rt')
else:
fin = open(input_vcf, 'rt')
for line in fin:
if line.startswith('##'):
continue
if line.startswith('#'):
fields = line.strip()[1:].split('\t')
assert check_colnames(fields)
sample_names = fields[9:]
else:
fields = line.strip().split('\t')
assert fields[8].startswith('GT')
snp = SNP(fields[0], fields[1], fields[3], fields[4])
snp2geno[snp] = [x.split(':')[0] for x in fields[9:]]
fin.close()
return sample_names, snp2geno
def calc_matching(denovo_geno: List[str], ref_geno: List[str], mmat: np.ndarray) -> None:
for i, r_geno in enumerate(ref_geno):
for j, d_geno in enumerate(denovo_geno):
mmat[i, j] += (r_geno == d_geno)
def parse_reference_vcf(reference_vcf: str, snp2geno: dict, sample_names: List[str]) -> Tuple[List[str], np.ndarray, bool]:
nsample = len(sample_names)
cnt = 0
nbingo = 0
ref_names = None
mmat = None # mmat: matching matrix
file_ext = reference_vcf.split('.')[-1]
if file_ext == 'gz':
import gzip
fin = gzip.open(reference_vcf, 'rt')
else:
fin = open(reference_vcf, 'rt')
for line in fin:
if line.startswith('##'):
continue
if line.startswith('#'):
fields = line.strip()[1:].split('\t')
if 'FORMAT' in fields:
assert check_colnames(fields)
else:
return None, None, False
ref_names = fields[9:]
mmat = np.zeros((len(ref_names), nsample), dtype = int)
else:
fields = line.strip().split('\t')
snp = SNP(fields[0], fields[1], fields[3], fields[4])
d_geno = snp2geno.get(snp, None)
if d_geno is not None:
assert fields[8].startswith('GT')
r_geno = [x.split(':')[0] for x in fields[9:]]
calc_matching(d_geno, r_geno, mmat)
nbingo += 1
cnt += 1
if cnt % 100000 == 0:
print("Parsed {0} variants, matched {1} variants.".format(cnt, nbingo))
fin.close()
print("\n{0} variants are parsed and {1} SNPs are matched.".format(cnt, nbingo))
return ref_names, mmat, True
def replace_ref_names(ref_str: str, ref_names: List[str], has_ref_genotypes: bool) -> List[str]:
if ref_str is not None:
res_arr = ref_str.split(',')
assert len(res_arr) == len(ref_names)
ref_names = res_arr
elif not has_ref_genotypes: # Rename freemuxlet's cluster names from "CLUST<num>" to "Donor<num>" format.
ref_names = ['Donor' + str(int(x[5:]) + 1) for x in ref_names]
ref_names = ['_ref_' + x for x in ref_names]
return ref_names
def find_max_matching(ref_names: List[str], sample_names: List[str], mmat: np.array) -> dict:
import itertools
import networkx as nx
from networkx.algorithms import bipartite
nref = len(ref_names)
nsample = len(sample_names)
G = nx.Graph()
G.add_nodes_from(ref_names, bipartite = 0)
G.add_nodes_from(sample_names, bipartite = 1)
G.add_weighted_edges_from([(ref_names[x], sample_names[y], -mmat[x, y]) for x, y in itertools.product(range(nref), range(nsample))])
result = bipartite.matching.minimum_weight_full_matching(G)
for j, sample_name in enumerate(sample_names):
if sample_name not in result:
i = np.where(mmat[:, j] == mmat[:, j].max())[0][0]
result[sample_name] = ref_names[i]
if isinstance(result[ref_names[i]], str):
result[ref_names[i]] = [result[ref_names[i]]]
result[ref_names[i]].append(sample_name)
ref_n2i = {}
for i, ref_name in enumerate(ref_names):
ref_n2i[ref_name] = i
for j, sample_name in enumerate(sample_names):
i = ref_n2i[result[sample_name]]
if mmat[i, j] != mmat[:, j].max():
k = np.where(mmat[:, j] == mmat[:, j].max())[0][0]
print("Warning: popscle donor {} shares most SNPs with ref donor {}, but matches to ref donor {}!".format(sample_name, ref_names[k][5:], ref_names[i][5:]))
print()
for sample_name in sample_names:
print("Popscle donor {} matches reference donor {}.".format(sample_name, result[sample_name][5:]))
print()
return result
def translate_donor_name(inp_str: str, matching: dict) -> str:
if inp_str == '':
return inp_str
res_str = []
for donor_id in inp_str.split(','):
res_str.append(matching[f"CLUST{donor_id}"][5:])
return ','.join(res_str)
def gen_summary_of_status(df):
type_counts = df['DROPLET.TYPE'].value_counts()
for type in demux_type_dict.keys():
if type not in type_counts.index:
type_counts[type] = 0
return type_counts
def set_matching_no_reference(sample_numbers: List[str], ref_names: List[str]) -> dict:
assert len(sample_numbers) == len(ref_names)
matching = dict()
pairs = list(zip(sample_numbers, ref_names))
for (sample_number, ref_name) in pairs:
matching[sample_number] = ref_name
matching[ref_name] = sample_number
return matching
def write_output(assignment_file: str, input_mat_file: str, output_zarr_file: str, matching: dict = None) -> None:
algorithm = 'demuxlet' if matching is None else 'freemuxlet'
df = pd.read_csv(assignment_file, sep='\t', header=0, index_col='BARCODE')
df.index = pd.Index([x.split('-')[0] for x in df.index])
type_counts = gen_summary_of_status(df)
print("\nSinglets = {}, doublets = {}, unknown = {}.".format(type_counts['SNG'], type_counts['DBL'], type_counts['AMB']))
df['demux_type'] = df['DROPLET.TYPE'].apply(lambda s: demux_type_dict[s])
df['assignment'] = ''
df.loc[df['demux_type']=='singlet', 'assignment'] = df.loc[df['demux_type']=='singlet', 'SNG.BEST.GUESS']
if algorithm == 'demuxlet':
df.loc[df['demux_type']=='doublet', 'assignment'] = df.loc[df['demux_type']=='doublet', 'DBL.BEST.GUESS'].apply(lambda s: ','.join(s.split(',')[:-1]))
else:
df.loc[df['demux_type']=='doublet', 'assignment'] = df.loc[df['demux_type']=='doublet', 'DBL.BEST.GUESS']
f = np.vectorize(translate_donor_name)
df['assignment'] = f(df['assignment'].astype('str').values, matching)
data = io.read_input(input_mat_file)
data.obs['demux_type'] = ''
data.obs['assignment'] = ''
idx = data.obs_names.isin(df.index)
barcodes = data.obs_names[idx]
df_valid = df.loc[barcodes, ['demux_type', 'assignment']]
data.obs.loc[idx, 'demux_type'] = df_valid['demux_type'].values
data.obs.loc[idx, 'assignment'] = df_valid['assignment'].values
io.write_output(data, output_zarr_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merge popscle result with gene-count matrix, matching donor names if needed.')
parser.add_argument('demux_res', metavar='(demux_result.best | demux_result.clust1.samples.gz)', help='Demuxlet or Freemuxlet demultiplexing results.')
parser.add_argument('raw_mat', metavar='raw_feature_bc_matrix.h5', help='Raw gene count matrix in 10x format.')
parser.add_argument('out_file', metavar='output_result.zarr.zip', help='Output zarr file.')
parser.add_argument('--cluster-genotypes', metavar = '(demux_result.clust1.vcf.gz | demux_result.clust1.vcf)', dest='cluster_genotypes', help = 'Genotypes detected by freemuxlet from RNA-seq reads.')
parser.add_argument('--ref-genotypes', metavar = '(reference_genotypes.vcf.gz | reference_genotypes.vcf)', dest = 'ref_genotypes', help = 'Reference genotypes called from exome or genome sequencing data.')
parser.add_argument('--donor-names', dest = 'ref_names', help = 'A comma-separated list containing donor names that are used to replace the ones in reference_genotypes.vcf.gz. Must match the order in the .vcf.gz file.')
args = parser.parse_args()
if args.cluster_genotypes is not None: # freemuxlet
sample_names, snp2geno = parse_denovo_vcf(args.cluster_genotypes)
has_format = False
if args.ref_genotypes is not None:
ref_names, mmat, has_format = parse_reference_vcf(args.ref_genotypes, snp2geno, sample_names)
if has_format:
ref_names = replace_ref_names(args.ref_names, ref_names, has_ref_genotypes = True)
matching = find_max_matching(ref_names, sample_names, mmat)
else:
ref_names = replace_ref_names(args.ref_names, sample_names, has_ref_genotypes = False)
matching = set_matching_no_reference(sample_names, ref_names)
write_output(args.demux_res, args.raw_mat, args.out_file, matching)
else: # demuxlet
write_output(args.demux_res, args.raw_mat, args.out_file)
|
StarcoderdataPython
|
6454363
|
<reponame>rginjapan/DeepLIO<filename>deeplio/losses/__init__.py
from .losses import HWSLoss, LWSLoss, GeometricConsistencyLoss
def get_loss_function(cfg, device):
loss_cfg = cfg['losses']
loss_name = loss_cfg['active'].lower()
    loss_params_cfg = loss_cfg.get(loss_name, {})
    params = loss_params_cfg.get('params', {})
    loss_type = loss_cfg['loss-type'].lower()
    # check whether we need both the local and the global loss
if "+" in loss_type:
loss_types = [True, True]
elif loss_type == "global":
loss_types = [False, True]
elif loss_type == "local":
loss_types = [True, False]
else:
raise ValueError("Wrong loss type selected!")
if loss_name == 'hwsloss':
learn_smooth = params.get('learn', False)
sx = params.get('sx', 0.)
sq = params.get('sq', -2.5)
return HWSLoss(sx=sx, sq=sq, learn_hyper_params=learn_smooth, device=device, loss_Types=loss_types)
elif loss_name == 'lwsloss':
beta = params.get('beta', 1125.)
return LWSLoss(beta=beta, loss_Types=loss_types)
else:
raise ValueError("Loss {} is not supported!".format(loss_name))
|
StarcoderdataPython
|
1691038
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.openshift_common import OpenShiftAnsibleModule, OpenShiftAnsibleException
DOCUMENTATION = '''
module: openshift_v1_image_stream_tag_list
short_description: OpenShift ImageStreamTagList
description:
- Retrieve a list of image_stream_tags. List operations provide a snapshot read of
the underlying objects, returning a resource_version representing a consistent version
of the listed objects.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
    - If set to C(True), and I(state) is C(present), an existing object will be updated,
and lists will be replaced, rather than merged.
default: false
type: bool
host:
description:
    - Provide a URL for accessing the Kubernetes API.
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
resource_definition:
description:
- Provide the YAML definition for the object, bypassing any modules parameters
intended to define object attributes.
type: dict
src:
description:
- Provide a path to a file containing the YAML definition of the object. Mutually
exclusive with I(resource_definition).
type: path
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
state:
description:
- Determines if an object should be created, patched, or deleted. When set to
C(present), the object will be created, if it does not exist, or patched, if
parameter values differ from the existing object's attributes, and deleted,
if set to C(absent). A patch operation results in merging lists and updating
dictionaries, with lists being merged into a unique set of values. If a list
contains a dictionary with a I(name) or I(type) attribute, a strategic merge
is performed, where individual elements with a matching I(name_) or I(type)
are merged. To force the replacement of lists, set the I(force) option to C(True).
default: present
choices:
- present
- absent
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- openshift == 0.4.0.a1
'''
EXAMPLES = '''
'''
RETURN = '''
api_version:
description: Requested API version
type: string
image_stream_tag_list:
type: complex
returned: when I(state) = C(present)
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
items:
description:
- Items is the list of image stream tags
type: list
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value,
and may reject unrecognized values.
type: str
conditions:
description:
- conditions is an array of conditions that apply to the image stream tag.
type: list
contains:
generation:
description:
- Generation is the spec tag generation that this status corresponds
to
type: int
last_transition_time:
description:
- LastTransitionTIme is the time the condition transitioned from one
status to another.
type: complex
contains: {}
message:
description:
- Message is a human readable description of the details about last
transition, complementing reason.
type: str
reason:
description:
- Reason is a brief machine readable explanation for the condition's
last transition.
type: str
status:
description:
- Status of the condition, one of True, False, Unknown.
type: str
type:
description:
- Type of tag event condition, currently only ImportSuccess
type: str
generation:
description:
- generation is the current generation of the tagged image - if tag is provided
and this value is not equal to the tag generation, a user has requested
an import that has not completed, or conditions will be filled out indicating
any error.
type: int
image:
description:
- image associated with the ImageStream and tag.
type: complex
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated. In CamelCase.
type: str
lookup_policy:
description:
- lookupPolicy indicates whether this tag will handle image references in
this namespace.
type: complex
metadata:
description:
- Standard object's metadata.
type: complex
tag:
description:
- tag is the spec tag associated with this image stream tag, and it may
be null if only pushes have occurred to this image stream.
type: complex
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- Standard object's metadata.
type: complex
'''
def main():
try:
module = OpenShiftAnsibleModule('image_stream_tag_list', 'v1')
except OpenShiftAnsibleException as exc:
# The helper failed to init, so there is no module object. All we can do is raise the error.
raise Exception(exc.message)
try:
module.execute_module()
except OpenShiftAnsibleException as exc:
module.fail_json(msg="Module failed!", error=str(exc))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4881849
|
<gh_stars>0
# class Solution(object):
# def reverseWords(self, s):
# """
# :type s: a list of 1 length strings (List[str])
# :rtype: nothing
# """
# stack = []
# final_string = ""
# for item in s:
# if item != " ":
# stack.append(item)
# else:
# while len(stack) != 0:
# final_string += stack.pop()
# final_string += " "
# while(len(stack)) != 0:
# final_string += stack.pop()
# final_string += " "
# return final_string[::-1]
class Solution(object):
def reverseWords(self, s):
"""
:type s: a list of 1 length strings (List[str])
:rtype: nothing
"""
        for i in range(len(s) // 2):
            s[i], s[len(s)-i-1] = s[len(s)-i-1], s[i]
return s
a = Solution().reverseWords(['t','h','e'])
print(a)
|
StarcoderdataPython
|
3382652
|
import unittest
import numpy as np
import pysal
#import pysal.spreg as EC
from scipy import sparse
from pysal.contrib.handler import Model
from functools import partial
OLS = partial(Model, mtype='OLS')
PEGP = pysal.examples.get_path
class TestOLS(unittest.TestCase):
def setUp(self):
db = pysal.open(PEGP('columbus.dbf'),'r')
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.weights.rook_from_shapefile(PEGP("columbus.shp"))
def test_OLS(self):
self.X = sparse.csr_matrix(self.X)
ols = OLS(self.y, self.X, w=self.w, spat_diag=True, moran=True, \
name_y='home value', name_x=['income','crime'], \
name_ds='columbus', nonspat_diag=True, white_test=True)
np.testing.assert_array_almost_equal(ols.aic, \
408.73548964604873 ,7)
np.testing.assert_array_almost_equal(ols.ar2, \
0.32123239427957662 ,7)
np.testing.assert_array_almost_equal(ols.betas, \
np.array([[ 46.42818268], [ 0.62898397], \
[ -0.48488854]]), 7)
bp = np.array([2, 5.7667905131212587, 0.05594449410070558])
ols_bp = np.array([ols.breusch_pagan['df'], ols.breusch_pagan['bp'], ols.breusch_pagan['pvalue']])
np.testing.assert_array_almost_equal(bp, ols_bp, 7)
np.testing.assert_array_almost_equal(ols.f_stat, \
(12.358198885356581, 5.0636903313953024e-05), 7)
jb = np.array([2, 39.706155069114878, 2.387360356860208e-09])
ols_jb = np.array([ols.jarque_bera['df'], ols.jarque_bera['jb'], ols.jarque_bera['pvalue']])
np.testing.assert_array_almost_equal(ols_jb,jb, 7)
white = np.array([5, 2.90606708, 0.71446484])
ols_white = np.array([ols.white['df'], ols.white['wh'], ols.white['pvalue']])
np.testing.assert_array_almost_equal(ols_white,white, 7)
np.testing.assert_equal(ols.k, 3)
kb = {'df': 2, 'kb': 2.2700383871478675, 'pvalue': 0.32141595215434604}
for key in kb:
self.assertAlmostEqual(ols.koenker_bassett[key], kb[key], 7)
np.testing.assert_array_almost_equal(ols.lm_error, \
(4.1508117035117893, 0.041614570655392716),7)
np.testing.assert_array_almost_equal(ols.lm_lag, \
(0.98279980617162233, 0.32150855529063727), 7)
np.testing.assert_array_almost_equal(ols.lm_sarma, \
(4.3222725729143736, 0.11519415308749938), 7)
np.testing.assert_array_almost_equal(ols.logll, \
-201.3677448230244 ,7)
np.testing.assert_array_almost_equal(ols.mean_y, \
38.436224469387746,7)
np.testing.assert_array_almost_equal(ols.moran_res[0], \
0.20373540938,7)
np.testing.assert_array_almost_equal(ols.moran_res[1], \
2.59180452208,7)
np.testing.assert_array_almost_equal(ols.moran_res[2], \
0.00954740031251,7)
np.testing.assert_array_almost_equal(ols.mulColli, \
12.537554873824675 ,7)
np.testing.assert_equal(ols.n, 49)
np.testing.assert_equal(ols.name_ds, 'columbus')
np.testing.assert_equal(ols.name_gwk, None)
np.testing.assert_equal(ols.name_w, 'unknown')
np.testing.assert_equal(ols.name_x, ['CONSTANT', 'income', 'crime'])
np.testing.assert_equal(ols.name_y, 'home value')
np.testing.assert_array_almost_equal(ols.predy[3], np.array([
33.53969014]),7)
np.testing.assert_array_almost_equal(ols.r2, \
0.34951437785126105 ,7)
np.testing.assert_array_almost_equal(ols.rlm_error, \
(3.3394727667427513, 0.067636278225568919),7)
np.testing.assert_array_almost_equal(ols.rlm_lag, \
(0.17146086940258459, 0.67881673703455414), 7)
np.testing.assert_equal(ols.robust, 'unadjusted')
np.testing.assert_array_almost_equal(ols.schwarz, \
414.41095054038061,7 )
np.testing.assert_array_almost_equal(ols.sig2, \
231.4568494392652,7 )
np.testing.assert_array_almost_equal(ols.sig2ML, \
217.28602192257551,7 )
np.testing.assert_array_almost_equal(ols.sig2n, \
217.28602192257551, 7)
np.testing.assert_array_almost_equal(ols.t_stat[2][0], \
-2.65440864272,7)
np.testing.assert_array_almost_equal(ols.t_stat[2][1], \
0.0108745049098,7)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
9693589
|
import pytest
import torch
from torch import nn
from deepqmc import Molecule
from deepqmc.fit import LossEnergy, fit_wf
from deepqmc.physics import local_energy
from deepqmc.sampling import LangevinSampler
from deepqmc.wf import PauliNet
from deepqmc.wf.paulinet.gto import GTOBasis
from deepqmc.wf.paulinet.schnet import ElectronicSchNet
try:
import pyscf.gto
except ImportError:
pyscf_marks = [pytest.mark.skip(reason='Pyscf not installed')]
else:
pyscf_marks = []
def assert_alltrue_named(items):
dct = dict(items)
assert dct == {k: True for k in dct}
@pytest.fixture
def rs():
return torch.randn(5, 3, 3)
@pytest.fixture
def mol():
mol = Molecule.from_name('H2')
mol.charge = -1
mol.spin = 3
return mol
@pytest.fixture(params=[pytest.param(PauliNet, marks=pyscf_marks)])
def net_factory(request):
return request.param
class JastrowNet(nn.Module):
def __init__(self, n_atoms, dist_feat_dim, n_up, n_down):
super().__init__()
self.schnet = ElectronicSchNet(
n_up,
n_down,
n_atoms,
dist_feat_dim=dist_feat_dim,
n_interactions=2,
kernel_dim=8,
embedding_dim=16,
version=1,
)
self.orbital = nn.Linear(16, 1, bias=False)
def forward(self, *xs, **kwargs):
xs = self.schnet(*xs)
return self.orbital(xs).squeeze(dim=-1).sum(dim=-1)
@pytest.fixture
def wf(net_factory, mol):
args = (mol,)
kwargs = {}
if net_factory is PauliNet:
mole = pyscf.gto.M(atom=mol.as_pyscf(), unit='bohr', basis='6-311g', cart=True)
basis = GTOBasis.from_pyscf(mole)
args += (basis,)
kwargs.update(
{
'cusp_correction': True,
'cusp_electrons': True,
'jastrow_factory': JastrowNet,
'dist_feat_dim': 4,
}
)
return net_factory(*args, **kwargs)
def test_batching(wf, rs):
assert_alltrue_named(
(name, torch.allclose(wf(rs[:2])[i], wf(rs)[i][:2], atol=0))
for i, name in enumerate(['log(abs(psi))', 'sign(psi)'])
)
def test_antisymmetry(wf, rs):
assert_alltrue_named(
(name, torch.allclose(wf(rs[:, [0, 2, 1]])[i], (-1) ** i * wf(rs)[i]))
for i, name in enumerate(['log(abs(psi))', 'sign(psi)'])
)
def test_antisymmetry_trained(wf, rs):
sampler = LangevinSampler(wf, torch.rand_like(rs), tau=0.1)
fit_wf(
wf,
LossEnergy(),
torch.optim.Adam(wf.parameters(), lr=1e-2),
sampler,
range(10),
)
assert_alltrue_named(
(name, torch.allclose(wf(rs[:, [0, 2, 1]])[i], (-1) ** i * wf(rs)[i]))
for i, name in enumerate(['log(abs(psi))', 'sign(psi)'])
)
def test_backprop(wf, rs):
wf(rs)[0].sum().backward()
assert_alltrue_named(
(name, param.grad is not None) for name, param in wf.named_parameters()
)
assert_alltrue_named(
(name, (param.grad.sum().abs().item() > 0 or name == 'mo.cusp_corr.shifts'))
for name, param in wf.named_parameters()
)
# mo.cusp_corr.shifts is excluded, as gradients occasionally vanish
def test_grad(wf, rs):
rs.requires_grad_()
wf(rs)[0].sum().backward()
assert rs.grad.sum().abs().item() > 0
def test_loc_ene_backprop(wf, rs):
rs.requires_grad_()
Es_loc, _, _ = local_energy(rs, wf, create_graph=True)
Es_loc.sum().backward()
assert_alltrue_named(
(name, (param.grad.sum().abs().item() > 0 or name == 'mo.cusp_corr.shifts'))
for name, param in wf.named_parameters()
)
# mo.cusp_corr.shifts is excluded, as gradients occasionally vanish
|
StarcoderdataPython
|
6463974
|
<reponame>enmanuelbt92/docs
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow.org style API Reference docs for a Python module."""
import collections
import os
import pathlib
import shutil
import tempfile
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
from tensorflow_docs.api_generator import config
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import parser
from tensorflow_docs.api_generator import public_api
from tensorflow_docs.api_generator import reference_resolver as reference_resolver_lib
from tensorflow_docs.api_generator import toc as toc_lib
from tensorflow_docs.api_generator import traverse
from tensorflow_docs.api_generator.pretty_docs import docs_for_object
from tensorflow_docs.api_generator.report import utils
import yaml
# Used to add a collections.OrderedDict representer to yaml so that the
# dump doesn't contain !!OrderedDict yaml tags.
# Reference: https://stackoverflow.com/a/21048064
# Using a normal dict doesn't preserve the order of the input dictionary.
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return collections.OrderedDict(loader.construct_pairs(node))
yaml.add_representer(collections.OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
def write_docs(
*,
output_dir: Union[str, pathlib.Path],
parser_config: config.ParserConfig,
yaml_toc: Union[bool, Type[toc_lib.TocBuilder]],
root_module_name: str,
root_title: str = 'TensorFlow',
search_hints: bool = True,
site_path: str = 'api_docs/python',
gen_redirects: bool = True,
gen_report: bool = True,
extra_docs: Optional[Dict[int, str]] = None,
page_builder_classes: Optional[docs_for_object.PageBuilderDict] = None,
):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
a tree of docs at `output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `config.ParserConfig` object, containing all the necessary
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_module_name: (str) the name of the root module (`tf` for tensorflow).
root_title: The title name for the root level index.md.
search_hints: (bool) include meta-data search hints at the top of each
output file.
site_path: The output path relative to the site root. Used in the
`_toc.yaml` and `_redirects.yaml` files.
gen_redirects: Bool which decides whether to generate _redirects.yaml file
or not.
gen_report: If True, a report for the library is generated by linting the
docstrings of its public API symbols.
extra_docs: To add docs for a particular object instance set it's __doc__
attribute. For some classes (list, tuple, etc) __doc__ is not writable.
Pass those docs like: `extra_docs={id(obj): "docs"}`
page_builder_classes: A optional dict of `{ObjectType:Type[PageInfo]}` for
overriding the default page builder classes.
Raises:
ValueError: if `output_dir` is not an absolute path
"""
output_dir = pathlib.Path(output_dir)
site_path = pathlib.Path('/', site_path)
# Make output_dir.
if not output_dir.is_absolute():
raise ValueError("'output_dir' must be an absolute path.\n"
f" output_dir='{output_dir}'")
output_dir.mkdir(parents=True, exist_ok=True)
# Collect redirects for an api _redirects.yaml file.
redirects = []
api_report = None
if gen_report:
api_report = utils.ApiReport()
# Parse and write Markdown pages, resolving cross-links (`tf.symbol`).
num_docs_output = 0
for api_node in parser_config.api_tree.iter_nodes():
full_name = api_node.full_name
if api_node.output_type() is api_node.OutputType.FRAGMENT:
continue
# Generate docs for `py_object`, resolving references.
try:
page_info = docs_for_object.docs_for_object(
api_node=api_node,
parser_config=parser_config,
extra_docs=extra_docs,
search_hints=search_hints,
page_builder_classes=page_builder_classes)
if api_report is not None and not full_name.startswith(
('tf.compat.v', 'tf.keras.backend', 'tf.numpy',
'tf.experimental.numpy')):
api_report.fill_metrics(page_info)
except Exception as e:
raise ValueError(
f'Failed to generate docs for symbol: `{full_name}`') from e
path = output_dir / parser.documentation_path(full_name)
try:
path.parent.mkdir(exist_ok=True, parents=True)
path.write_text(page_info.page_text, encoding='utf-8')
num_docs_output += 1
except OSError as e:
raise OSError('Cannot write documentation for '
f'{full_name} to {path.parent}') from e
duplicates = parser_config.duplicates.get(full_name, [])
if not duplicates:
continue
duplicates = [item for item in duplicates if item != full_name]
if gen_redirects:
for dup in duplicates:
from_path = site_path / dup.replace('.', '/')
to_path = site_path / full_name.replace('.', '/')
redirects.append({'from': str(from_path), 'to': str(to_path)})
if api_report is not None:
api_report.write(output_dir / root_module_name / 'api_report.pb')
if num_docs_output <= 1:
raise ValueError('The `DocGenerator` failed to generate any docs. Verify '
'your arguments (`base_dir` and `callbacks`). '
'Everything you want documented should be within '
'`base_dir`.')
if yaml_toc:
if isinstance(yaml_toc, bool):
yaml_toc = toc_lib.FlatModulesTocBuilder
toc = yaml_toc(site_path).build(parser_config.api_tree)
toc_path = output_dir / root_module_name / '_toc.yaml'
toc.write(toc_path)
if redirects and gen_redirects:
redirects_dict = {
'redirects': sorted(redirects, key=lambda redirect: redirect['from'])
}
api_redirects_path = output_dir / root_module_name / '_redirects.yaml'
with open(api_redirects_path, 'w') as redirect_file:
yaml.dump(redirects_dict, redirect_file, default_flow_style=False)
# Write a global index containing all full names with links.
with open(output_dir / root_module_name / 'all_symbols.md', 'w') as f:
global_index = parser.generate_global_index(
root_title, parser_config.index, parser_config.reference_resolver)
if not search_hints:
global_index = 'robots: noindex\n' + global_index
f.write(global_index)
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
def extract(
py_modules,
base_dir,
private_map: Dict[str, Any],
visitor_cls: Type[
doc_generator_visitor.DocGeneratorVisitor] = doc_generator_visitor
.DocGeneratorVisitor,
callbacks: Optional[public_api.ApiFilter] = None,
include_default_callbacks=True):
"""Walks the module contents, returns an index of all visited objects.
  The return value is an instance of `visitor_cls`, usually
  `doc_generator_visitor.DocGeneratorVisitor`.
Args:
py_modules: A list containing a single (short_name, module_object) pair.
like `[('tf',tf)]`.
base_dir: The package root directory. Nothing defined outside of this
directory is documented.
private_map: A {'path':["name"]} dictionary listing particular object
locations that should be ignored in the doc generator.
visitor_cls: A class, typically a subclass of
      `doc_generator_visitor.DocGeneratorVisitor` that accumulates the indexes of
objects to document.
callbacks: Additional callbacks passed to `traverse`. Executed between the
`PublicApiFilter` and the accumulator (`DocGeneratorVisitor`). The
primary use case for these is to filter the list of children (see:
`public_api.local_definitions_filter`)
include_default_callbacks: When true the long list of standard
visitor-callbacks are included. When false, only the `callbacks` argument
is used.
Returns:
The accumulator (`DocGeneratorVisitor`)
"""
if callbacks is None:
callbacks = []
if len(py_modules) != 1:
raise ValueError("only pass one [('name',module)] pair in py_modules")
short_name, py_module = py_modules[0]
# The objects found during traversal, and their children are passed to each
# of these filters in sequence. Each visitor returns the list of children
# to be passed to the next visitor.
if include_default_callbacks:
filters = [
# filter the api.
public_api.FailIfNestedTooDeep(10),
public_api.filter_module_all,
public_api.add_proto_fields,
public_api.filter_builtin_modules,
public_api.filter_private_symbols,
public_api.FilterBaseDirs(base_dir),
public_api.FilterPrivateMap(private_map),
public_api.filter_doc_controls_skip,
public_api.ignore_typing
]
else:
filters = []
accumulator = visitor_cls()
traverse.traverse(
py_module, filters + callbacks, accumulator, root_name=short_name)
accumulator.build()
return accumulator
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
class DocGenerator:
"""Main entry point for generating docs."""
def __init__(
self,
root_title: str,
py_modules: Sequence[Tuple[str, Any]],
base_dir: Optional[Sequence[Union[str, pathlib.Path]]] = None,
code_url_prefix: Union[Optional[str], Sequence[Optional[str]]] = (),
search_hints: bool = True,
site_path: str = 'api_docs/python',
private_map: Optional[Dict[str, str]] = None,
visitor_cls: Type[doc_generator_visitor.DocGeneratorVisitor] = (
doc_generator_visitor.DocGeneratorVisitor),
api_cache: bool = True,
callbacks: Optional[List[public_api.ApiFilter]] = None,
yaml_toc: Union[bool, Type[toc_lib.TocBuilder]] = True,
gen_redirects: bool = True,
gen_report: bool = True,
extra_docs: Optional[Dict[int, str]] = None,
page_builder_classes: Optional[docs_for_object.PageBuilderDict] = None,
):
"""Creates a doc-generator.
Args:
root_title: A string. The main title for the project. Like "TensorFlow"
py_modules: The python module to document.
base_dir: String or tuple of strings. Directories that "Defined in" links
are generated relative to. **Modules outside one of these directories
are not documented**. No `base_dir` should be inside another.
code_url_prefix: String or tuple of strings. The prefix to add to "Defined
in" paths. These are zipped with `base-dir`, to set the `defined_in`
path for each file. The defined in link for `{base_dir}/path/to/file` is
set to `{code_url_prefix}/path/to/file`.
search_hints: Bool. Include metadata search hints at the top of each file.
site_path: Path prefix in the "_toc.yaml"
private_map: DEPRECATED. Use `api_generator.doc_controls`, or pass a
filter to the `callbacks` argument. A
`{"module.path.to.object": ["names"]}` dictionary. Specific
aliases that should not be shown in the resulting docs.
visitor_cls: An option to override the default visitor class
`doc_generator_visitor.DocGeneratorVisitor`.
api_cache: Bool. Generate an api_cache file. This is used to easily add
api links for backticked symbols (like `tf.add`) in other docs.
callbacks: Additional callbacks passed to `traverse`. Executed between the
`PublicApiFilter` and the accumulator (`DocGeneratorVisitor`). The
primary use case for these is to filter the list of children (see:
`public_api.ApiFilter` for the required signature)
yaml_toc: Bool which decides whether to generate _toc.yaml file or not.
gen_redirects: Bool which decides whether to generate _redirects.yaml file
or not.
gen_report: If True, a report for the library is generated by linting the
docstrings of its public API symbols.
extra_docs: To add docs for a particular object instance set it's __doc__
attribute. For some classes (list, tuple, etc) __doc__ is not writable.
Pass those docs like: `extra_docs={id(obj): "docs"}`
page_builder_classes: An optional dict of `{ObjectType:Type[PageInfo]}`
for overriding the default page builder classes.
"""
self._root_title = root_title
self._py_modules = py_modules
self._short_name = py_modules[0][0]
self._py_module = py_modules[0][1]
if base_dir is None:
# Determine the base_dir for the module
base_dir = public_api.get_module_base_dirs(self._py_module)
else:
if isinstance(base_dir, (str, pathlib.Path)):
base_dir = (base_dir,)
base_dir = tuple(pathlib.Path(d) for d in base_dir)
self._base_dir = base_dir
if not self._base_dir:
raise ValueError('`base_dir` cannot be empty')
if isinstance(code_url_prefix, str) or code_url_prefix is None:
code_url_prefix = (code_url_prefix,)
self._code_url_prefix = tuple(code_url_prefix)
if not self._code_url_prefix:
raise ValueError('`code_url_prefix` cannot be empty')
if len(self._code_url_prefix) != len(base_dir):
raise ValueError('The `base_dir` list should have the same number of '
'elements as the `code_url_prefix` list (they get '
'zipped together).')
self._search_hints = search_hints
self._site_path = site_path
self._private_map = private_map or {}
self._visitor_cls = visitor_cls
self.api_cache = api_cache
if callbacks is None:
callbacks = []
self._callbacks = callbacks
self._yaml_toc = yaml_toc
self._gen_redirects = gen_redirects
self._gen_report = gen_report
self._extra_docs = extra_docs
self._page_builder_classes = page_builder_classes
def make_reference_resolver(self, visitor):
return reference_resolver_lib.ReferenceResolver.from_visitor(
visitor, py_module_names=[self._short_name])
def make_parser_config(self,
visitor: doc_generator_visitor.DocGeneratorVisitor):
reference_resolver = self.make_reference_resolver(visitor)
return config.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
duplicate_of=visitor.duplicate_of,
tree=visitor.tree,
index=visitor.index,
reverse_index=visitor.reverse_index,
path_tree=visitor.path_tree,
api_tree=visitor.api_tree,
base_dir=self._base_dir,
code_url_prefix=self._code_url_prefix)
def run_extraction(self):
"""Walks the module contents, returns an index of all visited objects.
The return value is an instance of `self._visitor_cls`, usually:
`doc_generator_visitor.DocGeneratorVisitor`
Returns:
"""
visitor = extract(
py_modules=self._py_modules,
base_dir=self._base_dir,
private_map=self._private_map,
visitor_cls=self._visitor_cls,
callbacks=self._callbacks)
# Write the api docs.
parser_config = self.make_parser_config(visitor)
return parser_config
def build(self, output_dir):
"""Build all the docs.
This produces python api docs:
* generated from `py_module`.
* written to '{output_dir}/api_docs/python/'
Args:
output_dir: Where to write the resulting docs.
"""
workdir = pathlib.Path(tempfile.mkdtemp())
# Extract the python api from the _py_modules
parser_config = self.run_extraction()
work_py_dir = workdir / 'api_docs/python'
write_docs(
output_dir=str(work_py_dir),
parser_config=parser_config,
yaml_toc=self._yaml_toc,
root_title=self._root_title,
root_module_name=self._short_name.replace('.', '/'),
search_hints=self._search_hints,
site_path=self._site_path,
gen_redirects=self._gen_redirects,
gen_report=self._gen_report,
extra_docs=self._extra_docs,
page_builder_classes=self._page_builder_classes,
)
if self.api_cache:
parser_config.reference_resolver.to_json_file(
str(work_py_dir / self._short_name.replace('.', '/') /
'_api_cache.json'))
os.makedirs(output_dir, exist_ok=True)
# Typical results are something like:
#
# out_dir/
# {short_name}/
# _redirects.yaml
# _toc.yaml
# api_report.pb
# index.md
# {short_name}.md
#
# Copy the top level files to the `{output_dir}/`, delete and replace the
# `{output_dir}/{short_name}/` directory.
for work_path in work_py_dir.glob('*'):
out_path = pathlib.Path(output_dir) / work_path.name
out_path.parent.mkdir(exist_ok=True, parents=True)
if work_path.is_file():
shutil.copy2(work_path, out_path)
elif work_path.is_dir():
shutil.rmtree(out_path, ignore_errors=True)
shutil.copytree(work_path, out_path)
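# Typical usage sketch (module name and paths are assumptions, not from this file):
#   doc_gen = DocGenerator(
#       root_title='My Project',
#       py_modules=[('myproject', myproject)],
#       code_url_prefix='https://github.com/example/myproject/blob/main/myproject')
#   doc_gen.build('/tmp/myproject_api')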
|
StarcoderdataPython
|
4991833
|
<reponame>LoopSun/Django-starter<filename>neptune/conf/__init__.py
from .secret import *
from .base import *
from .debug import *
from .database import *
from .log import *
from .static import *
from .api import *
from .customer import *
from .email import *
from .celery import *
|
StarcoderdataPython
|
3220926
|
<filename>jmetal/util/aggregative_function.py
from abc import ABCMeta, abstractmethod
from jmetal.util.point import IdealPoint
"""
.. module:: aggregative_function
:platform: Unix, Windows
:synopsis: Implementation of aggregative (scalarizing) functions.
.. moduleauthor:: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
"""
class AggregativeFunction(metaclass=ABCMeta):
@abstractmethod
def compute(self, vector, weight_vector):
pass
@abstractmethod
def update(self, vector):
pass
class WeightedSum(AggregativeFunction):
def compute(self, vector, weight_vector):
return sum(map(lambda x, y: x * y, vector, weight_vector))
def update(self, vector):
pass
class Tschebycheff(AggregativeFunction):
def __init__(self, dimension):
self.ideal_point = IdealPoint(dimension)
def compute(self, vector, weight_vector):
max_fun = -1.0e+30
for i in range(len(vector)):
diff = abs(vector[i] - self.ideal_point.point[i])
if weight_vector[i] == 0:
feval = 0.0001 * diff
else:
feval = diff * weight_vector[i]
if feval > max_fun:
max_fun = feval
return max_fun
def update(self, vector):
self.ideal_point.update(vector)
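if __name__ == '__main__':
    # Minimal sketch (not part of the original module): compare both
    # aggregations on a two-objective vector.
    ws = WeightedSum()
    print(ws.compute([1.0, 2.0], [0.5, 0.5]))   # 0.5*1.0 + 0.5*2.0 = 1.5
    tch = Tschebycheff(dimension=2)
    tch.update([0.0, 0.0])                      # register an ideal point
    print(tch.compute([1.0, 2.0], [0.5, 0.5]))  # max(0.5*|1-0|, 0.5*|2-0|) = 1.0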
|
StarcoderdataPython
|
3269617
|
<filename>tester_web/__init__.py
from flask import Flask
from flask_cors import CORS
from tester_web.user.api import api
from tester_web.user.project import project
from tester_web.user.scripts import scripts
from tester_web.user.test_results import results
app = Flask(__name__)
CORS(app, supports_credentials=True)
from tester_web.user.login import auth
app.register_blueprint(auth)
app.register_blueprint(scripts)
app.register_blueprint(project)
app.register_blueprint(results)
app.register_blueprint(api)
|
StarcoderdataPython
|
121533
|
<reponame>zekroTJA/pytter
import os
import sys
from pytter import Client, Credentials
def main():
creds = Credentials(
consumer_key=os.environ.get('tw_consumer_key'),
consumer_secret=os.environ.get('tw_consumer_secret'),
access_token_key=os.environ.get('tw_access_token_key'),
access_token_secret=os.environ.get('tw_access_token_secret'))
client = Client(creds)
arg = sys.argv[1] if len(sys.argv) > 1 else 'zekroTJA'
id = None
name = None
if arg.startswith('id:'):
id = arg[3:]
else:
name = arg
user = client.user(screen_name=name, id=id)
print((
"\nUsername: {}\n" +
"Display Name: {}\n" +
"ID: {}\n" +
"Created: {}\n" +
"Followers: {}\n" +
"Follows: {}\n" +
"Tweets: {}\n" +
"Favorites: {}"
).format(
user.username,
user.name,
user.id,
user.created_at,
user.stats.followers_count,
user.stats.following_count,
user.stats.tweet_count,
user.stats.favorites_count
))
if __name__ == '__main__':
exit(main())
|
StarcoderdataPython
|
4815807
|
#!/usr/bin/python3
import subprocess
from dg_storage import *
from shutil import copyfile
import re
import tempfile
import multiprocessing
RE = re.compile(r'RE\[([^\]]+)\]')
def score_game(sgf):
"""
Returns the winner of the game in the given SGF file as
judged by `gnugo`.
"""
with tempfile.NamedTemporaryFile() as sgf_file:
sgf_file.write(sgf.encode())
sgf_file.flush()
# start-up our judge (gnugo)
gnugo = subprocess.Popen(
['/usr/games/gnugo',
'--score', 'aftermath',
'--chinese-rules', '--positional-superko',
'-l', sgf_file.name],
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL
)
try:
for line in gnugo.stdout:
line = line.decode('utf-8').strip()
if 'White wins by' in line: # White wins by 8.5 points
return 'W+' + line.split()[3]
elif 'Black wins by' in line: # Black wins by 32.5 points
return 'B+' + line.split()[3]
finally:
gnugo.communicate()
def clean_game(sgf):
""" Returns the given game after it has been _cleaned up_. """
winner = RE.search(sgf)
resign = winner and 'R' in winner.group(1).upper()
if winner and not resign:
winner = score_game(sgf)
if winner:
sgf = re.sub(RE, 'RE[' + winner + ']', sgf)
return sgf
# (1) download recent network
# (2) generate 1,000 fresh game records
if __name__ == '__main__':
best_network = copy_most_recent_network()
if best_network:
copyfile(best_network, '/app/dream_go.json')
game_records = ''
env = {}
proc = subprocess.Popen([
'/app/dream_go',
'--self-play', '1000',
'--num-rollout', '800',
'--num-threads', '64',
'--batch-size', '16'
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
with multiprocessing.Pool() as pool:
def add_game_record(x):
global game_records
game_records += x
game_records += '\r\n'
# score the games as they get finished by the engine
for line in proc.stdout:
line = line.decode('utf-8').strip()
pool.apply_async(clean_game, [line], callback=add_game_record)
# wait for everything to finish
_stdout, _stderr = proc.communicate()
if proc.returncode != 0:
quit(proc.returncode)
pool.close()
pool.join()
upload_game_records(game_records, from_network=best_network, env=env, args=proc.args)
|
StarcoderdataPython
|
11230099
|
<gh_stars>0
# -*- coding: utf-8 -*-
from sys import argv
import socketserver
from json import loads
from time import strftime, time
from models import *
class ChatServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
users = []
messages = []
sockets = []
def __init__(self, server_address, RequestHandlerClass):
socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass)
class RequestHandler(socketserver.BaseRequestHandler):
def handle(self):
self.socket = self.request
self.user = None
while True:
data = self.request.recv(2**20).decode('utf-8')
try:
json = loads(data)
except ValueError:
return
request = Request(**json)
request.sender = self.user
            '''
            We now have a Request object. It is dispatched to the
            handler methods below.
            '''
if request.request == 'help':
self.send_help()
continue
if self.user:
self.handle_logged_in(request)
else:
self.handle_not_logged_in(request)
def handle_logged_in(self, request):
type = request.request
if type == 'msg':
self.msg(request)
elif type == 'logout':
self.logout()
elif type == 'names':
self.names()
# handle errors
elif type == 'login':
res = self.create_response(
'Server',
'error',
'You need to log out in order to log in.')
self.send_response(res)
def handle_not_logged_in(self, request):
type = request.request
if type == 'login':
self.login(request)
self.server.sockets.append(self.socket)
self.send_log()
else:
self.send_login_error()
def login(self, request):
        '''Handles the login logic'''
        # reply to the client
self.user = User(request.content)
res = self.create_response(
'login',
'info',
self.user.username)
self.send_response(res)
        # tell everyone about the login
res = self.create_response(
'Server',
'message',
'User {} logged in.'.format(self.user.username))
self.broadcast_response(res)
self.server.users.append(self.user)
def logout(self):
        '''Handles the logout logic'''
        # reply to the client
res = self.create_response(
'logout',
'info',
self.user.username)
self.send_response(res)
        # tell everyone about the logout
res = self.create_response(
'Server',
'message',
'User {} logged out.'.format(self.user.username))
self.server.users.remove(self.user)
self.user = None
self.broadcast_response(res)
self.server.sockets.remove(self.socket)
def names(self):
        '''Handles the logic around `names`'''
usernames = [u.username for u in self.server.users]
res = self.create_response(
'Server',
'info',
", ".join(usernames))
self.send_response(res)
def msg(self, request):
        '''Handles the logic around sending messages'''
res = self.create_response(self.user.username,
'message',
request.content)
self.broadcast_response(res)
def create_response(self, sender, response, content, time=None):
        '''Builds a Response object from the given parameters'''
res = Response()
if time == None:
res.timestamp = strftime('%H:%M')
else: res.timestamp = time
res.sender = sender
res.response = response
res.content = content
return res
def send_response(self, res):
        '''Sends the response to this client'''
d = res.__dict__
if res.response == 'history':
d['content'] = [c.__dict__ for c in d['content']]
json = to_json(res.__dict__)
self.socket.sendall(json.encode('utf-8'))
self.log(res)
def broadcast_response(self, res):
        '''Sends the response to everyone who is logged in'''
        json = to_json(res.__dict__)
        # iterate over a copy so failed sockets can be removed while looping
        for s in list(self.server.sockets):
try:
s.sendall(json.encode('utf-8'))
except Exception as e:
self.log(e)
self.server.sockets.remove(s)
self.log(res)
self.server.messages.append(Message(res.sender,
res.content, res.timestamp))
def send_log(self):
msgs = [self.create_response(m.user, 'message', m.message, m.timestamp)\
for m in self.server.messages]
res = self.create_response('Server',
'history',
msgs)
self.send_response(res)
def send_help(self):
        '''Builds and sends the help text'''
res = self.create_response('Server',
'info',
'Available commands are:\n' +
'\thelp\n' +
'\tlogin <username>\n' +
'If you are logged in:\n' +
'\tmsg <message>\n' +
'\tnames\n' +
'\tlogout')
self.send_response(res)
def send_login_error(self):
        '''Sends an error when a user who is not logged in tries
        to do something that requires being logged in'''
res = self.create_response('Server',
'error',
'You must be logged in to do this.')
self.send_response(res)
    def log(self, s):
        # escape newlines so each log entry stays on one line before truncating
        s = str(s).replace('\n', '\\n')
        print('[LOG]: ' + s[:80])
if __name__ == '__main__':
if len(argv) < 2:
print('Usage: python server.py <port>')
else:
port = int(argv[1])
host = '0.0.0.0'
server = ChatServer((host, port), RequestHandler)
server.serve_forever()
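# A minimal client sketch (not part of the original server): it assumes the
# server listens on localhost and that models.Request accepts exactly the
# 'request' and 'content' keys used by the handler above.
def example_client(port, username='alice'):
    import json as jsonlib
    import socket
    sock = socket.create_connection(('127.0.0.1', port))
    # log in, then send a single chat message; each request is one JSON object
    sock.sendall(jsonlib.dumps({'request': 'login', 'content': username}).encode('utf-8'))
    print(sock.recv(2**20).decode('utf-8'))
    sock.sendall(jsonlib.dumps({'request': 'msg', 'content': 'hello'}).encode('utf-8'))
    print(sock.recv(2**20).decode('utf-8'))
    sock.close()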
|
StarcoderdataPython
|
8188703
|
<reponame>pfnet/chainerchem
import chainer
from chainer import functions
def shifted_softplus(x, beta=1, shift=0.5, threshold=20):
"""shifted softplus function, which holds f(0)=0.
Args:
x (Variable): Input variable
beta (float): Parameter :math:`\\beta`.
shift (float): Shift Parameter
threshold (float): threshold to avoid overflow
Returns:
output (Variable): Output variable whose shape is same with `x`
"""
xp = chainer.cuda.get_array_module(x)
cond = chainer.as_variable(x).array > threshold
x = functions.where(cond, x,
functions.softplus(x, beta=beta))
x += xp.log(shift)
return x
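# A minimal usage sketch (assumes numpy is available; not part of the original
# module): f(0) should come out as 0, since softplus(0) = log(2) and
# log(2) + log(0.5) == 0.
if __name__ == '__main__':
    import numpy as np
    x = chainer.Variable(np.array([0.0, 1.0, 30.0], dtype=np.float32))
    y = shifted_softplus(x)
    print(y.array)  # roughly [0.0, 0.62, 29.31]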
|
StarcoderdataPython
|
6400681
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddlex.ppdet.core.workspace import register, create
from .meta_arch import BaseArch
from paddlex.ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
__all__ = ['DeepSORT']
@register
class DeepSORT(BaseArch):
"""
DeepSORT network, see https://arxiv.org/abs/1703.07402
Args:
detector (object): detector model instance
reid (object): reid model instance
tracker (object): tracker instance
"""
__category__ = 'architecture'
def __init__(self,
detector='YOLOv3',
reid='PCBPyramid',
tracker='DeepSORTTracker'):
super(DeepSORT, self).__init__()
self.detector = detector
self.reid = reid
self.tracker = tracker
@classmethod
def from_config(cls, cfg, *args, **kwargs):
if cfg['detector'] != 'None':
detector = create(cfg['detector'])
else:
detector = None
reid = create(cfg['reid'])
tracker = create(cfg['tracker'])
return {
"detector": detector,
"reid": reid,
"tracker": tracker,
}
def _forward(self):
assert 'ori_image' in self.inputs
load_dets = 'pred_bboxes' in self.inputs and 'pred_scores' in self.inputs
ori_image = self.inputs['ori_image']
input_shape = self.inputs['image'].shape[2:]
im_shape = self.inputs['im_shape']
scale_factor = self.inputs['scale_factor']
if self.detector and not load_dets:
outs = self.detector(self.inputs)
if outs['bbox_num'] > 0:
pred_bboxes = scale_coords(outs['bbox'][:, 2:], input_shape,
im_shape, scale_factor)
pred_scores = outs['bbox'][:, 1:2]
else:
pred_bboxes = []
pred_scores = []
else:
pred_bboxes = self.inputs['pred_bboxes']
pred_scores = self.inputs['pred_scores']
if len(pred_bboxes) > 0:
pred_bboxes = clip_box(pred_bboxes, input_shape, im_shape,
scale_factor)
bbox_tlwh = paddle.concat(
(pred_bboxes[:, 0:2],
pred_bboxes[:, 2:4] - pred_bboxes[:, 0:2] + 1),
axis=1)
crops, pred_scores = get_crops(
pred_bboxes, ori_image, pred_scores, w=64, h=192)
if len(crops) > 0:
features = self.reid(paddle.to_tensor(crops))
detections = [Detection(bbox_tlwh[i], conf, features[i])\
for i, conf in enumerate(pred_scores)]
else:
detections = []
else:
detections = []
self.tracker.predict()
online_targets = self.tracker.update(detections)
return online_targets
def get_pred(self):
return self._forward()
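# Configuration sketch (an assumption, not taken from the configs shipped with
# this file): from_config above only requires the 'detector', 'reid' and
# 'tracker' keys, with 'None' as the detector when precomputed
# 'pred_bboxes'/'pred_scores' are fed in through the inputs.
#
#   DeepSORT:
#     detector: YOLOv3        # or 'None' to track external detections
#     reid: PCBPyramid
#     tracker: DeepSORTTracker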
|
StarcoderdataPython
|
160931
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='EditHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.TextField(null=True, verbose_name='\u4fee\u6539\u5185\u5bb9', blank=True)),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
],
options={
'db_table': 'edithistory',
'verbose_name': '\u4fee\u6539\u8bb0\u5f55',
'verbose_name_plural': '\u4fee\u6539\u8bb0\u5f55',
},
),
migrations.CreateModel(
name='ErrorCode',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('error_name', models.CharField(max_length=50, null=True, verbose_name='\u9519\u8bef\u7801\u540d\u79f0', blank=True)),
('display_message', models.CharField(max_length=200, null=True, verbose_name='\u63d0\u793a\u4fe1\u606f', blank=True)),
('description', models.TextField(null=True, verbose_name='\u9519\u8bef\u7801\u63cf\u8ff0', blank=True)),
('remark', models.TextField(null=True, verbose_name='\u5907\u6ce8', blank=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u662f\u5426\u542f\u7528')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('author', models.ForeignKey(related_name='errorcode_author', verbose_name='\u521b\u5efa\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'errorcode',
'verbose_name': '\u9519\u8bef\u7801',
'verbose_name_plural': '\u9519\u8bef\u7801',
},
),
migrations.CreateModel(
name='Interface',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('interface_name', models.CharField(max_length=200, null=True, verbose_name='API\u63a5\u53e3\u540d\u79f0', blank=True)),
('description', models.TextField(null=True, verbose_name='API\u63a5\u53e3\u63cf\u8ff0', blank=True)),
('url', models.CharField(max_length=200, unique=True, null=True, verbose_name='\u8bf7\u6c42\u5730\u5740', blank=True)),
('method', models.SmallIntegerField(default=1, verbose_name='\u8bf7\u6c42\u7c7b\u578b', choices=[(1, b'GET'), (2, b'POST'), (3, b'PUT'), (4, b'DELETE')])),
('content_type', models.SmallIntegerField(default=1, verbose_name='Content type', choices=[(1, b'application/json'), (2, b'text/html'), (3, b'x-www-form-urlencode')])),
('remark', models.TextField(null=True, verbose_name='\u5907\u6ce8', blank=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u662f\u5426\u542f\u7528')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('author', models.ForeignKey(related_name='interface_author', verbose_name='\u521b\u5efa\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
('modifier', models.ForeignKey(related_name='interface_modifier', verbose_name='\u4fee\u6539\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'interface',
'verbose_name': 'API\u5e94\u7528\u63a5\u53e3',
'verbose_name_plural': 'API\u5e94\u7528\u63a5\u53e3',
},
),
migrations.CreateModel(
name='LockInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_locked', models.BooleanField(default=False, verbose_name='\u662f\u5426\u88ab\u9501')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('interface', models.ForeignKey(related_name='lockinfo', verbose_name='API\u5e94\u7528\u63a5\u53e3', to='atm.Interface', null=True)),
('lock_user', models.ForeignKey(related_name='lockinfo', verbose_name='\u9501\u8868\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'lockinfo',
'verbose_name': '\u9501\u63a5\u53e3\u4fe1\u606f',
'verbose_name_plural': '\u9501\u63a5\u53e3\u4fe1\u606f',
},
),
migrations.CreateModel(
name='MetaData',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('position', models.SmallIntegerField(default=1, verbose_name='Postion', choices=[(1, b'ReqHeader'), (2, b'ReqPath'), (3, b'ReqQueryString'), (4, b'ReqForm'), (5, b'ReqBody'), (6, b'ReqCookie'), (7, b'RespHeader'), (8, b'RespBody ')])),
('metadata_name', models.CharField(max_length=200, null=True, verbose_name='\u5143\u6570\u636e\u540d\u79f0', blank=True)),
('data', models.TextField(null=True, verbose_name='\u6570\u636e\u503c', blank=True)),
('remark', models.TextField(null=True, verbose_name='\u5907\u6ce8', blank=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u662f\u5426\u542f\u7528')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('author', models.ForeignKey(related_name='metadata_author', verbose_name='\u521b\u5efa\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
('interface', models.ForeignKey(related_name='metadata', verbose_name='API\u5e94\u7528\u63a5\u53e3', to='atm.Interface', null=True)),
('modifier', models.ForeignKey(related_name='metadata_modifier', verbose_name='\u4fee\u6539\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'metadata',
'verbose_name': '\u5143\u6570\u636e',
'verbose_name_plural': '\u5143\u6570\u636e',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('project_name', models.CharField(max_length=200, null=True, verbose_name='\u9879\u76ee\u540d\u79f0', blank=True)),
('description', models.TextField(null=True, verbose_name='\u9879\u76ee\u63cf\u8ff0', blank=True)),
('host', models.CharField(max_length=200, null=True, verbose_name='Host', blank=True)),
('remark', models.TextField(null=True, verbose_name='\u5907\u6ce8', blank=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u662f\u5426\u542f\u7528')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('author', models.ForeignKey(related_name='project_author', verbose_name='\u521b\u5efa\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
('modifier', models.ForeignKey(related_name='project_modifier', verbose_name='\u4fee\u6539\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'project',
'verbose_name': '\u9879\u76ee',
'verbose_name_plural': '\u9879\u76ee',
},
),
migrations.CreateModel(
name='ProjectMember',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u662f\u5426\u542f\u7528')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('project', models.ForeignKey(related_name='projectmember', verbose_name='\u9879\u76ee', to='atm.Project', null=True)),
('user', models.ForeignKey(related_name='projectmember', verbose_name='\u6210\u5458', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'projectmember',
'verbose_name': '\u9879\u76ee\u4eba\u5458',
'verbose_name_plural': '\u9879\u76ee\u4eba\u5458',
},
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('team_name', models.CharField(max_length=200, unique=True, null=True, verbose_name='\u56e2\u961f\u540d\u79f0', blank=True)),
('description', models.TextField(null=True, verbose_name='\u56e2\u961f\u63cf\u8ff0', blank=True)),
('remark', models.TextField(null=True, verbose_name='\u5907\u6ce8', blank=True)),
('is_active', models.BooleanField(default=True, verbose_name='\u662f\u5426\u542f\u7528')),
('is_deleted', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u5220\u9664')),
('ctime', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
('utime', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('author', models.ForeignKey(related_name='team_author', verbose_name='\u521b\u5efa\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
('modifier', models.ForeignKey(related_name='team_modifier', verbose_name='\u4fee\u6539\u4eba', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'team',
'verbose_name': '\u56e2\u961f',
'verbose_name_plural': '\u56e2\u961f',
},
),
migrations.AddField(
model_name='project',
name='team',
field=models.ForeignKey(related_name='project', verbose_name='\u56e2\u961f', to='atm.Team', null=True),
),
migrations.AddField(
model_name='interface',
name='project',
field=models.ForeignKey(related_name='interface', verbose_name='\u9879\u76ee', to='atm.Project', null=True),
),
migrations.AddField(
model_name='errorcode',
name='interface',
field=models.ForeignKey(related_name='errorcode', verbose_name='API\u5e94\u7528\u63a5\u53e3', to='atm.Interface', null=True),
),
migrations.AddField(
model_name='errorcode',
name='modifier',
field=models.ForeignKey(related_name='errorcode_modifier', verbose_name='\u4fee\u6539\u4eba', to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='edithistory',
name='interface',
field=models.ForeignKey(related_name='edithistory', verbose_name='API\u5e94\u7528\u63a5\u53e3', to='atm.Interface', null=True),
),
migrations.AddField(
model_name='edithistory',
name='modifier',
field=models.ForeignKey(related_name='edithistory', verbose_name='\u4fee\u6539\u4eba', to=settings.AUTH_USER_MODEL, null=True),
),
]
|
StarcoderdataPython
|
3515758
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Instantiate a browser options object
chrome_options = Options()
# Set the browser window size
chrome_options.add_argument('--window-size=1366,768')
# Launch the browser
driver = webdriver.Chrome(chrome_options=chrome_options)
url = 'https://www.geekdigging.com/'
driver.get(url)
title = driver.find_element_by_xpath('//*[@id="text-4"]/div/div/div[1]/div[2]/a')
print(title)
# Get an attribute value
print(title.get_attribute('href'))
# Get the text content
print(title.text)
# Get the element's location
print(title.location)
# Get the element's size
print(title.size)
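# Close the browser when finished (not in the original snippet) so no orphaned
# chromedriver process is left behind.
driver.quit()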
|
StarcoderdataPython
|