repository_name (stringlengths 7-107) | function_path (stringlengths 4-190) | function_identifier (stringlengths 1-236) | language (stringclasses 1 value) | function (stringlengths 9-647k) | docstring (stringlengths 5-488k) | function_url (stringlengths 71-285) | context (stringlengths 0-2.51M) | license (stringclasses 5 values)
---|---|---|---|---|---|---|---|---|
docusign/docusign-python-client
|
docusign_esign/models/draw.py
|
Draw.conditional_parent_value
|
python
|
def conditional_parent_value(self):
return self._conditional_parent_value
|
Gets the conditional_parent_value of this Draw. # noqa: E501
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial, use "on" as the value to show that the parent tab is active. # noqa: E501
:return: The conditional_parent_value of this Draw. # noqa: E501
:rtype: str
|
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/draw.py#L870-L878
|
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class Draw(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_signer_upload': 'str',
'anchor_allow_white_space_in_characters': 'str',
'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata',
'anchor_case_sensitive': 'str',
'anchor_case_sensitive_metadata': 'PropertyMetadata',
'anchor_horizontal_alignment': 'str',
'anchor_horizontal_alignment_metadata': 'PropertyMetadata',
'anchor_ignore_if_not_present': 'str',
'anchor_ignore_if_not_present_metadata': 'PropertyMetadata',
'anchor_match_whole_word': 'str',
'anchor_match_whole_word_metadata': 'PropertyMetadata',
'anchor_string': 'str',
'anchor_string_metadata': 'PropertyMetadata',
'anchor_tab_processor_version': 'str',
'anchor_tab_processor_version_metadata': 'PropertyMetadata',
'anchor_units': 'str',
'anchor_units_metadata': 'PropertyMetadata',
'anchor_x_offset': 'str',
'anchor_x_offset_metadata': 'PropertyMetadata',
'anchor_y_offset': 'str',
'anchor_y_offset_metadata': 'PropertyMetadata',
'conditional_parent_label': 'str',
'conditional_parent_label_metadata': 'PropertyMetadata',
'conditional_parent_value': 'str',
'conditional_parent_value_metadata': 'PropertyMetadata',
'custom_tab_id': 'str',
'custom_tab_id_metadata': 'PropertyMetadata',
'document_id': 'str',
'document_id_metadata': 'PropertyMetadata',
'error_details': 'ErrorDetails',
'form_order': 'str',
'form_order_metadata': 'PropertyMetadata',
'form_page_label': 'str',
'form_page_label_metadata': 'PropertyMetadata',
'form_page_number': 'str',
'form_page_number_metadata': 'PropertyMetadata',
'height': 'str',
'height_metadata': 'PropertyMetadata',
'locked': 'str',
'locked_metadata': 'PropertyMetadata',
'merge_field': 'MergeField',
'merge_field_xml': 'str',
'page_number': 'str',
'page_number_metadata': 'PropertyMetadata',
'recipient_id': 'str',
'recipient_id_guid': 'str',
'recipient_id_guid_metadata': 'PropertyMetadata',
'recipient_id_metadata': 'PropertyMetadata',
'required': 'str',
'required_metadata': 'PropertyMetadata',
'shared': 'str',
'shared_metadata': 'PropertyMetadata',
'smart_contract_information': 'SmartContractInformation',
'source': 'str',
'status': 'str',
'status_metadata': 'PropertyMetadata',
'tab_group_labels': 'list[str]',
'tab_group_labels_metadata': 'PropertyMetadata',
'tab_id': 'str',
'tab_id_metadata': 'PropertyMetadata',
'tab_label_metadata': 'PropertyMetadata',
'tab_order': 'str',
'tab_order_metadata': 'PropertyMetadata',
'tab_type': 'str',
'tab_type_metadata': 'PropertyMetadata',
'template_locked': 'str',
'template_locked_metadata': 'PropertyMetadata',
'template_required': 'str',
'template_required_metadata': 'PropertyMetadata',
'tooltip': 'str',
'tool_tip_metadata': 'PropertyMetadata',
'use_background_as_canvas': 'str',
'width': 'str',
'width_metadata': 'PropertyMetadata',
'x_position': 'str',
'x_position_metadata': 'PropertyMetadata',
'y_position': 'str',
'y_position_metadata': 'PropertyMetadata'
}
attribute_map = {
'allow_signer_upload': 'allowSignerUpload',
'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters',
'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata',
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata',
'anchor_string': 'anchorString',
'anchor_string_metadata': 'anchorStringMetadata',
'anchor_tab_processor_version': 'anchorTabProcessorVersion',
'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata',
'anchor_units': 'anchorUnits',
'anchor_units_metadata': 'anchorUnitsMetadata',
'anchor_x_offset': 'anchorXOffset',
'anchor_x_offset_metadata': 'anchorXOffsetMetadata',
'anchor_y_offset': 'anchorYOffset',
'anchor_y_offset_metadata': 'anchorYOffsetMetadata',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_label_metadata': 'conditionalParentLabelMetadata',
'conditional_parent_value': 'conditionalParentValue',
'conditional_parent_value_metadata': 'conditionalParentValueMetadata',
'custom_tab_id': 'customTabId',
'custom_tab_id_metadata': 'customTabIdMetadata',
'document_id': 'documentId',
'document_id_metadata': 'documentIdMetadata',
'error_details': 'errorDetails',
'form_order': 'formOrder',
'form_order_metadata': 'formOrderMetadata',
'form_page_label': 'formPageLabel',
'form_page_label_metadata': 'formPageLabelMetadata',
'form_page_number': 'formPageNumber',
'form_page_number_metadata': 'formPageNumberMetadata',
'height': 'height',
'height_metadata': 'heightMetadata',
'locked': 'locked',
'locked_metadata': 'lockedMetadata',
'merge_field': 'mergeField',
'merge_field_xml': 'mergeFieldXml',
'page_number': 'pageNumber',
'page_number_metadata': 'pageNumberMetadata',
'recipient_id': 'recipientId',
'recipient_id_guid': 'recipientIdGuid',
'recipient_id_guid_metadata': 'recipientIdGuidMetadata',
'recipient_id_metadata': 'recipientIdMetadata',
'required': 'required',
'required_metadata': 'requiredMetadata',
'shared': 'shared',
'shared_metadata': 'sharedMetadata',
'smart_contract_information': 'smartContractInformation',
'source': 'source',
'status': 'status',
'status_metadata': 'statusMetadata',
'tab_group_labels': 'tabGroupLabels',
'tab_group_labels_metadata': 'tabGroupLabelsMetadata',
'tab_id': 'tabId',
'tab_id_metadata': 'tabIdMetadata',
'tab_label_metadata': 'tabLabelMetadata',
'tab_order': 'tabOrder',
'tab_order_metadata': 'tabOrderMetadata',
'tab_type': 'tabType',
'tab_type_metadata': 'tabTypeMetadata',
'template_locked': 'templateLocked',
'template_locked_metadata': 'templateLockedMetadata',
'template_required': 'templateRequired',
'template_required_metadata': 'templateRequiredMetadata',
'tooltip': 'tooltip',
'tool_tip_metadata': 'toolTipMetadata',
'use_background_as_canvas': 'useBackgroundAsCanvas',
'width': 'width',
'width_metadata': 'widthMetadata',
'x_position': 'xPosition',
'x_position_metadata': 'xPositionMetadata',
'y_position': 'yPosition',
'y_position_metadata': 'yPositionMetadata'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._allow_signer_upload = None
self._anchor_allow_white_space_in_characters = None
self._anchor_allow_white_space_in_characters_metadata = None
self._anchor_case_sensitive = None
self._anchor_case_sensitive_metadata = None
self._anchor_horizontal_alignment = None
self._anchor_horizontal_alignment_metadata = None
self._anchor_ignore_if_not_present = None
self._anchor_ignore_if_not_present_metadata = None
self._anchor_match_whole_word = None
self._anchor_match_whole_word_metadata = None
self._anchor_string = None
self._anchor_string_metadata = None
self._anchor_tab_processor_version = None
self._anchor_tab_processor_version_metadata = None
self._anchor_units = None
self._anchor_units_metadata = None
self._anchor_x_offset = None
self._anchor_x_offset_metadata = None
self._anchor_y_offset = None
self._anchor_y_offset_metadata = None
self._conditional_parent_label = None
self._conditional_parent_label_metadata = None
self._conditional_parent_value = None
self._conditional_parent_value_metadata = None
self._custom_tab_id = None
self._custom_tab_id_metadata = None
self._document_id = None
self._document_id_metadata = None
self._error_details = None
self._form_order = None
self._form_order_metadata = None
self._form_page_label = None
self._form_page_label_metadata = None
self._form_page_number = None
self._form_page_number_metadata = None
self._height = None
self._height_metadata = None
self._locked = None
self._locked_metadata = None
self._merge_field = None
self._merge_field_xml = None
self._page_number = None
self._page_number_metadata = None
self._recipient_id = None
self._recipient_id_guid = None
self._recipient_id_guid_metadata = None
self._recipient_id_metadata = None
self._required = None
self._required_metadata = None
self._shared = None
self._shared_metadata = None
self._smart_contract_information = None
self._source = None
self._status = None
self._status_metadata = None
self._tab_group_labels = None
self._tab_group_labels_metadata = None
self._tab_id = None
self._tab_id_metadata = None
self._tab_label_metadata = None
self._tab_order = None
self._tab_order_metadata = None
self._tab_type = None
self._tab_type_metadata = None
self._template_locked = None
self._template_locked_metadata = None
self._template_required = None
self._template_required_metadata = None
self._tooltip = None
self._tool_tip_metadata = None
self._use_background_as_canvas = None
self._width = None
self._width_metadata = None
self._x_position = None
self._x_position_metadata = None
self._y_position = None
self._y_position_metadata = None
self.discriminator = None
setattr(self, "_{}".format('allow_signer_upload'), kwargs.get('allow_signer_upload', None))
setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None))
setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None))
setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None))
setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None))
setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None))
setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None))
setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None))
setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None))
setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None))
setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None))
setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None))
setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None))
setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None))
setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None))
setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None))
setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None))
setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None))
setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None))
setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None))
setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None))
setattr(self, "_{}".format('conditional_parent_value'), kwargs.get('conditional_parent_value', None))
setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None))
setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None))
setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None))
setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None))
setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None))
setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None))
setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None))
setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None))
setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None))
setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None))
setattr(self, "_{}".format('height'), kwargs.get('height', None))
setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None))
setattr(self, "_{}".format('locked'), kwargs.get('locked', None))
setattr(self, "_{}".format('locked_metadata'), kwargs.get('locked_metadata', None))
setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None))
setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None))
setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None))
setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None))
setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None))
setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None))
setattr(self, "_{}".format('required'), kwargs.get('required', None))
setattr(self, "_{}".format('required_metadata'), kwargs.get('required_metadata', None))
setattr(self, "_{}".format('shared'), kwargs.get('shared', None))
setattr(self, "_{}".format('shared_metadata'), kwargs.get('shared_metadata', None))
setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None))
setattr(self, "_{}".format('source'), kwargs.get('source', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None))
setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None))
setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None))
setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None))
setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None))
setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None))
setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None))
setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None))
setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None))
setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None))
setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None))
setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None))
setattr(self, "_{}".format('use_background_as_canvas'), kwargs.get('use_background_as_canvas', None))
setattr(self, "_{}".format('width'), kwargs.get('width', None))
setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None))
setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None))
setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None))
setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None))
setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None))
@property
def allow_signer_upload(self):
return self._allow_signer_upload
@allow_signer_upload.setter
def allow_signer_upload(self, allow_signer_upload):
self._allow_signer_upload = allow_signer_upload
@property
def anchor_allow_white_space_in_characters(self):
return self._anchor_allow_white_space_in_characters
@anchor_allow_white_space_in_characters.setter
def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters):
self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters
@property
def anchor_allow_white_space_in_characters_metadata(self):
return self._anchor_allow_white_space_in_characters_metadata
@anchor_allow_white_space_in_characters_metadata.setter
def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata):
self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata
@property
def anchor_case_sensitive(self):
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_case_sensitive_metadata(self):
return self._anchor_case_sensitive_metadata
@anchor_case_sensitive_metadata.setter
def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata):
self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata
@property
def anchor_horizontal_alignment(self):
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_horizontal_alignment_metadata(self):
return self._anchor_horizontal_alignment_metadata
@anchor_horizontal_alignment_metadata.setter
def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata):
self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata
@property
def anchor_ignore_if_not_present(self):
return self._anchor_ignore_if_not_present
@anchor_ignore_if_not_present.setter
def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present):
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
@property
def anchor_ignore_if_not_present_metadata(self):
return self._anchor_ignore_if_not_present_metadata
@anchor_ignore_if_not_present_metadata.setter
def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata):
self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata
@property
def anchor_match_whole_word(self):
return self._anchor_match_whole_word
@anchor_match_whole_word.setter
def anchor_match_whole_word(self, anchor_match_whole_word):
self._anchor_match_whole_word = anchor_match_whole_word
@property
def anchor_match_whole_word_metadata(self):
return self._anchor_match_whole_word_metadata
@anchor_match_whole_word_metadata.setter
def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata):
self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata
@property
def anchor_string(self):
return self._anchor_string
@anchor_string.setter
def anchor_string(self, anchor_string):
self._anchor_string = anchor_string
@property
def anchor_string_metadata(self):
return self._anchor_string_metadata
@anchor_string_metadata.setter
def anchor_string_metadata(self, anchor_string_metadata):
self._anchor_string_metadata = anchor_string_metadata
@property
def anchor_tab_processor_version(self):
return self._anchor_tab_processor_version
@anchor_tab_processor_version.setter
def anchor_tab_processor_version(self, anchor_tab_processor_version):
self._anchor_tab_processor_version = anchor_tab_processor_version
@property
def anchor_tab_processor_version_metadata(self):
return self._anchor_tab_processor_version_metadata
@anchor_tab_processor_version_metadata.setter
def anchor_tab_processor_version_metadata(self, anchor_tab_processor_version_metadata):
self._anchor_tab_processor_version_metadata = anchor_tab_processor_version_metadata
@property
def anchor_units(self):
return self._anchor_units
@anchor_units.setter
def anchor_units(self, anchor_units):
self._anchor_units = anchor_units
@property
def anchor_units_metadata(self):
return self._anchor_units_metadata
@anchor_units_metadata.setter
def anchor_units_metadata(self, anchor_units_metadata):
self._anchor_units_metadata = anchor_units_metadata
@property
def anchor_x_offset(self):
return self._anchor_x_offset
@anchor_x_offset.setter
def anchor_x_offset(self, anchor_x_offset):
self._anchor_x_offset = anchor_x_offset
@property
def anchor_x_offset_metadata(self):
return self._anchor_x_offset_metadata
@anchor_x_offset_metadata.setter
def anchor_x_offset_metadata(self, anchor_x_offset_metadata):
self._anchor_x_offset_metadata = anchor_x_offset_metadata
@property
def anchor_y_offset(self):
return self._anchor_y_offset
@anchor_y_offset.setter
def anchor_y_offset(self, anchor_y_offset):
self._anchor_y_offset = anchor_y_offset
@property
def anchor_y_offset_metadata(self):
return self._anchor_y_offset_metadata
@anchor_y_offset_metadata.setter
def anchor_y_offset_metadata(self, anchor_y_offset_metadata):
self._anchor_y_offset_metadata = anchor_y_offset_metadata
@property
def conditional_parent_label(self):
return self._conditional_parent_label
@conditional_parent_label.setter
def conditional_parent_label(self, conditional_parent_label):
self._conditional_parent_label = conditional_parent_label
@property
def conditional_parent_label_metadata(self):
return self._conditional_parent_label_metadata
@conditional_parent_label_metadata.setter
def conditional_parent_label_metadata(self, conditional_parent_label_metadata):
self._conditional_parent_label_metadata = conditional_parent_label_metadata
@property
|
MIT License
|
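For the Draw.conditional_parent_value row above, a minimal usage sketch, assuming the docusign_esign package is installed; the parent tab label used here is purely illustrative.

```python
from docusign_esign.models.draw import Draw

# Hypothetical example: show this draw tab only while the parent checkbox tab
# labelled "approve_checkbox" is checked. Per the docstring above, Checkbox,
# Radio button, Optional Signature and Optional Initial parents use "on" as
# the active value.
draw_tab = Draw(
    document_id="1",
    page_number="1",
    x_position="100",
    y_position="150",
    conditional_parent_label="approve_checkbox",  # illustrative label
    conditional_parent_value="on",
)
print(draw_tab.conditional_parent_value)  # "on"
```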
zigpy/zha-device-handlers
|
tests/test_xiaomi.py
|
raw_device
|
python
|
def raw_device():
ieee = t.EUI64.convert("11:22:33:44:55:66:77:88")
device = zigpy.device.Device(mock.MagicMock(), ieee, 0x1234)
with mock.patch.object(device, "cancel_initialization"):
yield device
|
Raw device fixture.
|
https://github.com/zigpy/zha-device-handlers/blob/869b78d33d1127b2db2c28a6507dfbb625c3c4d1/tests/test_xiaomi.py#L115-L121
|
import asyncio
from unittest import mock
import pytest
import zigpy.device
import zigpy.types as t
from zigpy.zcl import foundation
import zhaquirks
from zhaquirks.const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MANUFACTURER,
MODEL,
NODE_DESCRIPTOR,
OFF,
ON,
OUTPUT_CLUSTERS,
PROFILE_ID,
ZONE_STATE,
)
from zhaquirks.xiaomi import (
LUMI,
XIAOMI_NODE_DESC,
BasicCluster,
XiaomiCustomDevice,
XiaomiQuickInitDevice,
handle_quick_init,
)
import zhaquirks.xiaomi.aqara.motion_aq2
import zhaquirks.xiaomi.aqara.motion_aq2b
import zhaquirks.xiaomi.mija.motion
from tests.common import ZCL_OCC_ATTR_RPT_OCC, ClusterListener
zhaquirks.setup()
def test_basic_cluster_deserialize_wrong_len():
cluster = BasicCluster(mock.MagicMock())
data = b"\x1c_\x11\x12\n"
data += b'\x05\x00B\x15lumi.sensor_wleak.aq1\x01\xffB"\x01!\xb3\x0b\x03('
data += b"\x17\x04!\xa8C\x05!\xa7\x00\x06$\x00\x00\x00\x00\x00\x08!\x04"
data += b"\x02\n!\x00\x00d\x10\x01"
deserialized = cluster.deserialize(data)
assert deserialized[1]
def test_basic_cluster_deserialize_wrong_len_2():
cluster = BasicCluster(mock.MagicMock())
data = b"\x1c_\x11\x12\n"
data += b'\x01\xffB"\x01!\xb3\x0b\x03(\x17\x04!\xa8C\x05!\xa7\x00\x06$\x15'
data += b"\x00\x14\x00\x00\x08!\x04\x02\n!\x00\x00d\x10\x01"
deserialized = cluster.deserialize(data)
assert deserialized[1]
@pytest.mark.parametrize(
"quirk",
(
zhaquirks.xiaomi.aqara.motion_aq2.MotionAQ2,
zhaquirks.xiaomi.aqara.motion_aq2b.MotionAQ2,
zhaquirks.xiaomi.mija.motion.Motion,
),
)
async def test_konke_motion(zigpy_device_from_quirk, quirk):
motion_dev = zigpy_device_from_quirk(quirk)
motion_cluster = motion_dev.endpoints[1].ias_zone
motion_listener = ClusterListener(motion_cluster)
occupancy_cluster = motion_dev.endpoints[1].occupancy
occupancy_listener = ClusterListener(occupancy_cluster)
p1 = mock.patch.object(motion_cluster, "reset_s", 0)
p2 = mock.patch.object(occupancy_cluster, "reset_s", 0)
hdr, args = occupancy_cluster.deserialize(ZCL_OCC_ATTR_RPT_OCC)
with p1, p2:
occupancy_cluster.handle_message(hdr, args)
assert len(motion_listener.cluster_commands) == 1
assert len(motion_listener.attribute_updates) == 1
assert motion_listener.cluster_commands[0][1] == ZONE_STATE
assert motion_listener.cluster_commands[0][2][0] == ON
assert len(occupancy_listener.cluster_commands) == 0
assert len(occupancy_listener.attribute_updates) == 1
assert occupancy_listener.attribute_updates[0][0] == 0x0000
assert occupancy_listener.attribute_updates[0][1] == 1
await asyncio.sleep(0.1)
assert len(motion_listener.cluster_commands) == 2
assert motion_listener.cluster_commands[1][1] == ZONE_STATE
assert motion_listener.cluster_commands[1][2][0] == OFF
assert len(occupancy_listener.cluster_commands) == 0
assert len(occupancy_listener.attribute_updates) == 2
assert occupancy_listener.attribute_updates[1][0] == 0x0000
assert occupancy_listener.attribute_updates[1][1] == 0
@pytest.fixture
|
Apache License 2.0
|
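A short sketch of how a pytest fixture such as raw_device above is typically consumed; the test body is illustrative and not part of the repository.

```python
import zigpy.types as t

# Hypothetical test: pytest injects the Device yielded by the raw_device
# fixture, with cancel_initialization patched out for the duration of the test.
def test_raw_device_basics(raw_device):
    assert raw_device.nwk == 0x1234
    assert raw_device.ieee == t.EUI64.convert("11:22:33:44:55:66:77:88")
```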
muxinc/mux-python
|
mux_python/models/create_playback_id_response.py
|
CreatePlaybackIDResponse.__ne__
|
python
|
def __ne__(self, other):
if not isinstance(other, CreatePlaybackIDResponse):
return True
return self.to_dict() != other.to_dict()
|
Returns true if both objects are not equal
|
https://github.com/muxinc/mux-python/blob/57c10a3002a0bc65a0dc8938f08176bd5b030a93/mux_python/models/create_playback_id_response.py#L124-L129
|
import inspect
import pprint
import re
import six
from mux_python.configuration import Configuration
class CreatePlaybackIDResponse(object):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'PlaybackID'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = inspect.getargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, CreatePlaybackIDResponse):
return False
return self.to_dict() == other.to_dict()
|
MIT License
|
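A small hedged example of the __eq__/__ne__ pair on this generated model; two empty responses compare equal because both comparisons go through to_dict().

```python
from mux_python.models.create_playback_id_response import CreatePlaybackIDResponse

a = CreatePlaybackIDResponse()
b = CreatePlaybackIDResponse()
print(a == b)    # True:  to_dict() output matches
print(a != b)    # False: the __ne__ shown in the row above
print(a != "x")  # True:  "x" is not a CreatePlaybackIDResponse
```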
noxdafox/clipspy
|
clips/classes.py
|
Class.module
|
python
|
def module(self) -> Module:
name = ffi.string(lib.DefclassModule(self._ptr())).decode()
return Module(self._env, name)
|
The module in which the Class is defined.
Equivalent to the CLIPS (defclass-module) function.
|
https://github.com/noxdafox/clipspy/blob/a317964dc86755619d84b9adf4008d62663889ce/clips/classes.py#L224-L232
|
import os
import clips
from clips.modules import Module
from clips.common import PutSlotError, PUT_SLOT_ERROR
from clips.common import CLIPSError, SaveMode, ClassDefaultMode
from clips.common import environment_builder, environment_modifier
from clips._clips import lib, ffi
class Instance:
__slots__ = '_env', '_ist'
def __init__(self, env: ffi.CData, ist: ffi.CData):
self._env = env
self._ist = ist
lib.RetainInstance(self._ist)
def __del__(self):
try:
lib.ReleaseInstance(self._ist)
except (AttributeError, TypeError):
pass
def __hash__(self):
return hash(self._ist)
def __eq__(self, ist):
return self._ist == ist._ist
def __str__(self):
return ' '.join(instance_pp_string(self._env, self._ist).split())
def __repr__(self):
string = ' '.join(instance_pp_string(self._env, self._ist).split())
return "%s: %s" % (self.__class__.__name__, string)
def __iter__(self):
slot_names = (s.name for s in self.instance_class.slots())
return ((n, slot_value(self._env, self._ist, n)) for n in slot_names)
def __getitem__(self, slot):
return slot_value(self._env, self._ist, slot)
@property
def name(self) -> str:
return ffi.string(lib.InstanceName(self._ist)).decode()
@property
def instance_class(self) -> 'Class':
defclass = lib.InstanceClass(self._ist)
name = ffi.string(lib.DefclassName(defclass)).decode()
return Class(self._env, name)
def modify_slots(self, **slots):
modifier = environment_modifier(self._env, 'instance')
ret = lib.IMSetInstance(modifier, self._ist)
if ret != lib.IME_NO_ERROR:
raise CLIPSError(self._env, code=ret)
for slot, slot_val in slots.items():
value = clips.values.clips_value(self._env, value=slot_val)
ret = lib.IMPutSlot(modifier, str(slot).encode(), value)
if ret != PutSlotError.PSE_NO_ERROR:
raise PUT_SLOT_ERROR[ret](slot)
if lib.IMModify(modifier) is ffi.NULL:
raise CLIPSError(self._env, code=lib.IMError(self._env))
def send(self, message: str, arguments: str = None) -> type:
output = clips.values.clips_value(self._env)
instance = clips.values.clips_value(self._env, value=self)
args = arguments.encode() if arguments is not None else ffi.NULL
lib.Send(self._env, instance, message.encode(), args, output)
return clips.values.python_value(self._env, output)
def delete(self):
ret = lib.DeleteInstance(self._ist)
if ret != lib.UIE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
def unmake(self):
ret = lib.UnmakeInstance(self._ist)
if ret != lib.UIE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
class Class:
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, cls):
return self._ptr() == cls._ptr()
def __str__(self):
string = lib.DefclassPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefclassPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
cls = lib.FindDefclass(self._env, self._name)
if cls == ffi.NULL:
raise CLIPSError(self._env, 'Class <%s> not defined' % self.name)
return cls
@property
def abstract(self) -> bool:
return lib.ClassAbstractP(self._ptr())
@property
def reactive(self) -> bool:
return lib.ClassReactiveP(self._ptr())
@property
def name(self) -> str:
return ffi.string(lib.DefclassName(self._ptr())).decode()
@property
|
BSD 3-Clause New or Revised License
|
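A hedged sketch of reading a class's defining module through clipspy's high-level API, assuming Environment.build and Environment.find_class behave as documented for clipspy; the defclass itself is illustrative.

```python
import clips

env = clips.Environment()
env.build("(defclass MyPoint (is-a USER) (slot x) (slot y))")

cls = env.find_class("MyPoint")  # returns the Class wrapper shown above
print(cls.module.name)           # typically "MAIN" unless defined in another defmodule
```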
open-eo/openeo-python-client
|
openeo/rest/connection.py
|
Connection.load_collection
|
python
|
def load_collection(
self,
collection_id: str,
spatial_extent: Optional[Dict[str, float]] = None,
temporal_extent: Optional[List[Union[str, datetime.datetime, datetime.date]]] = None,
bands: Optional[List[str]] = None,
properties: Optional[Dict[str, Union[str, PGNode, Callable]]] = None,
fetch_metadata=True,
) -> DataCube:
if self._api_version.at_least("1.0.0"):
return DataCube.load_collection(
collection_id=collection_id, connection=self,
spatial_extent=spatial_extent, temporal_extent=temporal_extent, bands=bands, properties=properties,
fetch_metadata=fetch_metadata,
)
else:
return ImageCollectionClient.load_collection(
collection_id=collection_id, session=self,
spatial_extent=spatial_extent, temporal_extent=temporal_extent, bands=bands
)
|
Load a DataCube by collection id.
:param collection_id: image collection identifier
:param spatial_extent: limit data to specified bounding box or polygons
:param temporal_extent: limit data to specified temporal interval
:param bands: only add the specified bands
:param properties: limit data by metadata property predicates
:return: a datacube containing the requested data
|
https://github.com/open-eo/openeo-python-client/blob/bde2d0f992bd52fc244c8bfeceac4e58d6b12c2d/openeo/rest/connection.py#L828-L857
|
import datetime
import json
import logging
import shlex
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Dict, List, Tuple, Union, Callable, Optional, Any, Iterator
from urllib.parse import urljoin
import requests
from deprecated.sphinx import deprecated
from requests import Response
from requests.auth import HTTPBasicAuth, AuthBase
import openeo
from openeo.capabilities import ApiVersionException, ComparableVersion
from openeo.internal.graph_building import PGNode, as_flat_graph
from openeo.internal.jupyter import VisualDict, VisualList
from openeo.internal.processes.builder import ProcessBuilderBase
from openeo.metadata import CollectionMetadata
from openeo.rest import OpenEoClientException, OpenEoApiError, OpenEoRestError
from openeo.rest.auth.auth import NullAuth, BearerAuth
from openeo.rest.auth.config import RefreshTokenStore, AuthConfig
from openeo.rest.auth.oidc import OidcClientCredentialsAuthenticator, OidcAuthCodePkceAuthenticator, OidcClientInfo, OidcAuthenticator, OidcRefreshTokenAuthenticator, OidcResourceOwnerPasswordAuthenticator, OidcDeviceAuthenticator, OidcProviderInfo, OidcException, DefaultOidcClientGrant, GrantsChecker
from openeo.rest.datacube import DataCube
from openeo.rest.imagecollectionclient import ImageCollectionClient
from openeo.rest.job import RESTJob
from openeo.rest.rest_capabilities import RESTCapabilities
from openeo.rest.service import Service
from openeo.rest.udp import RESTUserDefinedProcess, Parameter
from openeo.util import ensure_list, legacy_alias, dict_no_none, rfc3339, load_json_resource, LazyLoadCache
_log = logging.getLogger(__name__)
def url_join(root_url: str, path: str):
return urljoin(root_url.rstrip('/') + '/', path.lstrip('/'))
class RestApiConnection:
def __init__(self, root_url: str, auth: AuthBase = None, session: requests.Session = None,
default_timeout: int = None):
self._root_url = root_url
self.auth = auth or NullAuth()
self.session = session or requests.Session()
self.default_timeout = default_timeout
self.default_headers = {
"User-Agent": "openeo-python-client/{cv} {py}/{pv} {pl}".format(
cv=openeo.client_version(),
py=sys.implementation.name, pv=".".join(map(str, sys.version_info[:3])),
pl=sys.platform
)
}
@property
def root_url(self):
return self._root_url
def build_url(self, path: str):
return url_join(self._root_url, path)
def _merged_headers(self, headers: dict) -> dict:
result = self.default_headers.copy()
if headers:
result.update(headers)
return result
def _is_external(self, url: str) -> bool:
root = self.root_url.rstrip("/")
return not (url == root or url.startswith(root + '/'))
def request(self, method: str, path: str, headers: dict = None, auth: AuthBase = None,
check_error=True, expected_status=None, **kwargs):
url = self.build_url(path)
auth = auth or (self.auth if not self._is_external(url) else None)
if _log.isEnabledFor(logging.DEBUG):
_log.debug("Request `{m} {u}` with headers {h}, auth {a}, kwargs {k}".format(
m=method.upper(), u=url, h=headers and headers.keys(), a=type(auth).__name__, k=list(kwargs.keys()))
)
resp = self.session.request(
method=method,
url=url,
headers=self._merged_headers(headers),
auth=auth,
timeout=kwargs.pop("timeout", self.default_timeout),
**kwargs
)
if _log.isEnabledFor(logging.DEBUG):
_log.debug("Got {r} headers {h!r}".format(r=resp, h=resp.headers))
status = resp.status_code
expected_status = ensure_list(expected_status) if expected_status else []
if check_error and status >= 400 and status not in expected_status:
self._raise_api_error(resp)
if expected_status and status not in expected_status:
raise OpenEoRestError("Got status code {s!r} for `{m} {p}` (expected {e!r})".format(
m=method.upper(), p=path, s=status, e=expected_status)
)
return resp
def _raise_api_error(self, response: requests.Response):
status_code = response.status_code
try:
info = response.json()
exception = OpenEoApiError(
http_status_code=status_code,
code=info.get("code", "unknown"),
message=info.get("message", "unknown error"),
id=info.get("id"),
url=info.get("url"),
)
except Exception:
text = response.text
_log.warning("Failed to parse API error response: {s} {t!r}".format(s=status_code, t=text))
if status_code == 502 and "Proxy Error" in text:
msg = "Received 502 Proxy Error." " This typically happens if an OpenEO request takes too long and is killed." " Consider using batch jobs instead of doing synchronous processing."
exception = OpenEoApiError(http_status_code=status_code, message=msg)
else:
exception = OpenEoApiError(http_status_code=status_code, message=text)
raise exception
def get(self, path, stream=False, auth: AuthBase = None, **kwargs) -> Response:
return self.request("get", path=path, stream=stream, auth=auth, **kwargs)
def post(self, path, json: dict = None, **kwargs) -> Response:
return self.request("post", path=path, json=json, allow_redirects=False, **kwargs)
def delete(self, path, **kwargs) -> Response:
return self.request("delete", path=path, allow_redirects=False, **kwargs)
def patch(self, path, **kwargs) -> Response:
return self.request("patch", path=path, allow_redirects=False, **kwargs)
def put(self, path, headers: dict = None, data=None, **kwargs) -> Response:
return self.request("put", path=path, data=data, headers=headers, allow_redirects=False, **kwargs)
def __repr__(self):
return "<{c} to {r!r} with {a}>".format(c=type(self).__name__, r=self._root_url, a=type(self.auth).__name__)
class Connection(RestApiConnection):
_MINIMUM_API_VERSION = ComparableVersion("0.4.0")
oidc_auth_user_id_token_as_bearer = False
def __init__(
self, url: str, auth: AuthBase = None, session: requests.Session = None, default_timeout: int = None,
auth_config: AuthConfig = None, refresh_token_store: RefreshTokenStore = None
):
if "://" not in url:
url = "https://" + url
self._orig_url = url
super().__init__(
root_url=self.version_discovery(url, session=session),
auth=auth, session=session, default_timeout=default_timeout
)
self._capabilities_cache = LazyLoadCache()
if self._api_version.below(self._MINIMUM_API_VERSION):
raise ApiVersionException("OpenEO API version should be at least {m!s}, but got {v!s}".format(
m=self._MINIMUM_API_VERSION, v=self._api_version)
)
self._auth_config = auth_config
self._refresh_token_store = refresh_token_store
@classmethod
def version_discovery(cls, url: str, session: requests.Session = None) -> str:
try:
well_known_url_response = RestApiConnection(url, session=session).get("/.well-known/openeo")
assert well_known_url_response.status_code == 200
versions = well_known_url_response.json()["versions"]
supported_versions = [v for v in versions if cls._MINIMUM_API_VERSION <= v["api_version"]]
assert supported_versions
production_versions = [v for v in supported_versions if v.get("production", True)]
highest_version = max(production_versions or supported_versions, key=lambda v: v["api_version"])
_log.debug("Highest supported version available in backend: %s" % highest_version)
return highest_version['url']
except Exception:
return url
def _get_auth_config(self) -> AuthConfig:
if self._auth_config is None:
self._auth_config = AuthConfig()
return self._auth_config
def _get_refresh_token_store(self) -> RefreshTokenStore:
if self._refresh_token_store is None:
self._refresh_token_store = RefreshTokenStore()
return self._refresh_token_store
def authenticate_basic(self, username: str = None, password: str = None) -> 'Connection':
if not self.capabilities().supports_endpoint("/credentials/basic", method="GET"):
raise OpenEoClientException("This openEO back-end does not support basic authentication.")
if username is None:
username, password = self._get_auth_config().get_basic_auth(backend=self._orig_url)
if username is None:
raise OpenEoClientException("No username/password given or found.")
resp = self.get(
'/credentials/basic',
auth=HTTPBasicAuth(username, password)
).json()
if self._api_version.at_least("1.0.0"):
self.auth = BearerAuth(bearer='basic//{t}'.format(t=resp["access_token"]))
else:
self.auth = BearerAuth(bearer=resp["access_token"])
return self
def _get_oidc_provider(self, provider_id: Union[str, None] = None) -> Tuple[str, OidcProviderInfo]:
if self._api_version.at_least("1.0.0"):
oidc_info = self.get("/credentials/oidc", expected_status=200).json()
providers = OrderedDict((p["id"], p) for p in oidc_info["providers"])
if len(providers) < 1:
raise OpenEoClientException("Backend lists no OIDC providers.")
_log.info("Found OIDC providers: {p}".format(p=list(providers.keys())))
if provider_id:
if provider_id not in providers:
raise OpenEoClientException(
"Requested OIDC provider {r!r} not available. Should be one of {p}.".format(
r=provider_id, p=list(providers.keys())
)
)
provider = providers[provider_id]
elif len(providers) == 1:
provider_id, provider = providers.popitem()
_log.info("No OIDC provider given, but only one available: {p!r}. Using that one.".format(
p=provider_id
))
else:
backend = self._orig_url
provider_configs = self._get_auth_config().get_oidc_provider_configs(backend=backend)
intersection = set(provider_configs.keys()).intersection(providers.keys())
if len(intersection) == 1:
provider_id = intersection.pop()
provider = providers[provider_id]
_log.info(
"No OIDC provider given, but only one in config (for backend {b!r}): {p!r}."
" Using that one.".format(b=backend, p=provider_id)
)
else:
provider_id, provider = providers.popitem(last=False)
_log.info("No OIDC provider given. Using first provider {p!r} as advertised by backend.".format(
p=provider_id
))
provider = OidcProviderInfo.from_dict(provider)
else:
provider = OidcProviderInfo(discovery_url=self.build_url('/credentials/oidc'))
return provider_id, provider
def _get_oidc_provider_and_client_info(
self, provider_id: str,
client_id: Union[str, None], client_secret: Union[str, None],
default_client_grant_check: Union[None, GrantsChecker] = None
) -> Tuple[str, OidcClientInfo]:
provider_id, provider = self._get_oidc_provider(provider_id)
if client_id is None:
_log.debug("No client_id: checking config for preferred client_id")
client_id, client_secret = self._get_auth_config().get_oidc_client_configs(
backend=self._orig_url, provider_id=provider_id
)
if client_id:
_log.info("Using client_id {c!r} from config (provider {p!r})".format(c=client_id, p=provider_id))
if client_id is None and default_client_grant_check:
_log.debug("No client_id given: checking default client in backend's provider info")
client_id = provider.get_default_client_id(grant_check=default_client_grant_check)
if client_id:
_log.info("Using default client_id {c!r} from OIDC provider {p!r} info.".format(
c=client_id, p=provider_id
))
if client_id is None:
raise OpenEoClientException("No client_id found.")
client_info = OidcClientInfo(client_id=client_id, client_secret=client_secret, provider=provider)
return provider_id, client_info
def _authenticate_oidc(
self,
authenticator: OidcAuthenticator,
provider_id: str,
store_refresh_token: bool = False
) -> 'Connection':
tokens = authenticator.get_tokens(request_refresh_token=store_refresh_token)
_log.info("Obtained tokens: {t}".format(t=[k for k, v in tokens._asdict().items() if v]))
if store_refresh_token:
if tokens.refresh_token:
self._get_refresh_token_store().set_refresh_token(
issuer=authenticator.provider_info.issuer,
client_id=authenticator.client_id,
refresh_token=tokens.refresh_token
)
else:
_log.warning("OIDC token response did not contain refresh token.")
token = tokens.access_token if not self.oidc_auth_user_id_token_as_bearer else tokens.id_token
if self._api_version.at_least("1.0.0"):
self.auth = BearerAuth(bearer='oidc/{p}/{t}'.format(p=provider_id, t=token))
else:
self.auth = BearerAuth(bearer=token)
return self
def authenticate_oidc_authorization_code(
self,
client_id: str = None,
client_secret: str = None,
provider_id: str = None,
timeout: int = None,
server_address: Tuple[str, int] = None,
webbrowser_open: Callable = None,
store_refresh_token=False,
) -> 'Connection':
provider_id, client_info = self._get_oidc_provider_and_client_info(
provider_id=provider_id, client_id=client_id, client_secret=client_secret,
default_client_grant_check=[DefaultOidcClientGrant.AUTH_CODE_PKCE],
)
authenticator = OidcAuthCodePkceAuthenticator(
client_info=client_info,
webbrowser_open=webbrowser_open, timeout=timeout, server_address=server_address
)
return self._authenticate_oidc(authenticator, provider_id=provider_id, store_refresh_token=store_refresh_token)
def authenticate_oidc_client_credentials(
self,
client_id: str = None,
client_secret: str = None,
provider_id: str = None,
store_refresh_token=False,
) -> 'Connection':
provider_id, client_info = self._get_oidc_provider_and_client_info(
provider_id=provider_id, client_id=client_id, client_secret=client_secret
)
authenticator = OidcClientCredentialsAuthenticator(client_info=client_info)
return self._authenticate_oidc(authenticator, provider_id=provider_id, store_refresh_token=store_refresh_token)
def authenticate_oidc_resource_owner_password_credentials(
self,
username: str, password: str,
client_id: str = None,
client_secret: str = None,
provider_id: str = None,
store_refresh_token=False
) -> 'Connection':
provider_id, client_info = self._get_oidc_provider_and_client_info(
provider_id=provider_id, client_id=client_id, client_secret=client_secret
)
authenticator = OidcResourceOwnerPasswordAuthenticator(
client_info=client_info, username=username, password=password
)
return self._authenticate_oidc(authenticator, provider_id=provider_id, store_refresh_token=store_refresh_token)
def authenticate_oidc_refresh_token(
self, client_id: str = None, refresh_token: str = None, client_secret: str = None, provider_id: str = None
) -> 'Connection':
provider_id, client_info = self._get_oidc_provider_and_client_info(
provider_id=provider_id, client_id=client_id, client_secret=client_secret,
default_client_grant_check=[DefaultOidcClientGrant.REFRESH_TOKEN],
)
if refresh_token is None:
refresh_token = self._get_refresh_token_store().get_refresh_token(
issuer=client_info.provider.issuer,
client_id=client_info.client_id
)
if refresh_token is None:
raise OpenEoClientException("No refresh token given or found")
authenticator = OidcRefreshTokenAuthenticator(client_info=client_info, refresh_token=refresh_token)
return self._authenticate_oidc(authenticator, provider_id=provider_id)
def authenticate_oidc_device(
self, client_id: str = None, client_secret: str = None, provider_id: str = None,
store_refresh_token=False, use_pkce: Union[bool, None] = None,
**kwargs
) -> 'Connection':
_g = DefaultOidcClientGrant
provider_id, client_info = self._get_oidc_provider_and_client_info(
provider_id=provider_id, client_id=client_id, client_secret=client_secret,
default_client_grant_check=(lambda grants: _g.DEVICE_CODE in grants or _g.DEVICE_CODE_PKCE in grants),
)
authenticator = OidcDeviceAuthenticator(client_info=client_info, use_pkce=use_pkce, **kwargs)
return self._authenticate_oidc(authenticator, provider_id=provider_id, store_refresh_token=store_refresh_token)
def authenticate_oidc(
self,
provider_id: str = None,
client_id: Union[str, None] = None, client_secret: Union[str, None] = None,
store_refresh_token: bool = True,
use_pkce: Union[bool, None] = None,
):
_g = DefaultOidcClientGrant
provider_id, client_info = self._get_oidc_provider_and_client_info(
provider_id=provider_id, client_id=client_id, client_secret=client_secret,
default_client_grant_check=lambda grants: (
_g.REFRESH_TOKEN in grants and (_g.DEVICE_CODE in grants or _g.DEVICE_CODE_PKCE in grants)
)
)
refresh_token = self._get_refresh_token_store().get_refresh_token(
issuer=client_info.provider.issuer,
client_id=client_info.client_id
)
if refresh_token:
try:
_log.info("Found refresh token: trying refresh token based authentication.")
authenticator = OidcRefreshTokenAuthenticator(client_info=client_info, refresh_token=refresh_token)
con = self._authenticate_oidc(
authenticator, provider_id=provider_id, store_refresh_token=store_refresh_token
)
print("Authenticated using refresh token.")
return con
except OidcException as e:
_log.info("Refresh token based authentication failed: {e}.".format(e=e))
_log.info("Trying device code flow.")
authenticator = OidcDeviceAuthenticator(client_info=client_info, use_pkce=use_pkce)
con = self._authenticate_oidc(authenticator, provider_id=provider_id, store_refresh_token=store_refresh_token)
print("Authenticated using device code flow.")
return con
def describe_account(self) -> str:
return self.get('/me', expected_status=200).json()
@deprecated("use :py:meth:`list_jobs` instead", version="0.4.10")
def user_jobs(self) -> dict:
return self.list_jobs()
def list_collections(self) -> List[dict]:
data = self.get('/collections', expected_status=200).json()["collections"]
return VisualList("collections", data=data)
def list_collection_ids(self) -> List[str]:
return [collection['id'] for collection in self.list_collections() if 'id' in collection]
def capabilities(self) -> RESTCapabilities:
return self._capabilities_cache.get(
"capabilities",
load=lambda: RESTCapabilities(data=self.get('/', expected_status=200).json(), url=self._orig_url)
)
def list_output_formats(self) -> dict:
if self._api_version.at_least("1.0.0"):
return self.list_file_formats()["output"]
else:
return self.get('/output_formats', expected_status=200).json()
list_file_types = legacy_alias(list_output_formats, "list_file_types")
def list_file_formats(self) -> dict:
formats = self._capabilities_cache.get(
key="file_formats",
load=lambda: self.get('/file_formats', expected_status=200).json()
)
return VisualDict("file-formats", data=formats)
def list_service_types(self) -> dict:
types = self._capabilities_cache.get(
key="service_types",
load=lambda: self.get('/service_types', expected_status=200).json()
)
return VisualDict("service-types", data=types)
def list_udf_runtimes(self) -> dict:
runtimes = self._capabilities_cache.get(
key="udf_runtimes",
load=lambda: self.get('/udf_runtimes', expected_status=200).json()
)
return VisualDict("udf-runtimes", data=runtimes)
def list_services(self) -> dict:
services = self.get('/services', expected_status=200).json()["services"]
return VisualList("data-table", data=services, parameters={'columns': 'services'})
def describe_collection(self, name) -> dict:
data = self.get('/collections/{}'.format(name), expected_status=200).json()
return VisualDict("collection", data=data)
def collection_items(self, name, spatial_extent: Optional[List[float]] = None, temporal_extent: Optional[List[Union[str, datetime.datetime]]] = None, limit: int = None) -> Iterator[dict]:
url = '/collections/{}/items'.format(name)
params = {}
if spatial_extent:
params["bbox"] = ",".join(str(c) for c in spatial_extent)
if temporal_extent:
params["datetime"] = "/".join(".." if t is None else rfc3339.normalize(t) for t in temporal_extent)
if limit is not None and limit > 0:
params['limit'] = limit
return paginate(self, url, params, lambda response, page: VisualDict("items", data = response, parameters = {'show-map': True, 'heading': 'Page {} - Items'.format(page)}))
def collection_metadata(self, name) -> CollectionMetadata:
return CollectionMetadata(metadata=self.describe_collection(name))
def list_processes(self, namespace: str = None) -> List[dict]:
if namespace is None:
processes = self._capabilities_cache.get(
key=("processes", "backend"),
load=lambda: self.get('/processes', expected_status=200).json()["processes"]
)
else:
processes = self.get('/processes/' + namespace, expected_status=200).json()["processes"]
return VisualList("processes", data=processes, parameters={'show-graph': True, 'provide-download': False})
def list_jobs(self) -> List[dict]:
jobs = self.get('/jobs', expected_status=200).json()["jobs"]
return VisualList("data-table", data=jobs, parameters={'columns': 'jobs'})
def save_user_defined_process(
self, user_defined_process_id: str,
process_graph: Union[dict, ProcessBuilderBase],
parameters: List[Union[dict, Parameter]] = None,
public: bool = False, summary: str = None, description: str = None
) -> RESTUserDefinedProcess:
if user_defined_process_id in set(p["id"] for p in self.list_processes()):
warnings.warn("Defining user-defined process {u!r} with same id as a pre-defined process".format(
u=user_defined_process_id))
if not parameters:
warnings.warn("Defining user-defined process {u!r} without parameters".format(u=user_defined_process_id))
udp = RESTUserDefinedProcess(user_defined_process_id=user_defined_process_id, connection=self)
udp.store(process_graph=process_graph, parameters=parameters, public=public,summary=summary,description=description)
return udp
def list_user_defined_processes(self) -> List[dict]:
data = self.get("/process_graphs", expected_status=200).json()["processes"]
return VisualList("processes", data=data, parameters={'show-graph': True, 'provide-download': False})
def user_defined_process(self, user_defined_process_id: str) -> RESTUserDefinedProcess:
return RESTUserDefinedProcess(user_defined_process_id=user_defined_process_id, connection=self)
def validate_process_graph(self, process_graph: dict) -> List[dict]:
request = {"process_graph": process_graph}
return self.post(path="/validation", json=request, expected_status=200).json()["errors"]
@property
def _api_version(self) -> ComparableVersion:
return self.capabilities().api_version_check
def datacube_from_process(self, process_id: str, namespace: str = None, **kwargs) -> DataCube:
if self._api_version.at_least("1.0.0"):
graph = PGNode(process_id, namespace=namespace, arguments=kwargs)
return DataCube(graph=graph, connection=self)
else:
raise OpenEoClientException(
"This method requires support for at least version 1.0.0 in the openEO backend.")
def datacube_from_flat_graph(self, flat_graph: dict, parameters: dict = None) -> DataCube:
if self._api_version.below("1.0.0"):
raise OpenEoClientException(
"This method requires support for at least version 1.0.0 in the openEO backend.")
parameters = parameters or {}
if "process_graph" in flat_graph:
for param in flat_graph.get("parameters") or []:
if "default" in param:
parameters.setdefault(param["name"], param["default"])
flat_graph = flat_graph["process_graph"]
pgnode = PGNode.from_flat_graph(flat_graph=flat_graph, parameters=parameters or {})
return DataCube(graph=pgnode, connection=self)
def datacube_from_json(self, src: Union[str, Path], parameters: dict = None) -> DataCube:
return self.datacube_from_flat_graph(load_json_resource(src), parameters=parameters)
|
Apache License 2.0
|
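A hedged usage sketch for Connection.load_collection above; the back-end URL and the collection id "SENTINEL2_L2A" are placeholders, and authentication is omitted.

```python
import openeo

# Hypothetical back-end and collection id, for illustration only.
connection = openeo.connect("https://openeo.example.org")
cube = connection.load_collection(
    "SENTINEL2_L2A",
    spatial_extent={"west": 5.00, "south": 51.00, "east": 5.10, "north": 51.10},
    temporal_extent=["2021-01-01", "2021-03-01"],
    bands=["B02", "B03", "B04"],
)
```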
gbouvignies/chemex
|
chemex/containers/noise.py
|
_variance_from_scatter
|
python
|
def _variance_from_scatter(data):
x_name, y_name, *_ = data.dtype.names
data_sorted = np.sort(data, order=x_name)
values = data_sorted[y_name]
size = values.size
fda = [
[1, -1],
[1, -2, 1],
[1, -3, 3, -1],
[1, -4, 6, -4, 1],
[1, -5, 10, -10, 5, -1],
[1, -6, 15, -20, 15, -6, 1],
]
fda = [np.array(a_fda) / la.norm(a_fda) for a_fda in fda]
percents = np.array([0.05] + list(np.arange(0.1, 0.40, 0.025)))
percent_points = stats.norm.ppf(1.0 - percents)
sigma_est = []
for fdai in fda:
noisedata = sorted(signal.convolve(values, fdai, mode="valid"))
ntrim = len(noisedata)
if ntrim >= 2:
xaxis = (0.5 + np.arange(1, ntrim + 1)) / (ntrim + 0.5)
sigmas = []
function = interpolate.interp1d(xaxis, noisedata, "linear")
for a_perc, a_z in zip(percents, percent_points):
try:
val = (function(1.0 - a_perc) - function(a_perc)) / (2.0 * a_z)
sigmas.append(val)
except ValueError:
pass
sigma_est.append(np.median(sigmas))
variance = np.median(sigma_est) ** 2 / (1.0 + 15.0 * (size + 1.225) ** -1.245)
return max(variance, 1e-8)
|
Estimate the uncertainty in the CEST profile.
Adapted from:
https://www.mathworks.com/matlabcentral/fileexchange/16683-estimatenoise
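As a rough, hedged illustration of how the estimator is driven: the input is a structured array whose first two dtype fields are taken as the x and y columns; the field names "offset" and "intensity" below are invented for the example.
# Illustrative sketch only; the data and field names are hypothetical.
import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50)
y = np.exp(-0.2 * x) + rng.normal(0.0, 0.01, x.size)
profile = np.array(list(zip(x, y)), dtype=[("offset", float), ("intensity", float)])
sigma = np.sqrt(_variance_from_scatter(profile))  # estimated noise standard deviation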
|
https://github.com/gbouvignies/chemex/blob/0ca5d12ea3e6a921f1858d50b4a828e769932154/chemex/containers/noise.py#L31-L69
|
import numpy as np
from scipy import interpolate
from scipy import linalg as la
from scipy import signal
from scipy import stats
def _variance_from_duplicates(data):
groups = {}
x_name, y_name, e_name = data.dtype.names
for x, y in data[[x_name, y_name]]:
groups.setdefault(x, []).append(y)
variances, weights = [], []
for group in groups.values():
group_size = len(group)
if group_size > 1:
variances.append(np.var(group, ddof=1))
weights.append(group_size - 1)
if not variances:
return np.mean(data[e_name])
return np.average(variances, weights=weights)
|
BSD 3-Clause New or Revised License
|
nikolay-kha/pycnc
|
cnc/hal_virtual.py
|
get_extruder_temperature
|
python
|
def get_extruder_temperature():
return EXTRUDER_MAX_TEMPERATURE * 0.999
|
Measure extruder temperature.
:return: temperature in Celsius.
|
https://github.com/nikolay-kha/pycnc/blob/f5ae14b72b0dee7e24f1c323771936f1daa1da97/cnc/hal_virtual.py#L51-L55
|
from __future__ import division
import time
from cnc.pulses import *
from cnc.config import *
def init():
logging.info("initialize hal")
def spindle_control(percent):
logging.info("spindle control: {}%".format(percent))
def fan_control(on_off):
if on_off:
logging.info("Fan is on")
else:
logging.info("Fan is off")
def extruder_heater_control(percent):
pass
def bed_heater_control(percent):
pass
|
MIT License
|
castagnait/plugin.video.netflix
|
resources/lib/common/misc_utils.py
|
censure
|
python
|
def censure(value, length=3):
if not value:
return value
return value[:-length] + '*' * length
|
Censor part of the string with asterisks
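A brief hedged example (the token values are invented):
# Illustrative values only.
censure("4815162342")     # -> '4815162***'  (last 3 characters masked by default)
censure("4815162342", 5)  # -> '48151*****'  (mask length is configurable)
censure("")               # -> ''            (falsy values pass through unchanged)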
|
https://github.com/castagnait/plugin.video.netflix/blob/1c68c7d4c399603a5dcbeef1e7637de7a9036a72/resources/lib/common/misc_utils.py#L178-L182
|
import operator
from urllib.parse import quote, urlencode
from resources.lib.globals import G
def find(value_to_find, attribute, search_space):
for video in search_space:
if video[attribute] == value_to_find:
return video
raise KeyError(f'Metadata for {value_to_find} does not exist')
def find_episode_metadata(episode_videoid, metadata):
season = find(int(episode_videoid.seasonid), 'id', metadata['seasons'])
episode = find(int(episode_videoid.episodeid), 'id', season.get('episodes', {}))
return episode, season
def get_class_methods(class_item=None):
from types import FunctionType
_type = FunctionType
return [x for x, y in class_item.__dict__.items()
if isinstance(y, _type)]
def build_url(pathitems=None, videoid=None, params=None, mode=None):
if not (pathitems or videoid):
raise ValueError('Either pathitems or videoid must be set.')
path = f'{G.BASE_URL}/{_encode_path(mode, pathitems, videoid)}/{_encode_params(params)}'
return path
def _expand_mode(mode):
return [mode] if mode else []
def _expand_videoid(videoid):
return videoid.to_path() if videoid else []
def _encode_path(mode, pathitems, videoid):
return quote(
'/'.join(_expand_mode(mode) +
(pathitems or []) +
_expand_videoid(videoid)).encode('utf-8'))
def _encode_params(params):
return f'?{urlencode(params)}' if params else ''
def is_numeric(string):
try:
int(string)
except ValueError:
return False
return True
def strp(value, form):
from datetime import datetime
def_value = datetime.utcfromtimestamp(0)
try:
return datetime.strptime(value, form)
except TypeError:
try:
from time import strptime
return datetime(*(strptime(value, form)[0:6]))
except ValueError:
return def_value
except Exception:
return def_value
def strf_timestamp(timestamp, form):
from datetime import datetime
try:
return datetime.utcfromtimestamp(timestamp).strftime(form)
except Exception:
return ''
def merge_dicts(dict_to_merge, merged_dict):
for key, value in dict_to_merge.items():
if isinstance(merged_dict.get(key), dict):
merge_dicts(value, merged_dict[key])
else:
merged_dict[key] = value
return merged_dict
def compare_dict_keys(dict_a, dict_b, compare_keys):
return all(dict_a[k] == dict_b[k] for k in dict_a if k in compare_keys)
def chunked_list(seq, chunk_len):
for start in range(0, len(seq), chunk_len):
yield seq[start:start + chunk_len]
def any_value_except(mapping, excluded_keys):
return next(mapping[key] for key in mapping if key not in excluded_keys)
def enclose_quotes(content):
return f'"{content}"'
def make_list(arg):
return (arg
if isinstance(arg, list)
else ([arg]
if arg is not None
else []))
def convert_seconds_to_hms_str(time):
h = int(time // 3600)
time %= 3600
m = int(time // 60)
s = int(time % 60)
return f'{h:02d}:{m:02d}:{s:02d}'
def remove_html_tags(raw_html):
import re
pattern = re.compile('<.*?>')
return re.sub(pattern, '', raw_html)
|
MIT License
|
sleventyeleven/linuxprivchecker
|
linuxprivchecker.py
|
search_file_passwords
|
python
|
def search_file_passwords():
pwdfiles = {
"LOGPWDS": {"cmd": "find /var/log -name '*.log' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg": "Logs containing keyword 'password'", "results": []},
"CONFPWDS": {"cmd": "find /etc -name '*.c*' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg": "Config files containing keyword 'password'", "results": []},
"SHADOW": {"cmd": "cat /etc/shadow 2>/dev/null", "msg": "Shadow File (Privileged)", "results": []}
}
pwdfiles = execute_cmd(pwdfiles)
print_results(pwdfiles)
|
Search files for passwords (search_file_passwords)
Search the identified file systems for files with potential credentials
:return: None
:TODO: Add searches for common cred files like ssh keys and access tokens
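Following that TODO, one hedged way to extend the tool would be to reuse the same command-dictionary shape consumed by the execute_cmd and print_results helpers shown in the context below; the key names and find expressions here are illustrative only, not part of the original script:
def search_credential_files():
    # Hypothetical extension; reuses the existing cmd/msg/results dictionary shape.
    credfiles = {
        "SSHKEYS": {"cmd": "find / -name 'id_rsa' -o -name 'id_ed25519' 2>/dev/null", "msg": "Private SSH keys", "results": []},
        "AWSCREDS": {"cmd": "find / -path '*/.aws/credentials' 2>/dev/null", "msg": "AWS credential files", "results": []}
    }
    credfiles = execute_cmd(credfiles)
    print_results(credfiles)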
|
https://github.com/sleventyeleven/linuxprivchecker/blob/0d701080bbf92efd464e97d71a70f97c6f2cd658/linuxprivchecker.py#L264-L280
|
try:
import subprocess as sub
compatmode = 0
except ImportError:
import os
compatmode = 1
def execute_cmd(cmddict):
for item in cmddict:
cmd = cmddict[item]["cmd"]
if compatmode == 0:
out, error = sub.Popen([cmd], stdout=sub.PIPE, stderr=sub.PIPE, shell=True).communicate()
results = out.split('\n')
else:
echo_stdout = os.popen(cmd, 'r')
results = echo_stdout.read().split('\n')
cmddict[item]["results"] = results
return cmddict
def print_results(cmddict):
for item in cmddict:
msg = cmddict[item]["msg"]
results = cmddict[item]["results"]
print "[+] " + msg
for result in results:
if result.strip() != "":
print " " + result.strip()
print
def enum_system_info():
print "[*] GETTING BASIC SYSTEM INFO...\n"
sysinfo = {
"OS": {"cmd": "cat /etc/issue", "msg": "Operating System", "results": []},
"KERNEL": {"cmd": "cat /proc/version", "msg": "Kernel", "results": []},
"HOSTNAME": {"cmd": "hostname", "msg": "Hostname", "results": []}
}
sysinfo = execute_cmd(sysinfo)
print_results(sysinfo)
return sysinfo
def enum_network_info():
print "[*] GETTING NETWORKING INFO...\n"
netinfo = {
"netinfo": {"cmd": "/sbin/ifconfig -a", "msg": "Interfaces", "results": []},
"ROUTE": {"cmd": "route", "msg": "Route(s)", "results": []},
"NETSTAT": {"cmd": "netstat -antup | grep -v 'TIME_WAIT'", "msg": "Netstat", "results": []}
}
netinfo = execute_cmd(netinfo)
print_results(netinfo)
def enum_filesystem_info():
print "[*] GETTING FILESYSTEM INFO...\n"
driveinfo = {
"MOUNT": {"cmd": "mount", "msg": "Mount results", "results": []},
"FSTAB": {"cmd": "cat /etc/fstab 2>/dev/null", "msg": "fstab entries", "results": []}
}
driveinfo = execute_cmd(driveinfo)
print_results(driveinfo)
return driveinfo
def enum_cron_jobs():
croninfo = {
"CRON": {"cmd": "ls -la /etc/cron* 2>/dev/null", "msg": "Scheduled cron jobs", "results": []},
"CRONW": {"cmd": "ls -aRl /etc/cron* 2>/dev/null | awk '$1 ~ /w.$/' 2>/dev/null", "msg": "Writable cron dirs", "results": []},
"CRONU": {"cmd": "crontab -l 2>/dev/null", "msg": "Users cron jobs", "results": []}
}
croninfo = execute_cmd(croninfo)
print_results(croninfo)
def enum_user_info():
print "\n[*] ENUMERATING USER AND ENVIRONMENTAL INFO...\n"
userinfo = {
"WHOAMI": {"cmd": "whoami", "msg": "Current User", "results": []},
"ID": {"cmd": "id", "msg": "Current User ID", "results": []},
"ALLUSERS": {"cmd": "cat /etc/passwd", "msg": "All users", "results": []},
"SUPUSERS": {"cmd": "grep -v -E '^#' /etc/passwd | awk -F: '$3 == 0{print $1}'", "msg": "Super Users Found:", "results": []},
"ENV": {"cmd": "env 2>/dev/null | grep -v 'LS_COLORS'", "msg": "Environment", "results": []},
"SUDOERS": {"cmd": "cat /etc/sudoers 2>/dev/null | grep -v '#' 2>/dev/null", "msg": "Sudoers (privileged)", "results": []},
"SCREENS": {"cmd": "screen -ls 2>/dev/null", "msg": "List out any screens running for the current user", "results": []},
"LOGGEDIN": {"cmd": "who -a 2>/dev/null", "msg": "Logged in User Activity", "results": []}
}
userinfo = execute_cmd(userinfo)
print_results(userinfo)
if "root" in userinfo["ID"]["results"][0]:
print "[!] ARE YOU SURE YOU'RE NOT ROOT ALREADY?\n"
exit()
return userinfo
def enum_user_history_files():
print "\n[*] ENUMERATING USER History Files..\n"
historyfiles = {
"RHISTORY": {"cmd": "ls -la /root/.*_history 2>/dev/null", "msg": " See if you have access too Root user history (depends on privs)", "results": []},
"BASHHISTORY": {"cmd": "cat ~/.bash_history 2>/dev/null", "msg": " Get the contents of bash history file for current user", "results": []},
"NANOHISTORY": {"cmd": "cat ~/.nano_history 2>/dev/null", "msg": " Try to get the contents of nano history file for current user", "results": []},
"ATFTPHISTORY": {"cmd": "cat ~/.atftp_history 2>/dev/null", "msg": " Try to get the contents of atftp history file for current user", "results": []},
"MYSQLHISTORY": {"cmd": "cat ~/.mysql_history 2>/dev/null", "msg": " Try to get the contents of mysql history file for current user", "results": []},
"PHPHISTORY": {"cmd": "cat ~/.php_history 2>/dev/null", "msg": " Try to get the contents of php history file for current user", "results": []},
"PYTHONHISTORY": {"cmd": "cat ~/.python_history 2>/dev/null", "msg": " Try to get the contents of python history file for current user", "results": []},
"REDISHISTORY": {"cmd": "cat ~/.rediscli_history 2>/dev/null", "msg": " Try to get the contents of redis cli history file for current user", "results": []},
"TDSQLHISTORY": {"cmd": "cat ~/.tdsql_history 2>/dev/null", "msg": " Try to get the contents of tdsql history file for current user", "results": []}
}
historyfiles = execute_cmd(historyfiles)
print_results(historyfiles)
def enum_rc_files():
print "\n[*] ENUMERATING USER *.rc Style Files For INFO...\n"
rcfiles = {
"GBASHRC" : {"cmd": "cat /etc/bashrc 2>/dev/null", "msg": " Get the contents of bash rc file form global config file", "results": []},
"BASHRC": {"cmd": "cat ~/.bashrc 2>/dev/null", "msg": "Get the contents of bash rc file for current user", "results": []},
"SCREENRC": {"cmd": "cat ~/.screenrc 2>/dev/null", "msg": " Try to get the contents of screen rc file for current user", "results": []},
"GSCREENRC": {"cmd": "cat /etc/screenrc 2>/dev/null", "msg": "Try to get the contents of screen rc file form global config file", "results": []},
"VIRC": {"cmd": "cat ~/.virc 2>/dev/null", "msg": " Try to get the contents of vi rc file for current user", "results": []},
"MYSQLRC": {"cmd": "cat ~/.mysqlrc 2>/dev/null", "msg": " Try to get the contents of mysql rc file for current user", "results": []},
"NETRC": {"cmd": "cat ~/.netrc 2>/dev/null", "msg": " Try to get the contents of legacy net rc file for current user", "results": []}
}
rcfiles = execute_cmd(rcfiles)
print_results(rcfiles)
def search_file_perms():
print "[*] ENUMERATING FILE AND DIRECTORY PERMISSIONS/CONTENTS...\n"
fdperms = {
"WWDIRSROOT": {"cmd": "find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep root", "msg": "World Writeable Directories for User/Group 'Root'", "results": []},
"WWDIRS": {"cmd": "find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep -v root", "msg": "World Writeable Directories for Users other than Root", "results": []},
"WWFILES": {"cmd": "find / \( -wholename '/home/homedir/*' -prune -o -wholename '/proc/*' -prune \) -o \( -type f -perm -0002 \) -exec ls -l '{}' ';' 2>/dev/null", "msg": "World Writable Files", "results": []},
"SUID": {"cmd": "find / \( -perm -2000 -o -perm -4000 \) -exec ls -ld {} \; 2>/dev/null", "msg": "SUID/SGID Files and Directories", "results": []},
"ROOTHOME": {"cmd": "ls -ahlR /root 2>/dev/null", "msg": "Checking if root's home folder is accessible", "results": []}
}
fdperms = execute_cmd(fdperms)
print_results(fdperms)
|
MIT License
|
approxeng/approxeng.input
|
src/python/approxeng/input/__init__.py
|
Controller.presses
|
python
|
def presses(self) -> 'ButtonPresses':
return self.buttons.presses
|
The :class:`~approxeng.input.ButtonPresses` containing buttons pressed between the two most recent calls to
:meth:`~approxeng.input.Controller.check_presses`
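A hedged sketch of the polling pattern this property supports, using only members visible in the Controller class shown in the context below (`controller` is assumed to be a connected Controller instance):
# Illustrative loop only.
while controller.connected:
    controller.check_presses()
    if controller.has_presses:
        print(controller.presses)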
|
https://github.com/approxeng/approxeng.input/blob/5822ef47ca139078b3771cfa29032d76a3fe7503/src/python/approxeng/input/__init__.py#L381-L386
|
import logging
from abc import ABC, abstractmethod
from math import sqrt
from time import time
from typing import Optional, Union, Tuple
import functools
from evdev import InputEvent, ff, ecodes
from approxeng.input.sys import sys_nodes
logger = logging.getLogger(name='approxeng.input')
def map_into_range(low, high, raw_value):
value = float(raw_value)
if low < high:
if value < low:
return 0
elif value > high:
return 1.0
elif low > high:
if value > low:
return 0
elif value < high:
return -1.0
return (value - low) / abs(high - low)
def map_single_axis(low, high, dead_zone, hot_zone, value):
input_range = high - low
corrected_low = low + input_range * dead_zone
corrected_high = high - input_range * hot_zone
return map_into_range(corrected_low, corrected_high, value)
def map_dual_axis(low, high, centre, dead_zone, hot_zone, value):
if value <= centre:
return map_single_axis(centre, low, dead_zone, hot_zone, value)
else:
return map_single_axis(centre, high, dead_zone, hot_zone, value)
class Controller(ABC):
def __init__(self, controls, node_mappings=None, dead_zone=None,
hot_zone=None, ff_device=None):
self.axes = Axes([control for control in controls if
isinstance(control, CentredAxis) or
isinstance(control, BinaryAxis) or
isinstance(control, TriggerAxis)])
self.buttons = Buttons([control for control in controls if
isinstance(control, Button) or
isinstance(control, BinaryAxis) or
isinstance(control, TriggerAxis)])
if dead_zone is not None:
for axis in self.axes.axes:
axis.dead_zone = dead_zone
if hot_zone is not None:
for axis in self.axes.axes:
axis.hot_zone = hot_zone
self.node_mappings = node_mappings
self.device_unique_name = None
self.exception = None
self.ff_device = ff_device
class ControllerStream(object):
def __init__(self, controller):
self.controller = controller
def __getitem__(self, item):
def generator():
while self.controller.connected:
yield self.controller.__getitem__(item)
return generator()
self.stream = ControllerStream(self)
@functools.lru_cache(maxsize=None)
def _build_effect(self, milliseconds=1000, strong_magnitude=0x0000, weak_magnitude=0xffff) -> int:
if self.ff_device:
logger.info('compiling new force feedback effect')
effect = ff.Effect(
ecodes.FF_RUMBLE, -1, 0,
ff.Trigger(0, 0),
ff.Replay(milliseconds, 0),
ff.EffectType(
ff_rumble_effect=ff.Rumble(strong_magnitude=strong_magnitude, weak_magnitude=weak_magnitude))
)
return self.ff_device.upload_effect(effect)
else:
raise ValueError('no force-feedback node, unable to compile effect')
def rumble(self, milliseconds=1000):
if self.ff_device:
logger.debug('controller go brrrr')
effect_id = self._build_effect(milliseconds=milliseconds)
repeat_count = 1
self.ff_device.write(ecodes.EV_FF, effect_id, repeat_count)
else:
logger.warning('no force-feedback node for this controller')
@property
def has_force_feedback(self):
return self.ff_device is not None
@property
def sys_nodes(self) -> {}:
if self.device_unique_name is not None:
return sys_nodes(self.device_unique_name)
return {}
def read_led_value(self, led_name) -> Optional[int]:
if self.device_unique_name is not None:
return sys.read_led_value(self.device_unique_name, led_name)
return None
def write_led_value(self, led_name: str, value: int):
if self.device_unique_name is not None:
sys.write_led_value(self.device_unique_name, led_name, value)
@property
def battery_level(self) -> Optional[float]:
if self.device_unique_name is not None:
return sys.read_power_level(self.device_unique_name)
return None
@staticmethod
@abstractmethod
def registration_ids() -> [Tuple[int, int]]:
pass
@property
def connected(self) -> bool:
if self.device_unique_name:
return True
return False
def __getitem__(self, item: Union[str, Tuple[str, ...]]) -> [Optional[float]]:
if isinstance(item, tuple):
return [self.__getattr__(single_item) for single_item in item]
return self.__getattr__(item)
def __getattr__(self, item: str) -> Optional[float]:
if item in self.axes:
return self.axes[item].value
elif item in self.buttons:
return self.buttons.held(item)
raise AttributeError
def __contains__(self, item: str) -> bool:
if item in self.axes:
return True
if item in self.buttons:
return True
return False
def check_presses(self) -> 'ButtonPresses':
return self.buttons.check_presses()
@property
def has_presses(self) -> bool:
return self.buttons.presses.has_presses
@property
def has_releases(self) -> bool:
return self.buttons.releases.has_presses
@property
|
Apache License 2.0
|
juju/charm-helpers
|
charmhelpers/contrib/network/ovs/__init__.py
|
set_manager
|
python
|
def set_manager(manager):
log('Setting manager for local ovs to {}'.format(manager))
subprocess.check_call(['ovs-vsctl', 'set-manager',
'ssl:{}'.format(manager)])
|
Set the manager for the local openvswitch
|
https://github.com/juju/charm-helpers/blob/25b740578385d15b38f11bed8e4b6e732bdfb7c6/charmhelpers/contrib/network/ovs/__init__.py#L451-L455
|
import collections
import hashlib
import os
import re
import six
import subprocess
from charmhelpers import deprecate
from charmhelpers.contrib.network.ovs import ovsdb as ch_ovsdb
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log, WARNING, INFO, DEBUG, charm_name
)
from charmhelpers.core.host import (
CompareHostReleases,
lsb_release,
service
)
BRIDGE_TEMPLATE = """\
# This veth pair is required when neutron data-port is mapped to an existing linux bridge. lp:1635067
auto {linuxbridge_port}
iface {linuxbridge_port} inet manual
pre-up ip link add name {linuxbridge_port} type veth peer name {ovsbridge_port}
pre-up ip link set {ovsbridge_port} master {bridge}
pre-up ip link set {ovsbridge_port} up
up ip link set {linuxbridge_port} up
down ip link del {linuxbridge_port}
"""
MAX_KERNEL_INTERFACE_NAME_LEN = 15
def get_bridges():
cmd = ["ovs-vsctl", "list-br"]
lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
maybe_bridges = [l.strip() for l in lines]
return [b for b in maybe_bridges if b]
def get_bridge_ports(name):
cmd = ["ovs-vsctl", "--", "list-ports", name]
lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
maybe_ports = [l.strip() for l in lines]
return [p for p in maybe_ports if p]
def get_bridges_and_ports_map():
return {b: get_bridge_ports(b) for b in get_bridges()}
def _dict_to_vsctl_set(data, table, entity):
for (k, v) in data.items():
if isinstance(v, dict):
entries = {
'{}:{}'.format(k, dk): dv for (dk, dv) in v.items()}
else:
entries = {k: v}
for (colk, colv) in entries.items():
yield ('--', 'set', table, entity, '{}={}'.format(colk, colv))
def add_bridge(name, datapath_type=None, brdata=None, exclusive=False):
log('Creating bridge {}'.format(name))
cmd = ['ovs-vsctl', '--']
if not exclusive:
cmd.append('--may-exist')
cmd.extend(('add-br', name))
if brdata:
for setcmd in _dict_to_vsctl_set(brdata, 'bridge', name):
cmd.extend(setcmd)
if datapath_type is not None:
log('DEPRECATION WARNING: add_bridge called with datapath_type, '
'please use the brdata keyword argument instead.')
cmd += ['--', 'set', 'bridge', name,
'datapath_type={}'.format(datapath_type)]
subprocess.check_call(cmd)
def del_bridge(name):
log('Deleting bridge {}'.format(name))
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
def add_bridge_port(name, port, promisc=False, ifdata=None, exclusive=False,
linkup=True, portdata=None):
cmd = ['ovs-vsctl', '--']
if not exclusive:
cmd.append('--may-exist')
cmd.extend(('add-port', name, port))
for ovs_table, data in (('Interface', ifdata), ('Port', portdata)):
if data:
for setcmd in _dict_to_vsctl_set(data, ovs_table, port):
cmd.extend(setcmd)
log('Adding port {} to bridge {}'.format(port, name))
subprocess.check_call(cmd)
if linkup:
subprocess.check_call(["ip", "link", "set", port, "up"])
if promisc:
subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
elif promisc is False:
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
def del_bridge_port(name, port, linkdown=True):
log('Deleting port {} from bridge {}'.format(port, name))
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
name, port])
if linkdown:
subprocess.check_call(["ip", "link", "set", port, "down"])
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
def add_bridge_bond(bridge, port, interfaces, portdata=None, ifdatamap=None,
exclusive=False):
cmd = ['ovs-vsctl', '--']
if not exclusive:
cmd.append('--may-exist')
cmd.extend(('add-bond', bridge, port))
cmd.extend(interfaces)
if portdata:
for setcmd in _dict_to_vsctl_set(portdata, 'port', port):
cmd.extend(setcmd)
if ifdatamap:
for ifname, ifdata in ifdatamap.items():
for setcmd in _dict_to_vsctl_set(ifdata, 'Interface', ifname):
cmd.extend(setcmd)
subprocess.check_call(cmd)
@deprecate('see lp:1877594', '2021-01', log=log)
def add_ovsbridge_linuxbridge(name, bridge, ifdata=None, portdata=None):
try:
import netifaces
except ImportError:
if six.PY2:
apt_install('python-netifaces', fatal=True)
else:
apt_install('python3-netifaces', fatal=True)
import netifaces
existing_ovs_bridge = port_to_br(bridge)
if existing_ovs_bridge is not None:
log('Linuxbridge {} is already directly in use'
' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
level=INFO)
return
ovsbridge_port = "veth-" + name
linuxbridge_port = "veth-" + bridge
if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or
len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN):
hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest()
base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:])
ovsbridge_port = "cvo{}".format(base)
linuxbridge_port = "cvb{}".format(base)
network_interface_already_exists = False
interfaces = netifaces.interfaces()
for interface in interfaces:
if interface == ovsbridge_port or interface == linuxbridge_port:
log('Interface {} already exists'.format(interface), level=INFO)
network_interface_already_exists = True
break
log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
level=INFO)
if not network_interface_already_exists:
setup_eni()
with open('/etc/network/interfaces.d/{}.cfg'.format(
linuxbridge_port), 'w') as config:
config.write(BRIDGE_TEMPLATE.format(
linuxbridge_port=linuxbridge_port,
ovsbridge_port=ovsbridge_port, bridge=bridge))
try:
subprocess.check_call(['ifup', linuxbridge_port])
except FileNotFoundError:
raise RuntimeError('ifup: command not found. Did this charm forget '
'to install ifupdown?')
add_bridge_port(name, linuxbridge_port, ifdata=ifdata, exclusive=False,
portdata=portdata)
def is_linuxbridge_interface(port):
if os.path.exists('/sys/class/net/' + port + '/bridge'):
log('Interface {} is a Linux bridge'.format(port), level=DEBUG)
return True
else:
log('Interface {} is not a Linux bridge'.format(port), level=DEBUG)
return False
|
Apache License 2.0
|
almost-matching-exactly/dame-flame-python-package
|
dame_flame/flame_dame_helpers.py
|
verbose_output
|
python
|
def verbose_output(iteration_number, num_matched_groups, num_unmatched_t,
num_unmatched, orig_len_df_all, tot_treated, pe,
prev_iter_num_unmatched, curr_covar_set):
print("Iteration number: ", iteration_number)
print("\tNumber of matched groups formed in total: ", num_matched_groups)
print("\tUnmatched treated units: ", num_unmatched_t, "out of a total of ",
tot_treated, "treated units")
print("\tUnmatched control units: ", num_unmatched-num_unmatched_t,
"out of a total of ", orig_len_df_all-tot_treated, "control units")
print("\tPredictive error of covariates chosen this iteration: ", pe)
print("\tNumber of matches made in this iteration: ",
prev_iter_num_unmatched - num_unmatched)
print("\tNumber of matches made so far: ",
orig_len_df_all - num_unmatched)
print("\tIn this iteration, the covariates dropped are: ", curr_covar_set)
|
Prints progress of matching algorithm along various metrics
|
https://github.com/almost-matching-exactly/dame-flame-python-package/blob/b2e53a154ff514fbcbead2e8be36a8c769035015/dame_flame/flame_dame_helpers.py#L20-L37
|
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
|
MIT License
|
eeyhan/onlinestudy
|
OnlineStudy/generic/views.py
|
CourseCommonquestionView.get
|
python
|
def get(self, request, pk):
commonquestion = models.Course.objects.filter(id=pk).first().common_question.all().order_by('id')
res = serializers.CommonQuestionSerializer(commonquestion, many=True)
return Response(res.data)
|
coursecomment = models.CommonQuestion.objects.filter(object_id=pk).first().content_object.common_question.all()
|
https://github.com/eeyhan/onlinestudy/blob/b8abfc7b4f2466e595be801bd9a19a509e03534e/OnlineStudy/generic/views.py#L153-L157
|
from rest_framework.views import APIView
from rest_framework.response import Response
from django.utils.timezone import now
from generic import serializers
from generic import models
from django.db.models import F
from utils.Auther import Auther
from utils.redis_pool import POOL
from utils.BaseResponse import BaseResponse
import redis
import json
import time
class CategoryView(APIView):
def get(self, request):
category = models.Category.objects.all()
res = serializers.CategorySerializer(category, many=True)
return Response(res.data)
class CourseView(APIView):
def get(self, request):
cid = request.query_params.get('cid')
query = request.query_params.get('query')
if cid:
cid = eval(cid)
if cid == 0:
course = models.Course.objects.filter(degree_course__isnull=True).all()
course = self.order_query(query, course)
else:
course = models.Course.objects.filter(category_id=cid, degree_course__isnull=True).all().order_by(
'category_id')
course = self.order_query(query, course)
res = serializers.CourseSerializer(course, many=True)
if query:
temp = res.data
temp.clear()
for item in res.data:
if item not in temp:
temp.append(item)
else:
continue
return Response(temp)
return Response(res.data)
def order_query(self, query, course):
if query:
if query == 'hot':
course = course.order_by('-study_number')
elif query == 'price':
course = course.order_by('price_policy__price').distinct()
elif query == '-price':
course = course.order_by('price_policy__price').distinct()
course = course.reverse()
return course
class DegreeView(APIView):
def get(self, request):
cid = request.query_params.get('cid')
query = request.query_params.get('query')
if cid:
cid = eval(cid)
if cid == 0:
course = models.Course.objects.filter(degree_course__isnull=False).all()
course = self.order_query(query, course)
else:
course = models.Course.objects.filter(category_id=cid, degree_course__isnull=False).all().order_by(
'category_id')
course = self.order_query(query, course)
res = serializers.CourseSerializer(course, many=True)
if query:
temp = res.data
temp.clear()
for item in res.data:
if item not in temp:
temp.append(item)
else:
continue
return Response(temp)
return Response(res.data)
def order_query(self, query, course):
if query:
if query == 'hot':
course = course.order_by('-study_number')
elif query == 'price':
course = course.order_by('price_policy__price').distinct()
elif query == '-price':
course = course.order_by('price_policy__price').distinct()
course = course.reverse()
return course
class CourseDetailView(APIView):
def get(self, request, pk):
course_detail = models.CourseDetail.objects.filter(course_id=pk)
res = serializers.CourseDetailSerializer(course_detail, many=True)
return Response(res.data)
class CourseChapterView(APIView):
def get(self, request, pk):
coursechapter = models.CourseChapter.objects.filter(course_id=pk).all().order_by('chapter')
res = serializers.ChapterSerializer(coursechapter, many=True)
return Response(res.data)
class CourseCommentView(APIView):
def get(self, request, pk):
coursecomment = models.Course.objects.filter(id=pk).first().comment.all().order_by('-id')
res = serializers.CommentSerializer(coursecomment, many=True)
return Response(res.data)
class CourseCommonquestionView(APIView):
|
MIT License
|
solararbiter/solarforecastarbiter-core
|
solarforecastarbiter/reports/figures/plotly_figures.py
|
formatted_interval
|
python
|
def formatted_interval(interval):
if (interval % np.timedelta64(1, 'h') == 0):
return f'{np.timedelta64(interval, "h").astype(int)}h'
else:
return f'{np.timedelta64(interval, "m").astype(int)}m'
|
Converts an interval_length timedelta into a string for display
Parameters
----------
interval: np.timedelta64
Returns
-------
str
The interval as a string, displayed in the largest units possible
without mixing units (up to days)
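A short hedged example of the expected inputs and outputs:
import numpy as np

formatted_interval(np.timedelta64(60, 'm'))  # -> '1h' (whole hours collapse to hour units)
formatted_interval(np.timedelta64(15, 'm'))  # -> '15m'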
|
https://github.com/solararbiter/solarforecastarbiter-core/blob/24a3aa68a30e0813382cf98f4db558b88dd49942/solarforecastarbiter/reports/figures/plotly_figures.py#L349-L365
|
import base64
import calendar
from copy import deepcopy
import datetime as dt
from itertools import cycle
from pathlib import Path
import logging
import pandas as pd
from plotly import __version__ as plotly_version
import plotly.graph_objects as go
import numpy as np
from matplotlib import cm
from matplotlib.colors import Normalize
from solarforecastarbiter import datamodel
from solarforecastarbiter.metrics.event import _event2count
import solarforecastarbiter.plotting.utils as plot_utils
logger = logging.getLogger(__name__)
D3_PALETTE = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf', '#aec7e8', '#ffbb78',
'#98df8a', '#ff9896', '#c5b0d5', '#c49c94', '#f7b6d2', '#c7c7c7',
'#dbdb8d', '#9edae5']
PALETTE = (D3_PALETTE[::2] + D3_PALETTE[1::2])
def gen_grays(num_colors):
rgb_delta = int(255/num_colors + 1)
color_list = ["#{h}{h}{h}".format(h=hex(i*rgb_delta)[2:])
for i in range(num_colors)]
return color_list
_num_obs_colors = 3
OBS_PALETTE = gen_grays(_num_obs_colors)
OBS_PALETTE.reverse()
OBS_PALETTE_TD_RANGE = pd.timedelta_range(
freq='10min', end='60min', periods=_num_obs_colors)
PROBABILISTIC_PALETTES = ['viridis', 'plasma', 'inferno', 'magma', 'cividis']
PLOT_BGCOLOR = '#FFF'
PLOT_MARGINS = {'l': 50, 'r': 50, 'b': 50, 't': 100, 'pad': 4}
PLOT_LAYOUT_DEFAULTS = {
'autosize': True,
'height': 250,
'margin': PLOT_MARGINS,
'plot_bgcolor': PLOT_BGCOLOR,
'title_font_size': 16,
'font': {'size': 14}
}
X_LABEL_HEIGHT_FACTOR = 11
try:
with open(Path(__file__).parent / 'fail.pdf', 'rb') as f:
fail_pdf = base64.a85encode(f.read()).decode()
except Exception:
fail_pdf = ',u@!!/MSk8$73+IY58P_+>=pV@VQ644<Q:NASu.&BHT/T0Ha7#+<Vd[7VQ[\\ATAnH7VlLTAOL*>De*Dd5!B<pFE1r$D$kNX1K6%.6<uqiV.X\\GOIXKoa;)c"!&3^A=pehYA92j5ARTE_ASu$s@VQ6-+>=pV@VQ5m+<WEu$>"*cDdmGg1E\\@oDdmGg4?Ns74pkk=A8bpl$8N_X+E(_($9UEn03!49AKWX&@:s-o,p4oL+<Vd[:gnBUDKI!U+>=p9$6UH6026"gBjj>HGT^350H`%l0J5:A+>>E,2\'?03+<Vd[6Z6jaASuU2+>b2p+ArOh+<W=-Ec6)>+?Van+<VdL+<W=:H#R=;01U&$F`7[1+<VdL+>6Y902ut#DKBc*Eb0,uGmYZ:+<VdL01d:.Eckq#+<VdL+<W=);]m_]AThctAPu#b$6UH65!B;r+<W=8ATMd4Ear[%+>Y,o+ArP14pkk=A8bpl$8EYW+E(_($9UEn03!49AKWX&@:s.m$6UH602$"iF!+[01*A7n;BT6P+<Vd[6Z7*bF<E:F5!B<bDIdZpC\'ljA0Hb:CC\'m\'c+>6Q3De+!#ATAnA@ps(lD]gbe0fCX<+=LoFFDu:^0/$gDBl\\-)Ea`p#Bk)3:DfTJ>.1.1?+>6*&ART[pDf.sOFCcRC6om(W1,(C>0K1^?0ebFC/MK+20JFp_5!B<bDIdZpC\'lmB0Hb:CC\'m\'c+>6]>E+L.F6Xb(FCi<qn+<Vd[:gn!JF!*1[0Ha7#5!B<bDIdZpC\'o3+AS)9\'+?0]^0JG170JG170H`822)@*4AfqF70JG170JG:B0d&/(0JG1\'DBK9?0JG170JG4>0d&/(0JG1\'DBK9?0JG170JG4<0H`&\'0JG1\'DBK9?0JG170JG182\'=S,0JG1\'DBK9?0JG170JG493?U"00JG1\'DBK9?0JG170JG=?2BX\\-0JG1\'DBK9?0JG170JG@B1*A8)0JG1\'DBK:.Ea`ZuATA,?4<Q:UBmO>53!pcN+>6W2Dfd*\\+>=p9$6UH601g%nD]gq\\0Ha7#5!B<pFCB33G]IA-$8sUq$7-ue:IYZ'
def _value_frame_dict(idx, pfxobs, column=None):
if column is None:
forecast_values = pfxobs.forecast_values
else:
if pfxobs.forecast_values is not None:
forecast_values = pfxobs.forecast_values[column]
else:
forecast_values = None
value_frame_dict = {
'pair_index': idx,
'observation_values': pfxobs.observation_values,
'forecast_values': forecast_values,
}
return value_frame_dict
def _meta_row_dict(idx, pfxobs, **kwargs):
forecast_object = kwargs.pop('forecast_object', None)
if forecast_object is None:
forecast_object = pfxobs.original.forecast
if (isinstance(forecast_object,
datamodel.ProbabilisticForecastConstantValue)
and
isinstance(pfxobs.original.forecast,
datamodel.ProbabilisticForecast)):
distribution = str(hash((
pfxobs.original.forecast,
pfxobs.original.forecast.interval_length,
pfxobs.original.forecast.interval_value_type,
pfxobs.original.forecast.interval_label)))
else:
distribution = None
try:
axis = forecast_object.axis
except AttributeError:
axis = None
try:
constant_value = forecast_object.constant_value
except AttributeError:
constant_value = None
meta = {
'pair_index': idx,
'observation_name': _obs_name(pfxobs.original),
'forecast_name': _fx_name(
forecast_object, pfxobs.original.data_object),
'interval_label': pfxobs.interval_label,
'interval_length': pfxobs.interval_length,
'forecast_type': pfxobs.original.__class__.__name__,
'axis': axis,
'constant_value': constant_value,
'observation_hash': str(hash((
pfxobs.original.data_object,
pfxobs.interval_length,
pfxobs.interval_value_type,
pfxobs.interval_label))),
'forecast_hash': str(hash((
forecast_object,
pfxobs.interval_length,
pfxobs.interval_value_type,
pfxobs.interval_label))),
'observation_color': _obs_color(
pfxobs.interval_length),
'distribution': distribution
}
meta.update(kwargs)
return meta
def construct_timeseries_dataframe(report):
value_frames = []
meta_rows = []
idx = 0
for pfxobs in report.raw_report.processed_forecasts_observations:
if isinstance(pfxobs.original.forecast,
datamodel.ProbabilisticForecast):
for cvfx in pfxobs.original.forecast.constant_values:
value_frame_dict = _value_frame_dict(
idx, pfxobs, column=str(cvfx.constant_value))
if value_frame_dict['forecast_values'] is None:
continue
meta_row_dict = _meta_row_dict(
idx, pfxobs,
forecast_object=cvfx,
forecast_type='ProbabilisticForecast')
value_frames.append(pd.DataFrame(value_frame_dict))
meta_rows.append(meta_row_dict)
idx += 1
else:
value_frame_dict = _value_frame_dict(idx, pfxobs)
if value_frame_dict['forecast_values'] is None:
continue
meta_row_dict = _meta_row_dict(idx, pfxobs)
value_frames.append(pd.DataFrame(value_frame_dict))
meta_rows.append(meta_row_dict)
idx += 1
if value_frames:
data = pd.concat(value_frames)
else:
data = pd.DataFrame()
metadata = pd.DataFrame(meta_rows)
data = data.tz_convert(report.raw_report.timezone)
data = data.rename_axis('timestamp')
return data, metadata
def _fill_timeseries(df, interval_length):
if not df.index.empty:
start = df.index[0]
end = df.index[-1]
freq_mins = int(interval_length / np.timedelta64(1, 'm'))
filled_idx = pd.date_range(start, end, freq=f'{freq_mins}min')
return df.reindex(filled_idx)
else:
return df
def _obs_name(fx_obs):
name = fx_obs.data_object.name
if fx_obs.forecast.name == fx_obs.data_object.name:
if isinstance(fx_obs.data_object, datamodel.Observation):
name += ' Observation'
else:
name += ' Aggregate'
return name
def _fx_name(forecast, data_object):
forecast_name = forecast.name
if isinstance(forecast, datamodel.ProbabilisticForecastConstantValue):
if forecast.axis == 'x':
forecast_name += f' Prob(x <= {forecast.constant_value} {forecast.units})'
else:
forecast_name += f' Prob(f <= x) = {forecast.constant_value}%'
if forecast_name == data_object.name:
forecast_name += ' Forecast'
return forecast_name
def _obs_color(interval_length):
idx = np.searchsorted(OBS_PALETTE_TD_RANGE, interval_length)
obs_color = OBS_PALETTE[idx]
return obs_color
def _boolean_filter_indices_by_pair(value_cds, pair_index):
return value_cds.data['pair_index'] == pair_index
def _none_or_values0(metadata, key):
value = metadata.get(key)
if value is not None:
value = value.values[0]
return value
def _extract_metadata_from_df(metadata_df, hash_, hash_key, keep_pairs=False):
metadata = metadata_df[metadata_df[hash_key] == hash_]
if keep_pairs:
pair_index = metadata['pair_index']
else:
pair_index = metadata['pair_index'].values[0]
meta = {
'pair_index': pair_index,
'observation_name': metadata['observation_name'].values[0],
'forecast_name': metadata['forecast_name'].values[0],
'interval_label': metadata['interval_label'].values[0],
'interval_length': metadata['interval_length'].values[0],
'observation_color': metadata['observation_color'].values[0],
}
meta['forecast_type'] = _none_or_values0(metadata, 'forecast_type')
meta['axis'] = _none_or_values0(metadata, 'axis')
meta['constant_value'] = _none_or_values0(metadata, 'constant_value')
return meta
def _legend_text(name, max_length=20):
if len(name) > max_length:
temp = []
new = []
for part in name.split(' '):
if len(' '.join(temp + [part])) > max_length:
new.append(' '.join(temp))
temp = [part]
else:
temp.append(part)
if temp:
new.append(' '.join(temp))
return '<br>'.join(new)
else:
return name
|
MIT License
|
argoproj-labs/argo-client-python
|
argo/workflows/client/api/workflow_service_api.py
|
WorkflowServiceApi.get_workflow
|
python
|
def get_workflow(self, namespace, name, **kwargs):
kwargs['_return_http_data_only'] = True
return self.get_workflow_with_http_info(namespace, name, **kwargs)
|
get_workflow # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_workflow(namespace, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: (required)
:param str name: (required)
:param str get_options_resource_version: When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param str fields: Fields to be included or excluded in the response. e.g. \"spec,status.phase\", \"-status.nodes\".
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1Workflow
If the method is called asynchronously,
returns the request thread.
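For comparison with the async example above, a hedged synchronous call might look like this; the namespace, workflow name, and timeout values are placeholders, and a reachable Argo server is assumed:
# Illustrative only; values are placeholders.
api = WorkflowServiceApi()
workflow = api.get_workflow('argo', 'my-workflow', _request_timeout=(5, 30))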
|
https://github.com/argoproj-labs/argo-client-python/blob/993d684cab39a834770b296e028519cec035c7b5/argo/workflows/client/api/workflow_service_api.py#L316-L341
|
from __future__ import absolute_import
import re
import six
from argo.workflows.client.api_client import ApiClient
from argo.workflows.client.exceptions import (
ApiTypeError,
ApiValueError
)
class WorkflowServiceApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_workflow(self, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
return self.create_workflow_with_http_info(namespace, body, **kwargs)
def create_workflow_with_http_info(self, namespace, body, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_workflow" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `create_workflow`")
if self.api_client.client_side_validation and ('body' not in local_var_params or
local_var_params['body'] is None):
raise ApiValueError("Missing the required parameter `body` when calling `create_workflow`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/v1/workflows/{namespace}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1Workflow',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_workflow(self, namespace, name, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_workflow_with_http_info(namespace, name, **kwargs)
def delete_workflow_with_http_info(self, namespace, name, **kwargs):
local_var_params = locals()
all_params = [
'namespace',
'name',
'delete_options_grace_period_seconds',
'delete_options_preconditions_uid',
'delete_options_preconditions_resource_version',
'delete_options_orphan_dependents',
'delete_options_propagation_policy',
'delete_options_dry_run'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_workflow" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('namespace' not in local_var_params or
local_var_params['namespace'] is None):
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_workflow`")
if self.api_client.client_side_validation and ('name' not in local_var_params or
local_var_params['name'] is None):
raise ApiValueError("Missing the required parameter `name` when calling `delete_workflow`")
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace']
if 'name' in local_var_params:
path_params['name'] = local_var_params['name']
query_params = []
if 'delete_options_grace_period_seconds' in local_var_params and local_var_params['delete_options_grace_period_seconds'] is not None:
query_params.append(('deleteOptions.gracePeriodSeconds', local_var_params['delete_options_grace_period_seconds']))
if 'delete_options_preconditions_uid' in local_var_params and local_var_params['delete_options_preconditions_uid'] is not None:
query_params.append(('deleteOptions.preconditions.uid', local_var_params['delete_options_preconditions_uid']))
if 'delete_options_preconditions_resource_version' in local_var_params and local_var_params['delete_options_preconditions_resource_version'] is not None:
query_params.append(('deleteOptions.preconditions.resourceVersion', local_var_params['delete_options_preconditions_resource_version']))
if 'delete_options_orphan_dependents' in local_var_params and local_var_params['delete_options_orphan_dependents'] is not None:
query_params.append(('deleteOptions.orphanDependents', local_var_params['delete_options_orphan_dependents']))
if 'delete_options_propagation_policy' in local_var_params and local_var_params['delete_options_propagation_policy'] is not None:
query_params.append(('deleteOptions.propagationPolicy', local_var_params['delete_options_propagation_policy']))
if 'delete_options_dry_run' in local_var_params and local_var_params['delete_options_dry_run'] is not None:
query_params.append(('deleteOptions.dryRun', local_var_params['delete_options_dry_run']))
collection_formats['deleteOptions.dryRun'] = 'multi'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = []
return self.api_client.call_api(
'/api/v1/workflows/{namespace}/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object',
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
Apache License 2.0
|
schemaorg/sdopythonapp
|
lib/cloudstorage/storage_api.py
|
ReadBuffer.__getstate__
|
python
|
def __getstate__(self):
return {'api': self._api,
'path': self._path,
'buffer_size': self._buffer_size,
'request_size': self._max_request_size,
'etag': self._etag,
'size': self._file_size,
'offset': self._offset,
'closed': self.closed}
|
Store state as part of serialization/pickling.
The contents of the read buffer are not stored, only the current offset for
data read by the client. A new read buffer is established at unpickling.
The head information for the object (file size and etag) is stored to
reduce startup time and to ensure the file has not changed.
Returns:
A dictionary with the state of this object
|
https://github.com/schemaorg/sdopythonapp/blob/128be97d359178b26e5211a3e758933ff3a7b3df/lib/cloudstorage/storage_api.py#L270-L288
|
__all__ = ['ReadBuffer',
'StreamingBuffer',
]
import collections
import os
import urlparse
from . import api_utils
from . import common
from . import errors
from . import rest_api
try:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.api import app_identity
def _get_storage_api(retry_params, account_id=None):
api = _StorageApi(_StorageApi.full_control_scope,
service_account_id=account_id,
retry_params=retry_params)
service_account = app_identity.get_service_account_name()
if (common.local_run() and not common.get_access_token()
and (not service_account or service_account.endswith('@localhost'))):
api.api_url = common.local_api_url()
if common.get_access_token():
api.token = common.get_access_token()
return api
class _StorageApi(rest_api._RestApi):
api_url = 'https://storage.googleapis.com'
read_only_scope = 'https://www.googleapis.com/auth/devstorage.read_only'
read_write_scope = 'https://www.googleapis.com/auth/devstorage.read_write'
full_control_scope = 'https://www.googleapis.com/auth/devstorage.full_control'
def __getstate__(self):
return (super(_StorageApi, self).__getstate__(), {'api_url': self.api_url})
def __setstate__(self, state):
superstate, localstate = state
super(_StorageApi, self).__setstate__(superstate)
self.api_url = localstate['api_url']
@api_utils._eager_tasklet
@ndb.tasklet
def do_request_async(self, url, method='GET', headers=None, payload=None,
deadline=None, callback=None):
if headers is None:
headers = {}
if 'x-goog-api-version' not in headers:
headers['x-goog-api-version'] = '2'
headers['accept-encoding'] = 'gzip, *'
try:
resp_tuple = yield super(_StorageApi, self).do_request_async(
url, method=method, headers=headers, payload=payload,
deadline=deadline, callback=callback)
except urlfetch.DownloadError as e:
raise errors.TimeoutError(
'Request to Google Cloud Storage timed out.', e)
raise ndb.Return(resp_tuple)
def post_object_async(self, path, **kwds):
return self.do_request_async(self.api_url + path, 'POST', **kwds)
def put_object_async(self, path, **kwds):
return self.do_request_async(self.api_url + path, 'PUT', **kwds)
def get_object_async(self, path, **kwds):
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def delete_object_async(self, path, **kwds):
return self.do_request_async(self.api_url + path, 'DELETE', **kwds)
def head_object_async(self, path, **kwds):
return self.do_request_async(self.api_url + path, 'HEAD', **kwds)
def get_bucket_async(self, path, **kwds):
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def compose_object(self, file_list, destination_file, content_type):
xml_setting_list = ['<ComposeRequest>']
for meta_data in file_list:
xml_setting_list.append('<Component>')
for key, val in meta_data.iteritems():
xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
xml_setting_list.append('</Component>')
xml_setting_list.append('</ComposeRequest>')
xml = ''.join(xml_setting_list)
if content_type is not None:
headers = {'Content-Type': content_type}
else:
headers = None
status, resp_headers, content = self.put_object(
api_utils._quote_filename(destination_file) + '?compose',
payload=xml,
headers=headers)
errors.check_status(status, [200], destination_file, resp_headers,
body=content)
_StorageApi = rest_api.add_sync_methods(_StorageApi)
class ReadBuffer(object):
DEFAULT_BUFFER_SIZE = 1024 * 1024
MAX_REQUEST_SIZE = 30 * DEFAULT_BUFFER_SIZE
def __init__(self,
api,
path,
buffer_size=DEFAULT_BUFFER_SIZE,
max_request_size=MAX_REQUEST_SIZE,
offset=0):
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
assert buffer_size <= max_request_size
self._buffer_size = buffer_size
self._max_request_size = max_request_size
self._offset = offset
self._buffer = _Buffer()
self._etag = None
get_future = self._get_segment(offset, self._buffer_size, check_response=False)
status, headers, content = self._api.head_object(path)
errors.check_status(status, [200], path, resp_headers=headers, body=content)
self._file_size = long(common.get_stored_content_length(headers))
self._check_etag(headers.get('etag'))
self._buffer_future = None
if self._file_size != 0:
content, check_response_closure = get_future.get_result()
check_response_closure()
self._buffer.reset(content)
self._request_next_buffer()
|
Apache License 2.0
|
lcharlick/python-metallum
|
metallum.py
|
MetallumEntity._dd_element_for_label
|
python
|
def _dd_element_for_label(self, label: str) -> Optional[PyQuery]:
labels = list(self._page('dt').contents())
try:
index = labels.index(label)
except ValueError:
return None
return self._page('dd').eq(index)
|
Data on entity pages are stored in <dt> / <dd> pairs
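A standalone hedged illustration of that <dt>/<dd> pairing; the HTML fragment is invented to mimic an entity page's definition list:
from pyquery import PyQuery

# Hypothetical fragment: each <dt> label is matched to the <dd> at the same index.
page = PyQuery('<dl><dt>Country of origin:</dt><dd>Sweden</dd>'
               '<dt>Formed in:</dt><dd>1989</dd></dl>')
labels = list(page('dt').contents())
print(page('dd').eq(labels.index('Formed in:')).text())  # -> 1989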
|
https://github.com/lcharlick/python-metallum/blob/e50bf2ae83ac0bc3928b379714ebf53771ae6ac4/metallum.py#L231-L241
|
import datetime
import json
import re
import time
import os.path
import tempfile
from typing import List, Optional
from urllib.parse import urlencode
import requests_cache
from dateutil import parser as date_parser
from pyquery import PyQuery
from requests_cache.core import remove_expired_responses
CACHE_FILE = os.path.join(tempfile.gettempdir(), 'metallum_cache')
requests_cache.install_cache(cache_name=CACHE_FILE, expire_after=300)
remove_expired_responses()
BASE_URL = 'https://www.metal-archives.com'
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36'
BR = '<br/>'
CR = ' '
REQUEST_TIMEOUT = 1.0
UTC_OFFSET = 4
def map_params(params, m):
res = {}
for k, v in params.items():
if v is not None:
res[m.get(k, k)] = v
return res
def band_for_id(id: str) -> 'Band':
return Band('bands/_/{0}'.format(id))
def band_search(name, strict=True, genre=None, countries=[], year_created_from=None,
year_created_to=None, status=[], themes=None, location=None, label=None,
page_start=0) -> 'Search':
params = locals()
params['strict'] = str(int(params['strict']))
params = map_params(params, {
'name': 'bandName',
'strict': 'exactBandMatch',
'countries': 'country[]',
'year_created_from': 'yearCreationFrom',
'year_created_to': 'yearCreationTo',
'status': 'status[]',
'label': 'bandLabelName',
'page_start': 'iDisplayStart'
})
url = 'search/ajax-advanced/searching/bands/?' + urlencode(params, True)
return Search(url, BandResult)
def album_for_id(id: str) -> 'AlbumWrapper':
return AlbumWrapper(url='albums/_/_/{0}'.format(id))
def album_search(title, strict=True, band=None, band_strict=True, year_from=None,
year_to=None, month_from=None, month_to=None, countries=[], location=None, label=None,
indie_label=False, genre=None, types=[], page_start=0) -> 'Search':
params = locals()
params['strict'] = str(int(params['strict']))
params['band_strict'] = str(int(params['band_strict']))
params['indie_label'] = str(int(params['indie_label']))
if year_from and not month_from:
params['month_from'] = '1'
if year_to and not month_to:
params['month_to'] = '12'
params = map_params(params, {
'title': 'releaseTitle',
'strict': 'exactReleaseMatch',
'band': 'bandName',
'band_strict': 'exactBandMatch',
'year_from': 'releaseYearFrom',
'year_to': 'releaseYearTo',
'month_from': 'releaseMonthFrom',
'month_to': 'releaseMonthTo',
'countries': 'country[]',
'label': 'releaseLabelName',
'indie_label': 'indieLabel',
'types': 'releaseType[]',
'page_start': 'iDisplayStart'
})
url = 'search/ajax-advanced/searching/albums/?' + urlencode(params, True)
return Search(url, AlbumResult)
def lyrics_for_id(id: int) -> 'Lyrics':
return Lyrics(id)
def split_genres(s: str) -> List[str]:
return re.split(r'(?:,|;)\s*(?![^()]*\))', s)
class AlbumTypes(object):
FULL_LENGTH = 'Full-length'
EP = 'EP'
SINGLE = 'Single'
DEMO = 'Demo'
VIDEO = 'Video/VHS'
COMPILATION = 'Compilation'
DVD = 'DVD'
LIVE = 'Live album'
SPLIT = 'Split'
def make_absolute(endpoint: str) -> str:
return '{0}/{1}'.format(BASE_URL, endpoint)
def offset_time(t: datetime.datetime) -> datetime.datetime:
td = datetime.timedelta(hours=UTC_OFFSET)
return t + td
def parse_duration(s: str) -> int:
parts = s.split(':')
seconds = int(parts[-1])
if len(parts) > 1:
seconds += int(parts[-2]) * 60
if len(parts) == 3:
seconds += int(parts[0]) * 3600
return seconds
class Metallum(object):
def __init__(self, url):
self._session = requests_cache.CachedSession(cache_name=CACHE_FILE)
self._session.hooks = {'response': self._make_throttle_hook()}
self._session.headers = {
'User-Agent': USER_AGENT,
'Accept-Encoding': 'gzip'
}
self._content = self._fetch_page_content(url)
self._page = PyQuery(self._content)
def _make_throttle_hook(self):
def hook(response, *args, **kwargs):
is_cached = getattr(response, 'from_cache', False)
if not is_cached:
time.sleep(REQUEST_TIMEOUT)
return response
return hook
def _fetch_page_content(self, url) -> str:
res = self._session.get(make_absolute(url))
return res.text
class MetallumEntity(Metallum):
|
MIT License
|
yschoi-nisp/ai-grand-challenge-2020
|
pytorch_pretrained_bert/modeling.py
|
PreTrainedBertModel.from_pretrained
|
python
|
def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):
if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
else:
archive_file = pretrained_model_name
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if tempdir:
shutil.rmtree(tempdir)
return model
|
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
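        Example (illustrative only; `BertModel` is one of the Bert subclasses defined later in this module, and the cache path is a hypothetical value):
            >>> model = BertModel.from_pretrained('bert-base-uncased', cache_dir='/tmp/bert')  # doctest: +SKIP
            >>> model.eval()  # doctest: +SKIP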
|
https://github.com/yschoi-nisp/ai-grand-challenge-2020/blob/5059124ba198b656878ad35e97bad6b8e8106698/pytorch_pretrained_bert/modeling.py#L452-L557
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path
from torch.autograd import Variable
import torch.nn.functional as F
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
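            # Layer norm over the last dimension: y = weight * (x - mean) / sqrt(var + eps) + bias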
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
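        # Standard scaled dot-product attention: softmax(Q K^T / sqrt(d_k) + mask) V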
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
|
MIT License
|
aws-samples/aws-textract-comprehend-lex-chatbot
|
src/lambda/lex-manager.py
|
handler
|
python
|
def handler(event, context):
import cfnresponse
logger.info('event: {}'.format(cfnresponse.json_dump_format(event)))
request_type = event.get('RequestType')
resource_properties = event.get('ResourceProperties')
bot_name= resource_properties.get('BotName')
response_status = cfnresponse.SUCCESS
response = {}
response_id = event.get('RequestId')
reason = request_type
error = ''
should_delete = resource_properties.get('ShouldDelete', True)
if (request_type in ['Create', 'Update']):
try:
print("here2")
response['status']=create_bot()
if response['status'] =="SUCCESS":
print("Job succeded\n")
response_status = cfnresponse.SUCCESS
else:
response_status = cfnresponse.FAILED
print("Job Failed\n")
except Exception as e:
error = 'failed to {} bot: {}'.format(request_type, e)
pass
if (request_type == 'Delete' and should_delete != 'false'):
try:
            response['status'] = delete_bot(bot_name)
            if response['status'] == "SUCCESS":
                print("Job succeeded\n")
response_status = cfnresponse.SUCCESS
else:
response_status = cfnresponse.FAILED
print("Delete Failed\n")
except Exception as e:
error = 'failed to delete bot: {}'.format(e)
pass
if error:
logger.error(error)
response_status = cfnresponse.FAILED
reason = error
if bool(context):
cfnresponse.send(
event,
context,
response_status,
response,
response_id,
reason
)
|
CloudFormation Custom Resource Lambda Handler
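    An illustrative invocation event (all field values are hypothetical) showing the keys the handler reads:
        {
            "RequestType": "Create",
            "RequestId": "11111111-2222-3333-4444-555555555555",
            "ResourceProperties": {"BotName": "InvoiceBot", "ShouldDelete": "false"}
        }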
|
https://github.com/aws-samples/aws-textract-comprehend-lex-chatbot/blob/7c0038b3d90c3f72a11f967dcf111e2cd3b47af5/src/lambda/lex-manager.py#L70-L127
|
import logging
import json
import boto3
import time
lexclient = boto3.client('lex-models')
DEFAULT_LOGGING_LEVEL = logging.INFO
logging.basicConfig(
format='[%(levelname)s] %(message)s',
level=DEFAULT_LOGGING_LEVEL
)
logger = logging.getLogger(__name__)
logger.setLevel(DEFAULT_LOGGING_LEVEL)
BOT_DEFINITION_FILENAME = 'lambda/InvoiceBot.zip'
BOT_EXPORT_FILENAME = 'bot-definition-export.json'
def create_bot():
with open(BOT_DEFINITION_FILENAME, 'rb') as file_data:
bytes_content = file_data.read()
response = lexclient.start_import(
payload=bytes_content,
resourceType='BOT',
mergeStrategy='OVERWRITE_LATEST')
print("Import id is"+response['importId'])
import_status = lexclient.get_import(
importId=response['importId'])
    while import_status['importStatus'] == 'IN_PROGRESS':
        time.sleep(2)  # brief pause so the loop does not hammer the Lex API while the import runs
        import_status = lexclient.get_import(importId=response['importId'])
        print("Bot creation is in progress")
if import_status['importStatus'] == 'COMPLETE':
return "SUCCESS"
else:
return "FAILURE"
def delete_bot(bot_name=None):
bot_aliases = lexclient.get_bot_aliases(botName=bot_name)['BotAliases']
for alias in bot_aliases:
print("Deleting Alias"+alias)
response = lexclient.delete_bot_alias(name=alias,botName=bot_name)
time.sleep(5)
response = lexclient.delete_bot(name=bot_name)
return "SUCCESS"
|
Apache License 2.0
|
loop3d/loopstructural
|
LoopStructural/visualisation/lavavu.py
|
LavaVuModelViewer.rotation
|
python
|
def rotation(self,xyz):
self.lv.rotation(xyz)
|
Set the rotation of the viewer
Parameters
----------
        xyz : list-like
            x, y, z rotations
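        Example (illustrative; assumes a viewer constructed as in the surrounding context):
            >>> viewer = LavaVuModelViewer(model)  # doctest: +SKIP
            >>> viewer.rotation = [45.0, 0.0, 0.0]  # doctest: +SKIP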
|
https://github.com/loop3d/loopstructural/blob/7ab33fd63742f4350bf729537bd5b423d6f84274/LoopStructural/visualisation/lavavu.py#L334-L342
|
from .model_plotter import BaseModelPlotter
from LoopStructural.utils import getLogger
from LoopStructural.utils import LoopImportError
from LoopStructural.modelling.features import GeologicalFeature
logger = getLogger(__name__)
import numpy as np
try:
import lavavu
from lavavu.vutils import is_notebook
except ImportError:
raise LoopImportError('lavavu',additional_information="Please install lavavu: pip install lavavu")
_OPEN_VIEWERS = {}
def close_all():
_OPEN_VIEWERS.clear()
return True
class LavaVuModelViewer(BaseModelPlotter):
def __init__(self,model=None, bounding_box=None, nsteps=None, **kwargs):
if lavavu is None:
logger.error("Lavavu isn't installed: pip install lavavu")
return
self._id_name = "{}-{}".format(str(hex(id(self))), len(_OPEN_VIEWERS))
_OPEN_VIEWERS[self._id_name] = self
self.lv = lavavu.Viewer(**kwargs)
self.lv['orthographic'] = True
self.objects = {}
super().__init__(model)
self.bounding_box = bounding_box
self.nsteps = nsteps
if model is not None:
self.bounding_box = model.bounding_box
self.nsteps = model.nsteps
logger.debug("Using bounding box from model")
if self.bounding_box is None or self.nsteps is None:
logger.error("Plot area has not been defined.")
self.bounding_box = np.array(self.bounding_box)
def _parse_kwargs(self,kwargs):
return {k:v for k,v in kwargs.items() if v is not None}
def _add_surface(self,
vertices,
faces,
name,
colour='red',
paint_with=None,
paint_with_value=None,
**kwargs
):
kwargs = self._parse_kwargs(kwargs)
surf = self.lv.triangles(name)
surf.vertices(vertices)
surf.indices(faces)
if paint_with is None:
surf.colours(colour)
surf["opacity"] = kwargs.get('opacity',1)
if paint_with_value is not None:
paint_with = paint_with_value
if paint_with is not None:
surfaceval = np.zeros(vertices.shape[0])
if isinstance(paint_with,GeologicalFeature):
surfaceval[:] = paint_with.evaluate_value(self.model.scale(vertices,inplace=False))
surf.values(surfaceval, 'paint_with')
if callable(paint_with):
surfaceval[:] = paint_with(self.model.scale(vertices))
surf.values(surfaceval, 'paint_with')
if isinstance(paint_with,(float,int)):
surfaceval[:] = paint_with
surf.values(surfaceval, 'paint_with')
surf["colourby"] = 'paint_with'
cmap = kwargs.get('cmap', self.default_cmap)
vmin = kwargs.get('vmin', np.nanmin(surfaceval))
vmax = kwargs.get('vmax', np.nanmax(surfaceval))
surf.colourmap(cmap, range=(vmin, vmax))
def _add_points(self, points, name, value= None, **kwargs):
kwargs = self._parse_kwargs(kwargs)
if points.shape[0] < 1:
raise ValueError("Points array must have at least one element")
if name is None:
name = 'Unnamed points'
p = self.lv.points(name, **kwargs)
p.vertices(points)
if value is not None:
p.values(value,'v')
p['colourby'] = "v"
vmin = kwargs.get('vmin',np.nanmin(value))
vmax = kwargs.get('vmax',np.nanmax(value))
logger.info('vmin {} and vmax {}'.format(vmin,vmax))
cmap = kwargs.get('cmap', self.default_cmap)
p.colourmap(cmap, range=(vmin, vmax))
def _add_vector_marker(self, location, vector, name, symbol_type='arrow',**kwargs):
kwargs = self._parse_kwargs(kwargs)
if location.shape[0] != vector.shape[0]:
raise ValueError("Location and vector arrays must be the same length")
if location.shape[0] < 1:
raise ValueError("Location array must have at least one element")
if name is None:
name = 'Unnamed points'
if symbol_type == 'arrow':
vectorfield = self.lv.vectors(name, **kwargs)
vectorfield.vertices(location)
vectorfield.vectors(vector)
elif symbol_type == 'disk':
scaleshapes = kwargs.get('scaleshapes',np.max(self.model.maximum-self.model.origin)*0.014)
vector /= np.linalg.norm(vector, axis=1)[:, None]
vectorfield = self.lv.shapes(name, scaleshapes=scaleshapes,shapelength=0,**kwargs)
vectorfield.vertices(location)
vectorfield.vectors(vector)
def interactive(self, popout=False):
if is_notebook() and popout is False:
self.lv.control.Panel()
self.lv.control.ObjectList()
self.lv.control.show()
if not is_notebook() or popout:
self.lv.control.Panel()
self.lv.control.ObjectList()
self.lv.interactive()
def set_zscale(self,zscale):
self.lv.modelscale([1,1,zscale])
def set_viewer_rotation(self, rotation):
self.lv.rotate(rotation)
def save(self, fname, **kwargs):
self.lv.image(fname, **kwargs)
def export_to_webgl(self,fname, **kwargs ):
self.lv.webgl(fname,**kwargs)
def display(self, fname=None, **kwargs):
if fname:
self.lv.image(fname, **kwargs)
self.lv.display()
def image(self, name, **kwargs):
self.lv.image(name)
def image_array(self, **kwargs):
return self.lv.rawimage(**kwargs).data
def rotatex(self, r):
self.lv.rotatex(r)
def rotatey(self, r):
self.lv.rotatey(r)
def rotatez(self, r):
self.lv.rotatez(r)
def rotate(self, r):
self.lv.rotate(r)
@property
def rotation(self):
return self.lv['xyzrotate']
@rotation.setter
|
MIT License
|
globocom/globonetworkapi-client-python
|
networkapiclient/Vlan.py
|
Vlan.deallocate
|
python
|
def deallocate(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'The identifier of Vlan is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/deallocate/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml)
|
        Deallocate all relationships of the VLAN.
:param id_vlan: Identifier of the VLAN. Integer value and greater than zero.
:return: None
        :raise InvalidParameterError: VLAN identifier is null or invalid.
:raise VlanError: VLAN is active.
:raise VlanNaoExisteError: VLAN not found.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
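        Example (illustrative; the endpoint, credentials, and VLAN id are hypothetical):
            >>> client = Vlan('http://networkapi.example.com/', 'user', 'password')  # doctest: +SKIP
            >>> client.deallocate(123)  # doctest: +SKIP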
|
https://github.com/globocom/globonetworkapi-client-python/blob/08dc24c54ee3cd6cdcca1fb33fb4796db8118e6f/networkapiclient/Vlan.py#L998-L1020
|
from networkapiclient.Config import IP_VERSION
from networkapiclient.exception import InvalidParameterError
from networkapiclient.GenericClient import GenericClient
from networkapiclient.Pagination import Pagination
from networkapiclient.utils import get_list_map
from networkapiclient.utils import is_valid_int_param
class Vlan(GenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
super(Vlan, self).__init__(networkapi_url, user, password, user_ldap)
def invalidate(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'The identifier of Vlan is invalid or was not informed.')
url = 'vlan/%s/invalidate/%s/' % (str(id_vlan), IP_VERSION.IPv4[0])
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml)
def invalidate_ipv6(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'The identifier of Vlan is invalid or was not informed.')
url = 'vlan/%s/invalidate/%s/' % (str(id_vlan), IP_VERSION.IPv6[0])
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml)
def find_vlans(
self,
number,
name,
iexact,
environment,
net_type,
network,
ip_version,
subnet,
acl,
pagination):
if not isinstance(pagination, Pagination):
raise InvalidParameterError(
u"Invalid parameter: pagination must be a class of type 'Pagination'.")
vlan_map = dict()
vlan_map['start_record'] = pagination.start_record
vlan_map['end_record'] = pagination.end_record
vlan_map['asorting_cols'] = pagination.asorting_cols
vlan_map['searchable_columns'] = pagination.searchable_columns
vlan_map['custom_search'] = pagination.custom_search
vlan_map['numero'] = number
vlan_map['nome'] = name
vlan_map['exato'] = iexact
vlan_map['ambiente'] = environment
vlan_map['tipo_rede'] = net_type
vlan_map['rede'] = network
vlan_map['versao'] = ip_version
vlan_map['subrede'] = subnet
vlan_map['acl'] = acl
url = 'vlan/find/'
code, xml = self.submit({'vlan': vlan_map}, 'POST', url)
key = 'vlan'
return get_list_map(
self.response(
code, xml, [
key, 'redeipv4', 'redeipv6', 'equipamentos']), key)
def list_all(self):
url = 'vlan/all/'
code, xml = self.submit(None, 'GET', url)
key = 'vlan'
return get_list_map(self.response(code, xml, [key]), key)
def listar_por_ambiente(self, id_ambiente):
if not is_valid_int_param(id_ambiente):
raise InvalidParameterError(u'Environment id is none or invalid.')
url = 'vlan/ambiente/' + str(id_ambiente) + '/'
code, xml = self.submit(None, 'GET', url)
key = 'vlan'
return get_list_map(self.response(code, xml, [key]), key)
def alocar(
self,
nome,
id_tipo_rede,
id_ambiente,
descricao,
id_ambiente_vip=None,
vrf=None):
vlan_map = dict()
vlan_map['nome'] = nome
vlan_map['id_tipo_rede'] = id_tipo_rede
vlan_map['id_ambiente'] = id_ambiente
vlan_map['descricao'] = descricao
vlan_map['id_ambiente_vip'] = id_ambiente_vip
vlan_map['vrf'] = vrf
code, xml = self.submit({'vlan': vlan_map}, 'POST', 'vlan/')
return self.response(code, xml)
def insert_vlan(
self,
environment_id,
name,
number,
description,
acl_file,
acl_file_v6,
network_ipv4,
network_ipv6,
vrf=None):
if not is_valid_int_param(environment_id):
raise InvalidParameterError(u'Environment id is none or invalid.')
if not is_valid_int_param(number):
raise InvalidParameterError(u'Vlan number is none or invalid')
vlan_map = dict()
vlan_map['environment_id'] = environment_id
vlan_map['name'] = name
vlan_map['description'] = description
vlan_map['acl_file'] = acl_file
vlan_map['acl_file_v6'] = acl_file_v6
vlan_map['number'] = number
vlan_map['network_ipv4'] = network_ipv4
vlan_map['network_ipv6'] = network_ipv6
vlan_map['vrf'] = vrf
code, xml = self.submit({'vlan': vlan_map}, 'POST', 'vlan/insert/')
return self.response(code, xml)
def edit_vlan(
self,
environment_id,
name,
number,
description,
acl_file,
acl_file_v6,
id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Vlan id is invalid or was not informed.')
if not is_valid_int_param(environment_id):
raise InvalidParameterError(u'Environment id is none or invalid.')
if not is_valid_int_param(number):
raise InvalidParameterError(u'Vlan number is none or invalid')
vlan_map = dict()
vlan_map['vlan_id'] = id_vlan
vlan_map['environment_id'] = environment_id
vlan_map['name'] = name
vlan_map['description'] = description
vlan_map['acl_file'] = acl_file
vlan_map['acl_file_v6'] = acl_file_v6
vlan_map['number'] = number
code, xml = self.submit({'vlan': vlan_map}, 'POST', 'vlan/edit/')
return self.response(code, xml)
def create_vlan(self, id_vlan):
vlan_map = dict()
vlan_map['vlan_id'] = id_vlan
code, xml = self.submit({'vlan': vlan_map}, 'PUT', 'vlan/create/')
return self.response(code, xml)
def allocate_without_network(self, environment_id, name, description, vrf=None):
vlan_map = dict()
vlan_map['environment_id'] = environment_id
vlan_map['name'] = name
vlan_map['description'] = description
vlan_map['vrf'] = vrf
code, xml = self.submit({'vlan': vlan_map}, 'POST', 'vlan/no-network/')
return self.response(code, xml)
def adicionar_permissao(self, id_vlan, nome_equipamento, nome_interface):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Vlan id is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/add/'
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', url)
return self.response(code, xml)
def remover_permissao(self, id_vlan, nome_equipamento, nome_interface):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Vlan id is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/del/'
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', url)
return self.response(code, xml)
def verificar_permissao(self, id_vlan, nome_equipamento, nome_interface):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Vlan id is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/check/'
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', url)
return self.response(code, xml)
def buscar(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Vlan id is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml)
def get(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Parameter id_vlan is invalid. Value: ' +
id_vlan)
url = 'vlan/' + str(id_vlan) + '/network/'
code, xml = self.submit(None, 'GET', url)
return get_list_map(
self.response(
code, xml, [
'redeipv4', 'redeipv6']), 'vlan')
def listar_permissao(self, nome_equipamento, nome_interface):
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', 'vlan/list/')
return self.response(code, xml)
def criar(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Vlan id is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/criar/'
code, xml = self.submit({'vlan': None}, 'PUT', url)
return self.response(code, xml)
def create_ipv4(self, id_network_ipv4):
url = 'vlan/v4/create/'
vlan_map = dict()
vlan_map['id_network_ip'] = id_network_ipv4
code, xml = self.submit({'vlan': vlan_map}, 'POST', url)
return self.response(code, xml)
def create_ipv6(self, id_network_ipv6):
url = 'vlan/v6/create/'
vlan_map = dict()
vlan_map['id_network_ip'] = id_network_ipv6
code, xml = self.submit({'vlan': vlan_map}, 'POST', url)
return self.response(code, xml)
def apply_acl(self, equipments, vlan, environment, network):
vlan_map = dict()
vlan_map['equipments'] = equipments
vlan_map['vlan'] = vlan
vlan_map['environment'] = environment
vlan_map['network'] = network
url = 'vlan/apply/acl/'
code, xml = self.submit({'vlan': vlan_map}, 'POST', url)
return self.response(code, xml)
def confirm_vlan(self, number_net, id_environment_vlan, ip_version=None):
url = 'vlan/confirm/' + str(number_net) + '/' + id_environment_vlan + '/' + str(ip_version)
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml)
def check_number_available(self, id_environment, num_vlan, id_vlan):
url = 'vlan/check_number_available/' + str(id_environment) + '/' + str(num_vlan) + '/' + str(id_vlan)
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml)
def validar(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'The identifier of Vlan is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/validate/' + IP_VERSION.IPv4[0] + '/'
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml)
def validate_ipv6(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'The identifier of Vlan is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/validate/' + IP_VERSION.IPv6[0] + '/'
code, xml = self.submit(None, 'PUT', url)
return self.response(code, xml)
def remove(self, id_vlan):
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Parameter id_vlan is invalid. Value: ' +
id_vlan)
url = 'vlan/' + str(id_vlan) + '/remove/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml)
|
Apache License 2.0
|
mendeley/mendeley-python-sdk
|
mendeley/resources/groups.py
|
Groups.get
|
python
|
def get(self, id):
return super(Groups, self).get(id)
|
Retrieves a group by ID.
:param id: the ID of the group to get.
:return: a :class:`Group <mendeley.models.groups.Group>`.
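        Example (illustrative; assumes an authenticated `session` and a placeholder group ID):
            >>> groups = Groups(session)  # doctest: +SKIP
            >>> group = groups.get('some-group-id')  # doctest: +SKIP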
|
https://github.com/mendeley/mendeley-python-sdk/blob/3794a1024828c4a592ed10bf78265bafde9c35b9/mendeley/resources/groups.py#L15-L22
|
from mendeley.models.groups import Group, GroupMember
from mendeley.resources.base import ListResource, GetByIdResource
class Groups(GetByIdResource, ListResource):
_url = '/groups'
def __init__(self, session):
self.session = session
|
Apache License 2.0
|
pypa/pipenv
|
pipenv/vendor/pexpect/spawnbase.py
|
SpawnBase.__iter__
|
python
|
def __iter__(self):
return iter(self.readline, self.string_type())
|
This is to support iterators over a file-like object.
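        Example (illustrative; `pexpect.spawn` is the usual concrete subclass on POSIX systems):
            >>> child = pexpect.spawn('ls -l')  # doctest: +SKIP
            >>> for line in child:  # doctest: +SKIP
            ...     print(line)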
|
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/vendor/pexpect/spawnbase.py#L483-L486
|
from io import StringIO, BytesIO
import codecs
import os
import sys
import re
import errno
from .exceptions import ExceptionPexpect, EOF, TIMEOUT
from .expect import Expecter, searcher_string, searcher_re
PY3 = (sys.version_info[0] >= 3)
text_type = str if PY3 else unicode
class _NullCoder(object):
@staticmethod
def encode(b, final=False):
return b
@staticmethod
def decode(b, final=False):
return b
class SpawnBase(object):
encoding = None
pid = None
flag_eof = False
def __init__(self, timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, encoding=None, codec_errors='strict'):
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
self.status = None
self.child_fd = -1
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
self.logfile_read = None
self.logfile_send = None
self.maxread = maxread
self.searchwindowsize = searchwindowsize
self.delaybeforesend = 0.05
self.delayafterclose = 0.1
self.delayafterterminate = 0.1
self.delayafterread = 0.0001
self.softspace = False
self.name = '<' + repr(self) + '>'
self.closed = True
self.encoding = encoding
self.codec_errors = codec_errors
if encoding is None:
self._encoder = self._decoder = _NullCoder()
self.string_type = bytes
self.buffer_type = BytesIO
self.crlf = b'\r\n'
if PY3:
self.allowed_string_types = (bytes, str)
self.linesep = os.linesep.encode('ascii')
def write_to_stdout(b):
try:
return sys.stdout.buffer.write(b)
except AttributeError:
return sys.stdout.write(b.decode('ascii', 'replace'))
self.write_to_stdout = write_to_stdout
else:
self.allowed_string_types = (basestring,)
self.linesep = os.linesep
self.write_to_stdout = sys.stdout.write
else:
self._encoder = codecs.getincrementalencoder(encoding)(codec_errors)
self._decoder = codecs.getincrementaldecoder(encoding)(codec_errors)
self.string_type = text_type
self.buffer_type = StringIO
self.crlf = u'\r\n'
self.allowed_string_types = (text_type, )
if PY3:
self.linesep = os.linesep
else:
self.linesep = os.linesep.decode('ascii')
self.write_to_stdout = sys.stdout.write
self.async_pw_transport = None
self._buffer = self.buffer_type()
self._before = self.buffer_type()
def _log(self, s, direction):
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
second_log = self.logfile_send if (direction=='send') else self.logfile_read
if second_log is not None:
second_log.write(s)
second_log.flush()
def _coerce_expect_string(self, s):
if self.encoding is None and not isinstance(s, bytes):
return s.encode('ascii')
return s
def _coerce_send_string(self, s):
if self.encoding is None and not isinstance(s, bytes):
return s.encode('utf-8')
return s
def _get_buffer(self):
return self._buffer.getvalue()
def _set_buffer(self, value):
self._buffer = self.buffer_type()
self._buffer.write(value)
buffer = property(_get_buffer, _set_buffer)
def read_nonblocking(self, size=1, timeout=None):
try:
s = os.read(self.child_fd, size)
except OSError as err:
if err.args[0] == errno.EIO:
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
raise
if s == b'':
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
s = self._decoder.decode(s, final=False)
self._log(s, 'read')
return s
def _pattern_type_err(self, pattern):
raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT' .format(badtype=type(pattern),
badobj=pattern,
goodtypes=', '.join([str(ast) for ast in self.allowed_string_types])
)
)
def compile_pattern_list(self, patterns):
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
compile_flags = re.DOTALL
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for idx, p in enumerate(patterns):
if isinstance(p, self.allowed_string_types):
p = self._coerce_expect_string(p)
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif isinstance(p, type(re.compile(''))):
compiled_pattern_list.append(p)
else:
self._pattern_type_err(p)
return compiled_pattern_list
def expect(self, pattern, timeout=-1, searchwindowsize=-1, async_=False, **kw):
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list,
timeout, searchwindowsize, async_)
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1,
async_=False, **kw):
if timeout == -1:
timeout = self.timeout
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
exp = Expecter(self, searcher_re(pattern_list), searchwindowsize)
if async_:
from ._async import expect_async
return expect_async(exp, timeout)
else:
return exp.expect_loop(timeout)
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
async_=False, **kw):
if timeout == -1:
timeout = self.timeout
if 'async' in kw:
async_ = kw.pop('async')
if kw:
raise TypeError("Unknown keyword arguments: {}".format(kw))
if (isinstance(pattern_list, self.allowed_string_types) or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
def prepare_pattern(pattern):
if pattern in (TIMEOUT, EOF):
return pattern
if isinstance(pattern, self.allowed_string_types):
return self._coerce_expect_string(pattern)
self._pattern_type_err(pattern)
try:
pattern_list = iter(pattern_list)
except TypeError:
self._pattern_type_err(pattern_list)
pattern_list = [prepare_pattern(p) for p in pattern_list]
exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
if async_:
from ._async import expect_async
return expect_async(exp, timeout)
else:
return exp.expect_loop(timeout)
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
exp = Expecter(self, searcher, searchwindowsize)
return exp.expect_loop(timeout)
def read(self, size=-1):
if size == 0:
return self.string_type()
if size < 0:
self.expect(self.delimiter)
return self.before
cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
index = self.expect([cre, self.delimiter])
if index == 0:
return self.after
return self.before
def readline(self, size=-1):
if size == 0:
return self.string_type()
index = self.expect([self.crlf, self.delimiter])
if index == 0:
return self.before + self.crlf
else:
return self.before
|
MIT License
|
gkno/gkno_launcher
|
src/networkx/classes/function.py
|
set_node_attributes
|
python
|
def set_node_attributes(G,name,attributes):
for node,value in attributes.items():
G.node[node][name]=value
|
Set node attributes from dictionary of nodes and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
attributes: dict
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.path_graph(3)
>>> bb=nx.betweenness_centrality(G)
>>> nx.set_node_attributes(G,'betweenness',bb)
>>> G.node[1]['betweenness']
1.0
|
https://github.com/gkno/gkno_launcher/blob/4210ede8448155d70bfbdbd658125a1d95ea8e95/src/networkx/classes/function.py#L282-L304
|
import networkx as nx
import itertools
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
'number_of_nodes', 'number_of_edges', 'density',
'nodes_iter', 'edges_iter', 'is_directed','info',
'freeze','is_frozen','subgraph','create_empty_copy',
'set_node_attributes','get_node_attributes',
'set_edge_attributes','get_edge_attributes',
'all_neighbors','non_neighbors']
def nodes(G):
return G.nodes()
def nodes_iter(G):
return G.nodes_iter()
def edges(G,nbunch=None):
return G.edges(nbunch)
def edges_iter(G,nbunch=None):
return G.edges_iter(nbunch)
def degree(G,nbunch=None,weight=None):
return G.degree(nbunch,weight)
def neighbors(G,n):
return G.neighbors(n)
def number_of_nodes(G):
return G.number_of_nodes()
def number_of_edges(G):
return G.number_of_edges()
def density(G):
n=number_of_nodes(G)
m=number_of_edges(G)
if m==0 or n <= 1:
d=0.0
else:
if G.is_directed():
d=m/float(n*(n-1))
else:
d= m*2.0/float(n*(n-1))
return d
def degree_histogram(G):
degseq=list(G.degree().values())
dmax=max(degseq)+1
freq= [ 0 for d in range(dmax) ]
for d in degseq:
freq[d] += 1
return freq
def is_directed(G):
return G.is_directed()
def freeze(G):
def frozen(*args):
raise nx.NetworkXError("Frozen graph can't be modified")
G.add_node=frozen
G.add_nodes_from=frozen
G.remove_node=frozen
G.remove_nodes_from=frozen
G.add_edge=frozen
G.add_edges_from=frozen
G.remove_edge=frozen
G.remove_edges_from=frozen
G.clear=frozen
G.frozen=True
return G
def is_frozen(G):
try:
return G.frozen
except AttributeError:
return False
def subgraph(G, nbunch):
return G.subgraph(nbunch)
def create_empty_copy(G,with_nodes=True):
H=G.__class__()
if with_nodes:
H.add_nodes_from(G)
return H
def info(G, n=None):
info=''
if n is None:
info+="Name: %s\n"%G.name
type_name = [type(G).__name__]
info+="Type: %s\n"%",".join(type_name)
info+="Number of nodes: %d\n"%G.number_of_nodes()
info+="Number of edges: %d\n"%G.number_of_edges()
nnodes=G.number_of_nodes()
if len(G) > 0:
if G.is_directed():
info+="Average in degree: %8.4f\n"% (sum(G.in_degree().values())/float(nnodes))
info+="Average out degree: %8.4f"% (sum(G.out_degree().values())/float(nnodes))
else:
s=sum(G.degree().values())
info+="Average degree: %8.4f"% (float(s)/float(nnodes))
else:
if n not in G:
raise nx.NetworkXError("node %s not in graph"%(n,))
info+="Node % s has the following properties:\n"%n
info+="Degree: %d\n"%G.degree(n)
info+="Neighbors: "
info+=' '.join(str(nbr) for nbr in G.neighbors(n))
return info
|
MIT License
|
4dnucleome/partseg
|
package/PartSegCore/analysis/calculation_plan.py
|
FileCalculation.uuid
|
python
|
def uuid(self):
return self.calculation.uuid
|
        UUID of the whole calculation
|
https://github.com/4dnucleome/partseg/blob/f6bb1bb02c006f2e009e873a0e3bad87469cc90e/package/PartSegCore/analysis/calculation_plan.py#L444-L446
|
import logging
import os
import sys
import textwrap
import typing
import uuid
from abc import abstractmethod
from copy import copy, deepcopy
from enum import Enum
from ..algorithm_describe_base import ROIExtractionProfile
from ..class_generator import BaseSerializableClass, enum_register
from ..mask_create import MaskProperty
from ..universal_const import Units
from . import analysis_algorithm_dict
from .measurement_calculation import MeasurementProfile
class MaskBase:
name: str
class RootType(Enum):
Image = 0
Project = 1
Mask_project = 2
def __str__(self):
return self.name.replace("_", " ")
enum_register.register_class(RootType)
class MaskCreate(MaskBase, BaseSerializableClass):
mask_property: MaskProperty
def __str__(self):
return f"Mask create: {self.name}\n" + str(self.mask_property).split("\n", 1)[1]
class MaskUse(MaskBase, BaseSerializableClass):
class MaskSum(MaskBase, BaseSerializableClass):
mask1: str
mask2: str
class MaskIntersection(MaskBase, BaseSerializableClass):
mask1: str
mask2: str
class Save(BaseSerializableClass):
suffix: str
directory: str
algorithm: str
short_name: str
values: dict
class MeasurementCalculate(BaseSerializableClass):
__old_names__ = "StatisticCalculate"
channel: int
units: Units
measurement_profile: MeasurementProfile
name_prefix: str
@typing.overload
def __init__(self, channel: int, units: Units, measurement_profile: MeasurementProfile, name_prefix: str):
...
@property
def name(self):
return self.measurement_profile.name
def __str__(self):
channel = "Like segmentation" if self.channel == -1 else str(self.channel)
desc = str(self.measurement_profile).split("\n", 1)[1]
return f"MeasurementCalculate \nChannel: {channel}\nUnits: {self.units}\n{desc}\n"
def get_save_path(op: Save, calculation: "FileCalculation") -> str:
from PartSegCore.analysis.save_functions import save_dict
extension = save_dict[op.algorithm].get_default_extension()
rel_path = os.path.relpath(calculation.file_path, calculation.base_prefix)
rel_path = os.path.splitext(rel_path)[0]
if op.directory:
file_name = os.path.basename(rel_path)
base_rel_path = os.path.dirname(rel_path)
return os.path.join(calculation.result_prefix, base_rel_path, op.directory, file_name + op.suffix + extension)
return os.path.join(calculation.result_prefix, rel_path + op.suffix + extension)
class MaskMapper:
name: str
@abstractmethod
def get_mask_path(self, file_path: str) -> str:
@abstractmethod
def get_parameters(self):
@staticmethod
def is_ready() -> bool:
return True
class MaskSuffix(MaskMapper, BaseSerializableClass):
suffix: str
@typing.overload
def __init__(self, name: str, suffix: str):
...
def get_mask_path(self, file_path: str) -> str:
base, ext = os.path.splitext(file_path)
return base + self.suffix + ext
def get_parameters(self):
return {"name": self.name, "suffix": self.suffix}
class MaskSub(MaskMapper, BaseSerializableClass):
base: str
rep: str
@typing.overload
def __init__(self, name: str, base: str, rep: str):
...
def get_mask_path(self, file_path: str) -> str:
dir_name, filename = os.path.split(file_path)
filename = filename.replace(self.base, self.rep)
return os.path.join(dir_name, filename)
def get_parameters(self):
return {"name": self.name, "base": self.base, "rep": self.rep}
class MaskFile(MaskMapper, BaseSerializableClass):
path_to_file: str
name_dict: typing.Optional[dict] = None
@typing.overload
def __init__(self, name: str, path_to_file: str, name_dict: typing.Optional[dict] = None):
...
def is_ready(self) -> bool:
return os.path.exists(self.path_to_file)
def get_mask_path(self, file_path: str) -> str:
if self.name_dict is None:
self.parse_map()
try:
return self.name_dict[os.path.normpath(file_path)]
except (KeyError, AttributeError):
return ""
def get_parameters(self):
return {"name": self.name, "path_to_file": self.path_to_file}
def set_map_path(self, value):
self.path_to_file = value
def parse_map(self, sep=";"):
if not os.path.exists(self.path_to_file):
logging.error(f"File does not exists: {self.path_to_file}")
raise ValueError(f"File for mapping mask does not exists: {self.path_to_file}")
with open(self.path_to_file) as map_file:
dir_name = os.path.dirname(self.path_to_file)
for i, line in enumerate(map_file):
try:
file_name, mask_name = line.split(sep)
except ValueError:
logging.error(f"Error in parsing map file\nline {i}\n{line}\nfrom file{self.path_to_file}")
continue
file_name = file_name.strip()
mask_name = mask_name.strip()
if not os.path.abspath(file_name):
file_name = os.path.normpath(os.path.join(dir_name, file_name))
if not os.path.abspath(mask_name):
mask_name = os.path.normpath(os.path.join(dir_name, mask_name))
self.name_dict[file_name] = mask_name
class Operations(Enum):
reset_to_base = 1
class PlanChanges(Enum):
add_node = 1
remove_node = 2
replace_node = 3
class CalculationTree:
def __init__(
self,
operation: typing.Union[BaseSerializableClass, ROIExtractionProfile, MeasurementCalculate, RootType],
children: typing.List["CalculationTree"],
):
if operation == "root":
operation = RootType.Image
self.operation = operation
self.children = children
def __str__(self):
return f"{self.operation}:\n[{'n'.join([str(x) for x in self.children])}]"
def __repr__(self):
return f"CalculationTree(operation={repr(self.operation)}, children={self.children})"
class NodeType(Enum):
segment = 1
mask = 2
measurement = 3
root = 4
save = 5
none = 6
file_mask = 7
class BaseCalculation:
def __init__(
self,
base_prefix: str,
result_prefix: str,
measurement_file_path: str,
sheet_name: str,
calculation_plan: "CalculationPlan",
voxel_size: typing.Sequence[float],
):
self.base_prefix = base_prefix
self.result_prefix = result_prefix
self.measurement_file_path = measurement_file_path
self.sheet_name = sheet_name
self.calculation_plan = calculation_plan
self.uuid = uuid.uuid4()
self.voxel_size = voxel_size
def __repr__(self):
return (
f"{self.__class__.__name__}(calculation_plan={self.calculation_plan}, voxel_size={self.voxel_size}, "
f"base_prefix={self.base_prefix}, result_prefix={self.base_prefix}, "
f"measurement_file_path{self.measurement_file_path}, sheet_name={self.sheet_name})"
)
class Calculation(BaseCalculation):
def __init__(
self, file_list, base_prefix, result_prefix, measurement_file_path, sheet_name, calculation_plan, voxel_size
):
super().__init__(base_prefix, result_prefix, measurement_file_path, sheet_name, calculation_plan, voxel_size)
self.file_list: typing.List[str] = file_list
def get_base_calculation(self) -> BaseCalculation:
base = BaseCalculation(
self.base_prefix,
self.result_prefix,
self.measurement_file_path,
self.sheet_name,
self.calculation_plan,
self.voxel_size,
)
base.uuid = self.uuid
return base
@property
def measurement(self):
return self.calculation_plan.get_measurements()
class FileCalculation:
def __init__(self, file_path: str, calculation: BaseCalculation):
self.file_path = file_path
self.calculation = calculation
@property
def base_prefix(self):
return self.calculation.base_prefix
@property
def result_prefix(self):
return self.calculation.result_prefix
@property
def calculation_plan(self):
return self.calculation.calculation_plan
@property
|
BSD 3-Clause New or Revised License
|
radlab/sparrow
|
deploy/third_party/boto-2.1.1/boto/ses/connection.py
|
SESConnection.send_email
|
python
|
def send_email(self, source, subject, body, to_addresses, cc_addresses=None,
bcc_addresses=None, format='text', reply_addresses=None,
return_path=None, text_body=None, html_body=None):
format = format.lower().strip()
if body is not None:
if format == "text":
if text_body is not None:
raise Warning("You've passed in both a body and a text_body; please choose one or the other.")
text_body = body
else:
if html_body is not None:
raise Warning("You've passed in both a body and an html_body; please choose one or the other.")
html_body = body
params = {
'Source': source,
'Message.Subject.Data': subject,
}
if return_path:
params['ReturnPath'] = return_path
if html_body is not None:
params['Message.Body.Html.Data'] = html_body
if text_body is not None:
params['Message.Body.Text.Data'] = text_body
if(format not in ("text","html")):
raise ValueError("'format' argument must be 'text' or 'html'")
if(not (html_body or text_body)):
raise ValueError("No text or html body found for mail")
self._build_list_params(params, to_addresses,
'Destination.ToAddresses.member')
if cc_addresses:
self._build_list_params(params, cc_addresses,
'Destination.CcAddresses.member')
if bcc_addresses:
self._build_list_params(params, bcc_addresses,
'Destination.BccAddresses.member')
if reply_addresses:
self._build_list_params(params, reply_addresses,
'ReplyToAddresses.member')
return self._make_request('SendEmail', params)
|
Composes an email message based on input data, and then immediately
queues the message for sending.
:type source: string
:param source: The sender's email address.
:type subject: string
:param subject: The subject of the message: A short summary of the
content, which will appear in the recipient's inbox.
:type body: string
:param body: The message body.
:type to_addresses: list of strings or string
:param to_addresses: The To: field(s) of the message.
:type cc_addresses: list of strings or string
:param cc_addresses: The CC: field(s) of the message.
:type bcc_addresses: list of strings or string
:param bcc_addresses: The BCC: field(s) of the message.
:type format: string
:param format: The format of the message's body, must be either "text"
or "html".
:type reply_addresses: list of strings or string
:param reply_addresses: The reply-to email address(es) for the
message. If the recipient replies to the
message, each reply-to address will
receive the reply.
:type return_path: string
:param return_path: The email address to which bounce notifications are
to be forwarded. If the message cannot be delivered
to the recipient, then an error message will be
returned from the recipient's ISP; this message will
then be forwarded to the email address specified by
the ReturnPath parameter.
:type text_body: string
:param text_body: The text body to send with this email.
:type html_body: string
:param html_body: The html body to send with this email.
|
https://github.com/radlab/sparrow/blob/afb8efadeb88524f1394d1abe4ea66c6fd2ac744/deploy/third_party/boto-2.1.1/boto/ses/connection.py#L113-L207
|
from boto.connection import AWSAuthConnection
from boto.exception import BotoServerError
from boto.regioninfo import RegionInfo
import boto
import boto.jsonresponse
import urllib
import base64
class SESConnection(AWSAuthConnection):
ResponseError = BotoServerError
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'email.us-east-1.amazonaws.com'
APIVersion = '2010-12-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/'):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSAuthConnection.__init__(self, self.region.endpoint,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
https_connection_factory, path)
def _required_auth_capability(self):
return ['ses']
def _build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
def _make_request(self, action, params=None):
ct = 'application/x-www-form-urlencoded; charset=UTF-8'
headers = {'Content-Type': ct}
params = params or {}
params['Action'] = action
for k, v in params.items():
if isinstance(v, unicode):
params[k] = v.encode('utf-8')
response = super(SESConnection, self).make_request(
'POST',
'/',
headers=headers,
data=urllib.urlencode(params)
)
body = response.read()
if response.status == 200:
list_markers = ('VerifiedEmailAddresses', 'SendDataPoints')
e = boto.jsonresponse.Element(list_marker=list_markers)
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
|
Apache License 2.0
|
docusign/code-examples-python
|
app/eSignature/examples/eg031_bulk_send/controller.py
|
Eg031Controller.create_bulk_sending_list
|
python
|
def create_bulk_sending_list(cls, args):
bulk_copies = []
for signer in args:
recipient_1 = BulkSendingCopyRecipient(
role_name="signer",
tabs=[],
name=signer["signer_name"],
email=signer["signer_email"]
)
recipient_2 = BulkSendingCopyRecipient(
role_name="cc",
tabs=[],
name=signer["cc_name"],
email=signer["cc_email"]
)
bulk_copy = BulkSendingCopy(
recipients=[recipient_1, recipient_2],
custom_fields=[]
)
bulk_copies.append(bulk_copy)
bulk_sending_list = BulkSendingList(
name="sample",
bulk_copies=bulk_copies
)
return bulk_sending_list
|
1. Create recipient objects for the signers
2. Create recipient objects for the CCs
3. Create the bulk copy objects
4. Create the bulk sending list object (a usage sketch follows this list)
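A minimal sketch of calling the helper directly; the names and addresses are placeholders, not values from the example app:
signers = [
    {"signer_name": "Signer One", "signer_email": "signer1@example.com",
     "cc_name": "CC One", "cc_email": "cc1@example.com"},
]
bulk_sending_list = Eg031Controller.create_bulk_sending_list(signers)
print(bulk_sending_list.name)  # "sample"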
|
https://github.com/docusign/code-examples-python/blob/1e6ca12f6304d01e573a138e103028c23155196a/app/eSignature/examples/eg031_bulk_send/controller.py#L160-L196
|
import base64
from os import path
from docusign_esign import EnvelopesApi, Document, Signer, EnvelopeDefinition, Recipients, BulkEnvelopesApi, TextCustomField, CustomFields, Tabs, SignHere
from docusign_esign.models import BulkSendingCopy, BulkSendingList, BulkSendingCopyRecipient, BulkSendingCopyTab, BulkSendRequest, BulkSendBatchStatus
from flask import request, session
from ....consts import demo_docs_path, pattern
from ....docusign import create_api_client
from ....ds_config import DS_CONFIG
class Eg031Controller:
@staticmethod
def get_args():
signer_email_1 = pattern.sub("", request.form.get("signer_email_1"))
signer_name_1 = pattern.sub("", request.form.get("signer_name_1"))
cc_email_1 = pattern.sub("", request.form.get("cc_email_1"))
cc_name_1 = pattern.sub("", request.form.get("cc_name_1"))
signer_email_2 = pattern.sub("", request.form.get("signer_email_2"))
signer_name_2 = pattern.sub("", request.form.get("signer_name_2"))
cc_email_2 = pattern.sub("", request.form.get("cc_email_2"))
cc_name_2 = pattern.sub("", request.form.get("cc_name_2"))
args = {
"account_id": session["ds_account_id"],
"base_path": session["ds_base_path"],
"access_token": session["ds_access_token"],
"signers": [{
"signer_name": signer_name_1,
"signer_email": signer_email_1,
"cc_email": cc_email_1,
"cc_name": cc_name_1
},
{
"signer_name": signer_name_2,
"signer_email": signer_email_2,
"cc_email": cc_email_2,
"cc_name": cc_name_2
}
]
}
return args
@classmethod
def worker(cls, args):
api_client = create_api_client(base_path=args["base_path"], access_token=args["access_token"])
bulk_envelopes_api = BulkEnvelopesApi(api_client)
bulk_sending_list = cls.create_bulk_sending_list(args["signers"])
bulk_list = bulk_envelopes_api.create_bulk_send_list(
account_id=args["account_id"],
bulk_sending_list=bulk_sending_list
)
bulk_list_id = bulk_list.list_id
envelope_api = EnvelopesApi(api_client)
envelope_definition = cls.make_draft_envelope()
envelope = envelope_api.create_envelope(account_id=args["account_id"], envelope_definition=envelope_definition)
envelope_id = envelope.envelope_id
text_custom_fields = TextCustomField(name="mailingListId", required="false", show="false", value=bulk_list_id)
custom_fields = CustomFields(list_custom_fields=[], text_custom_fields=[text_custom_fields])
envelope_api.create_custom_fields(
account_id=args["account_id"],
envelope_id=envelope_id,
custom_fields=custom_fields
)
recipient_sign_here = SignHere(
anchor_string="/sn1/",
anchor_units="pixels",
anchor_y_offset="10",
anchor_x_offset="20",
tab_label="RecipentTab"
)
cc = Signer(
name="Multi Bulk Recipient::cc",
email="multiBulkRecipients-cc@docusign.com",
role_name="cc",
note="",
routing_order="1",
status="created",
delivery_method="email",
recipient_id="1",
recipient_type="signer"
)
signer = Signer(
name="Multi Bulk Recipient::signer",
email="multiBulkRecipients-signer@docusign.com",
role_name="signer",
note="",
routing_order="1",
status="created",
delivery_method="email",
recipient_id="2",
recipient_type="signer"
)
signer.tabs = Tabs(sign_here_tabs=[recipient_sign_here])
envelope_api.create_recipient(
account_id=args["account_id"],
envelope_id=envelope_id,
recipients=Recipients(signers=[signer, cc])
)
bulk_send_request = BulkSendRequest(envelope_or_template_id=envelope_id)
batch = bulk_envelopes_api.create_bulk_send_request(
account_id=args["account_id"],
bulk_send_list_id=bulk_list_id,
bulk_send_request=bulk_send_request
)
batch_id = batch.batch_id
response = bulk_envelopes_api.get_bulk_send_batch_status(account_id=args["account_id"], bulk_send_batch_id=batch_id)
print(response)
return response
@classmethod
|
MIT License
|
cisco/mindmeld
|
mindmeld/components/_util.py
|
TreeNlp.to_dict
|
python
|
def to_dict(self) -> dict:
self._sync_nodes()
result = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for domain in self.get_domain_nodes():
if domain.mask_state:
result[domain.nlp_name] = defaultdict(lambda: defaultdict(dict))
for intent in self.get_intent_nodes(domain.nlp_name):
if intent.mask_state:
result[domain.nlp_name][intent.nlp_name] = defaultdict(dict)
for entity in self.get_entity_nodes(domain.nlp_name,
intent.nlp_name):
if entity.mask_state:
result[domain.nlp_name][intent.nlp_name][entity.nlp_name] = {}
for role in self.get_role_nodes(domain.nlp_name,
intent.nlp_name,
entity.nlp_name):
if role.mask_state:
result[domain.nlp_name][intent.nlp_name][
entity.nlp_name][role.nlp_name] = {}
serialize_results = self._default_to_regular(result)
return serialize_results
|
This function serializes the TreeNlp into a nested dict, adding keys only for nodes whose
MaskState is allow; nodes whose MaskState is deny or unset are omitted.
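For illustration (with hypothetical domain/intent names): a tree whose store_info domain is set to allow and whose only intent, greet, has no entity nodes would serialize to:
tree_nlp.to_dict()
# -> {'store_info': {'greet': {}}}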
|
https://github.com/cisco/mindmeld/blob/d3a0606b5eaa92733dd12674438d45de4b124c63/mindmeld/components/_util.py#L284-L313
|
import importlib
import logging
import enum
from typing import Union, Optional, List
from collections import defaultdict
from ..exceptions import InvalidMaskError
logger = logging.getLogger(__name__)
def _is_module_available(module_name: str):
return bool(importlib.util.find_spec(module_name) is not None)
def _get_module_or_attr(module_name: str, func_name: str = None):
m = importlib.import_module(module_name)
if not func_name:
return m
if func_name not in dir(m):
raise ImportError(f"Cannot import {func_name} from {module_name}")
return getattr(m, func_name)
class MaskState(enum.Enum):
unset = enum.auto()
allow = enum.auto()
deny = enum.auto()
def __bool__(self):
return self == self.allow
class TreeNode:
def __init__(self, nlp_name: str,
parent: Optional['TreeNode'] = None,
children: Optional[List['TreeNode']] = None,
mask_state: Optional[MaskState] = None):
self.nlp_name = nlp_name
self.mask_state = mask_state
self.parent = parent
self.children = children or []
class TreeNlp:
def __init__(self, nlp, mask_state=MaskState.unset):
self.root = TreeNode('root', mask_state=mask_state)
for domain in nlp.domains:
domain_node = TreeNode(domain, parent=self.root, mask_state=mask_state)
self.root.children.append(domain_node)
for intent in nlp.domains[domain].intents:
intent_node = TreeNode(intent, parent=domain_node, mask_state=mask_state)
domain_node.children.append(intent_node)
entities = nlp.domains[domain].intents[intent].entities
for entity in entities:
entity_node = TreeNode(entity, parent=intent_node, mask_state=mask_state)
intent_node.children.append(entity_node)
for role in entities[entity].role_classifier.roles:
role_node = TreeNode(role, parent=intent_node, mask_state=mask_state)
entity_node.children.append(role_node)
@staticmethod
def _convert_tree_node_to_values(*nlp_components):
result = [None for _ in ['domain', 'intent', 'entity', 'role']]
for idx, component in enumerate(nlp_components):
component_name = component.nlp_name if isinstance(
component, TreeNode) else component
result[idx] = component_name
return result
def get_domain_nodes(self):
return self.root.children or []
def get_intent_nodes(self, domain: Union[str, TreeNode]):
domain, _, _, _ = self._convert_tree_node_to_values(domain)
for domain_node in self.root.children:
if domain_node.nlp_name == domain:
return domain_node.children
return []
def get_entity_nodes(self, domain: Union[str, TreeNode],
intent: Union[str, TreeNode]):
domain, intent, _, _ = self._convert_tree_node_to_values(domain, intent)
for intent_node in self.get_intent_nodes(domain):
if intent_node.nlp_name == intent:
return intent_node.children
return []
def get_role_nodes(self, domain: Union[str, TreeNode],
intent: Union[str, TreeNode],
entity: Union[str, TreeNode]):
domain, intent, entity, _ = self._convert_tree_node_to_values(
domain, intent, entity)
for entity_node in self.get_entity_nodes(domain, intent):
if entity_node.nlp_name == entity:
return entity_node.children
return []
def update(self, mask_state: bool,
domain: Union[str, TreeNode],
intent: Optional[Union[str, TreeNode]] = None,
entity: Optional[Union[str, TreeNode]] = None,
role: Optional[Union[str, TreeNode]] = None):
domain_name, intent_name, entity_name, role_name = self._convert_tree_node_to_values(
domain, intent, entity, role)
nlp_components = [domain_name, intent_name, entity_name, role_name]
for i in range(1, len(nlp_components)):
if any(not component for component in nlp_components[:i]) and nlp_components[i]:
raise InvalidMaskError(
f"Unable to resolve NLP hierarchy since "
f"{str(nlp_components[i])} does not have an valid ancestor")
for domain_node in self.get_domain_nodes():
if domain_node.nlp_name != domain_name:
continue
if not intent_name:
domain_node.mask_state = mask_state
return
for intent_node in self.get_intent_nodes(domain_node.nlp_name):
if intent_name not in ('*', intent_node.nlp_name):
continue
if not entity_name:
intent_node.mask_state = mask_state
if intent_name == '*':
continue
return
for entity_node in self.get_entity_nodes(domain_node.nlp_name,
intent_node.nlp_name):
if entity_name not in ('*', entity_node.nlp_name):
continue
if not role_name:
entity_node.mask_state = mask_state
if entity_name == '*':
continue
return
for role_node in self.get_role_nodes(domain_node.nlp_name,
intent_node.nlp_name,
entity_node.nlp_name):
if role_name not in ('*', role_node.nlp_name):
continue
role_node.mask_state = mask_state
if role_name == '*':
continue
return
def _sync_nodes(self):
for domain in self.get_domain_nodes():
intents = self.get_intent_nodes(domain)
for intent in intents:
if domain.mask_state != MaskState.unset and intent.mask_state == MaskState.unset:
intent.mask_state = domain.mask_state
entities = self.get_entity_nodes(domain, intent)
for entity in entities:
if intent.mask_state != MaskState.unset and entity.mask_state == MaskState.unset:
entity.mask_state = intent.mask_state
roles = self.get_role_nodes(domain, intent, entity)
for role in roles:
if entity.mask_state != MaskState.unset and role.mask_state == MaskState.unset:
role.mask_state = entity.mask_state
if roles and all(role.mask_state == MaskState.deny for role in roles):
entity.mask_state = MaskState.deny
if intents and all(intent.mask_state == MaskState.deny for intent in intents):
domain.mask_state = MaskState.deny
def _default_to_regular(self, d):
if isinstance(d, defaultdict):
d = {k: self._default_to_regular(v) for k, v in d.items()}
return d
|
Apache License 2.0
|
unixsurfer/anycast_healthchecker
|
anycast_healthchecker/utils.py
|
configuration_check
|
python
|
def configuration_check(config):
log_level = config.get('daemon', 'loglevel')
num_level = getattr(logging, log_level.upper(), None)
pidfile = config.get('daemon', 'pidfile')
if not os.path.isdir(os.path.dirname(pidfile)):
raise ValueError("{d} doesn't exit".format(d=os.path.dirname(pidfile)))
if not isinstance(num_level, int):
raise ValueError('Invalid log level: {}'.format(log_level))
for _file in 'log_file', 'stderr_file':
if config.has_option('daemon', _file):
try:
touch(config.get('daemon', _file))
except OSError as exc:
raise ValueError(exc)
for option, getter in DAEMON_OPTIONS_TYPE.items():
try:
getattr(config, getter)('daemon', option)
except configparser.NoOptionError as error:
if option not in DAEMON_OPTIONAL_OPTIONS:
raise ValueError(error)
except configparser.Error as error:
raise ValueError(error)
except ValueError as exc:
msg = ("invalid data for '{opt}' option in daemon section: {err}"
.format(opt=option, err=exc))
raise ValueError(msg)
service_configuration_check(config)
|
Perform a sanity check on the configuration.
It first validates the daemon settings and then the settings for
each service check.
Arguments:
    config (obj): A ConfigParser object holding the configuration.
Returns:
    None if all checks pass; otherwise a ValueError is raised.
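A hedged usage sketch (the configuration file path is hypothetical; DEFAULT_OPTIONS is the defaults dict shipped with the package, as in the accompanying imports):
import sys
import configparser
from anycast_healthchecker import DEFAULT_OPTIONS
config = configparser.ConfigParser()
config.read_dict(DEFAULT_OPTIONS)
config.read('/etc/anycast-healthchecker.conf')   # hypothetical location
try:
    configuration_check(config)
except ValueError as exc:
    sys.exit('invalid configuration: {e}'.format(e=exc))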
|
https://github.com/unixsurfer/anycast_healthchecker/blob/a8bd71b30cabf9452076ca12f0faeae453b02670/anycast_healthchecker/utils.py#L326-L372
|
from collections import Counter
import re
import os
import signal
import sys
import subprocess
import logging
import logging.handlers
import time
import datetime
import configparser
import glob
import shlex
import shutil
import ipaddress
from pythonjsonlogger import jsonlogger
from anycast_healthchecker import DEFAULT_OPTIONS, PROGRAM_NAME, __version__
SERVICE_OPTIONS_TYPE = {
'check_cmd': 'get',
'check_interval': 'getfloat',
'check_timeout': 'getfloat',
'check_rise': 'getint',
'check_fail': 'getint',
'check_disabled': 'getboolean',
'on_disabled': 'get',
'ip_prefix': 'get',
'interface': 'get',
'ip_check_disabled': 'getboolean',
'custom_bird_reconfigure_cmd_timeout': 'getfloat',
'custom_bird_reconfigure_cmd': 'get',
}
SERVICE_OPTIONAL_OPTIONS = {
'custom_bird_reconfigure_cmd_timeout',
'custom_bird_reconfigure_cmd',
}
DAEMON_OPTIONS_TYPE = {
'pidfile': 'get',
'bird_conf': 'get',
'bird6_conf': 'get',
'bird_variable': 'get',
'bird6_variable': 'get',
'log_maxbytes': 'getint',
'log_backups': 'getint',
'log_file': 'get',
'stderr_file': 'get',
'stderr_log_server': 'getboolean',
'log_server': 'get',
'log_server_port': 'getint',
'json_stdout': 'getboolean',
'json_log_server': 'getboolean',
'json_log_file': 'getboolean',
'purge_ip_prefixes': 'getboolean',
'bird_keep_changes': 'getboolean',
'bird6_keep_changes': 'getboolean',
'bird_changes_counter': 'getint',
'bird6_changes_counter': 'getint',
'bird_reconfigure_cmd': 'get',
'bird6_reconfigure_cmd': 'get',
'splay_startup': 'getfloat',
}
DAEMON_OPTIONAL_OPTIONS = [
'stderr_log_server',
'stderr_file',
'log_server',
'log_file',
'splay_startup',
]
def valid_ip_prefix(ip_prefix):
try:
ip_prefix = ipaddress.ip_network(ip_prefix)
except ValueError:
return False
else:
if ip_prefix.version == 4 and ip_prefix.max_prefixlen != 32:
return False
if ip_prefix.version == 6 and ip_prefix.max_prefixlen != 128:
return False
return True
def touch(file_path):
with open(file_path, 'a'):
os.utime(file_path, None)
def get_ip_prefixes_from_config(config, services, ip_version):
ip_prefixes = set()
for service in services:
ip_prefix = ipaddress.ip_network(config.get(service, 'ip_prefix'))
if ip_prefix.version == ip_version:
ip_prefixes.add(ip_prefix.with_prefixlen)
return ip_prefixes
def ip_prefixes_sanity_check(config, bird_configuration):
for ip_version in bird_configuration:
modify_ip_prefixes(config,
bird_configuration[ip_version]['config_file'],
bird_configuration[ip_version]['variable_name'],
bird_configuration[ip_version]['dummy_ip_prefix'],
bird_configuration[ip_version]['reconfigure_cmd'],
bird_configuration[ip_version]['keep_changes'],
bird_configuration[ip_version]['changes_counter'],
ip_version)
def modify_ip_prefixes(
config,
config_file,
variable_name,
dummy_ip_prefix,
reconfigure_cmd,
keep_changes,
changes_counter,
ip_version):
log = logging.getLogger(PROGRAM_NAME)
services = config.sections()
services.remove('daemon')
update_bird_conf = False
try:
ip_prefixes_in_bird = get_ip_prefixes_from_bird(config_file)
except OSError as error:
log.error("failed to open Bird configuration %s, this is a FATAL "
"error, thus exiting main program", error)
sys.exit(1)
_name = get_variable_name_from_bird(config_file)
if _name is None:
log.warning("failed to find variable name in %s, going to add it",
config_file)
update_bird_conf = True
elif _name != variable_name:
log.warning("found incorrect variable name in %s, going to add the "
"correct one %s", _name, variable_name)
update_bird_conf = True
if dummy_ip_prefix not in ip_prefixes_in_bird:
log.warning("dummy IP prefix %s is missing from bird configuration "
"%s, adding it", dummy_ip_prefix, config_file)
ip_prefixes_in_bird.insert(0, dummy_ip_prefix)
update_bird_conf = True
ip_prefixes_with_check = get_ip_prefixes_from_config(
config,
services,
ip_version)
ip_prefixes_with_check.add(dummy_ip_prefix)
ip_prefixes_without_check = set(ip_prefixes_in_bird).difference(
ip_prefixes_with_check)
if ip_prefixes_without_check:
if config.getboolean('daemon', 'purge_ip_prefixes'):
log.warning("removing IP prefix(es) %s from %s because they don't "
"have a service check configured",
','.join(ip_prefixes_without_check),
config_file)
ip_prefixes_in_bird[:] = (ip for ip in ip_prefixes_in_bird
if ip not in ip_prefixes_without_check)
update_bird_conf = True
else:
log.warning("found IP prefixes %s in %s without a service "
"check configured",
','.join(ip_prefixes_without_check),
config_file)
if update_bird_conf:
if keep_changes:
archive_bird_conf(config_file, changes_counter)
tempname = write_temp_bird_conf(
dummy_ip_prefix,
config_file,
variable_name,
ip_prefixes_in_bird
)
try:
os.rename(tempname, config_file)
except OSError as error:
msg = ("CRITICAL: failed to create Bird configuration {e}, "
"this is FATAL error, thus exiting main program"
.format(e=error))
sys.exit("{m}".format(m=msg))
else:
log.info("Bird configuration for IPv%s is updated", ip_version)
reconfigure_bird(reconfigure_cmd)
def load_configuration(config_file, config_dir, service_file):
config_files = [config_file]
config = configparser.ConfigParser()
config.read_dict(DEFAULT_OPTIONS)
if not os.path.isfile(config_file):
raise ValueError("{f} configuration file either isn't readable or "
"doesn't exist".format(f=config_file))
if service_file is not None:
if not os.path.isfile(service_file):
raise ValueError("{f} configuration file for a service check "
"doesn't exist".format(f=service_file))
else:
config_files.append(service_file)
elif config_dir is not None:
if not os.path.isdir(config_dir):
raise ValueError("{d} directory with configuration files for "
"service checks doesn't exist"
.format(d=config_dir))
else:
config_files.extend(glob.glob(os.path.join(config_dir, '*.conf')))
try:
config.read(config_files)
except configparser.Error as exc:
raise ValueError(exc)
configuration_check(config)
bird_configuration = build_bird_configuration(config)
create_bird_config_files(bird_configuration)
return config, bird_configuration
|
Apache License 2.0
|
frank-qlu/recruit
|
招聘爬虫/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/polynomial/polynomial.py
|
polyint
|
python
|
def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c + 0.0
cdt = c.dtype
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if np.ndim(lbnd) != 0:
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
k = list(k) + [0]*(cnt - len(k))
c = np.moveaxis(c, iaxis, 0)
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - polyval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c
|
Integrate a polynomial.
Returns the polynomial coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients, from low to high degree along each axis, e.g., [1,2,3]
represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
1-D array of polynomial coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Coefficient array of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
``np.ndim(scl) != 0``.
See Also
--------
polyder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3)
>>> P.polyint(c) # should return array([0, 1, 1, 1])
array([ 0., 1., 1., 1.])
>>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
array([ 0. , 0. , 0. , 0.16666667, 0.08333333,
0.05 ])
>>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
array([ 3., 1., 1., 1.])
>>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
array([ 6., 1., 1., 1.])
>>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
array([ 0., -2., -2., -2.])
|
https://github.com/frank-qlu/recruit/blob/0875fb1d2cfb581aaa8abc7a97880c0ce5bf6147/招聘爬虫/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/polynomial/polynomial.py#L573-L693
|
from __future__ import division, absolute_import, print_function
__all__ = [
'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander',
'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
polytrim = pu.trimcoef
polydomain = np.array([-1, 1])
polyzero = np.array([0])
polyone = np.array([1])
polyx = np.array([0, 1])
def polyline(off, scl):
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def polyfromroots(roots):
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [polyline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [polymul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = polymul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def polyadd(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polysub(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def polymulx(c):
[c] = pu.as_series([c])
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1:] = c
return prd
def polymul(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
ret = np.convolve(c1, c2)
return pu.trimseq(ret)
def polydiv(c1, c2):
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
len1 = len(c1)
len2 = len(c2)
if len2 == 1:
return c1/c2[-1], c1[:1]*0
elif len1 < len2:
return c1[:1]*0, c1
else:
dlen = len1 - len2
scl = c2[-1]
c2 = c2[:-1]/scl
i = dlen
j = len1 - 1
while i >= 0:
c1[i:j] -= c2*c1[j]
i -= 1
j -= 1
return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
def polypow(c, pow, maxpower=None):
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
prd = c
for i in range(2, power + 1):
prd = np.convolve(prd, c)
return prd
def polyder(c, m=1, scl=1, axis=0):
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c + 0.0
cdt = c.dtype
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=cdt)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
|
Apache License 2.0
|
jhuggins/viabel
|
viabel/_utils.py
|
_data_file_path
|
python
|
def _data_file_path(filename):
return os.path.abspath(os.path.join(__file__, '../data', filename))
|
Returns the path to an internal file
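For example, if the module were installed at /opt/viabel/_utils.py (a hypothetical path) then, for a hypothetical filename:
_data_file_path('posterior.pkl')
# -> '/opt/viabel/data/posterior.pkl'  (os.path.abspath collapses the '_utils.py/..' segment)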
|
https://github.com/jhuggins/viabel/blob/a8f67b098d1d3ece0c16dd7607b28820d882f358/viabel/_utils.py#L39-L41
|
import os
import pickle
import shutil
import time
from hashlib import md5
import autograd.numpy as np
import pystan
def vectorize_if_needed(f, a, axis=-1):
if a.ndim > 1:
return np.apply_along_axis(f, axis, a)
else:
return f(a)
def ensure_2d(a):
if a.ndim == 0:
return a
while a.ndim < 2:
a = a[:, np.newaxis]
return a
class Timer:
def __init__(self):
pass
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
|
MIT License
|
continualai/avalanche
|
avalanche/benchmarks/datasets/inaturalist/inaturalist_data.py
|
INATURALIST_DATA.__init__
|
python
|
def __init__(self, data_folder='data/', trainval=True):
self.trainval = trainval
self.log = logging.getLogger("avalanche")
if os.path.isabs(data_folder):
self.data_folder = data_folder
else:
self.data_folder = os.path.join(os.path.dirname(__file__),
data_folder)
try:
os.makedirs(self.data_folder, exist_ok=True)
self.log.info("Directory %s created", self.data_folder)
self.download = True
self.download_inaturalist()
except OSError:
import traceback
traceback.print_exc()
self.download = False
self.log.error("Directory %s already exists", self.data_folder)
|
Args:
    data_folder (string): folder in which to download the
        iNaturalist dataset.
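A brief usage sketch (the folder name is a placeholder; a relative data_folder is resolved against the module's own directory):
data = INATURALIST_DATA(data_folder='inat2018/', trainval=True)
# trainval selects between the train/val and test archive lists shown in the module code below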
|
https://github.com/continualai/avalanche/blob/9d72ee638d10af989455df8d062e8e86a4399c1d/avalanche/benchmarks/datasets/inaturalist/inaturalist_data.py#L117-L144
|
import os
import sys
import logging
import tarfile
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
base_url = "https://ml-inat-competition-datasets.s3.amazonaws.com/2018"
train_data = [
('train_val2018.tar.gz',
f"{base_url}/train_val2018.tar.gz"),
('train2018.json.tar.gz',
f"{base_url}/train2018.json.tar.gz"),
('val2018.json.tar.gz',
f"{base_url}/val2018.json.tar.gz"),
]
test_data = [
('test2018.tar.gz',
f"{base_url}/test2018.tar.gz"),
('test2018.json.tar.gz',
f"{base_url}/test2018.json.tar.gz"),
]
class INATURALIST_DATA(object):
|
MIT License
|
iydon/info_qq_bot
|
icu/database.py
|
Database.keyword_match
|
python
|
def keyword_match(self, string, fuzzy=0):
string = string.lower()
if fuzzy:
for key, _ in process.extractBests(
string, self.cache['fuzzy'].keys(), limit=fuzzy
):
yield self.cache['fuzzy'][key]
else:
for item in self.cache['keyword']:
if not isinstance(item['keyword'], str) and all(
any(k2 in string for k2 in k1) for k1 in item['keyword']
):
yield {'return': item['return'], 'type': item['type']}
|
Match keywords in the given string and yield the stored return values.
- Arguments:
- string: str
- fuzzy: int, default 0, fuzzy search limit
- Return:
- Iterator[JSON serializable]
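A minimal sketch (the database path and keyword data are hypothetical; it assumes the internal _cache built by keyword_add mirrors the keyword table):
db = Database('icu.json')
db.keyword_add([['price', 'cost']], 'See the pricing page', type='text')
for hit in db.keyword_match('what does it cost?'):
    print(hit['return'], hit['type'])   # -> See the pricing page text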
|
https://github.com/iydon/info_qq_bot/blob/f3c48029469798938c17353a0a901e0be49b5bf6/icu/database.py#L54-L76
|
import typing
from fuzzywuzzy import process
from tinydb import TinyDB
class Database:
def __init__(self, path):
db = TinyDB(path, ensure_ascii=False, encoding='utf-8')
self.table = {
'keyword': db.table('keyword'),
}
self.cache = self._cache()
def keyword_add(self, keyword, return_, type='text', update=True):
assert isinstance(keyword, typing.Iterable) and all(
isinstance(k1, typing.Iterable) and all(
isinstance(k2, str) for k2 in k1
) for k1 in keyword
) and isinstance(type, str)
keyword = list(list(map(str.lower, k)) for k in keyword)
self.table['keyword'].insert(
{'keyword': keyword, 'return': return_, 'type': type}
)
if update:
self.cache = self._cache()
def keyword_add_fuzzy(self, keyword, return_, type='text', update=True):
assert isinstance(keyword, str) and isinstance(type, str)
self.table['keyword'].insert(
{'keyword': keyword, 'return': return_, 'type': type}
)
if update:
self.cache = self._cache()
|
MIT License
|
morepath/morepath
|
morepath/app.py
|
App._follow_class_defers
|
python
|
def _follow_class_defers(self, find, model, variables):
seen = set()
app = self
while app is not None:
if app in seen:
raise LinkError("Circular defer. Cannot link to: %r" % model)
result = find(app, model, variables)
if result is not None:
return result, app
seen.add(app)
app = app._deferred_class_link_app(model, variables)
return None, app
|
Resolve to the deferring app and find something there.
For ``model`` and ``variables``, look up the deferring app as defined
by :class:`morepath.App.defer_class_links` recursively. Use the
supplied ``find`` function to find something for ``model`` and
``variables`` in that app. When something is found, return it and the
app where it was found.
:param find: a function that takes ``app``, ``model`` and
``variables`` arguments and returns something when it is found,
or ``None`` when not.
:param model: the model class to find things for.
:return: a tuple with the thing found (or ``None``) and the app in
which it was found.
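A hedged sketch of a ``find`` callback, mirroring how _get_deferred_mounted_class_path uses this method in the accompanying code (some_app and Document are hypothetical):
def find(app, model, variables):
    # return path info when this app can link to the model, else None
    return app._get_mounted_class_path(model, variables)

info, owning_app = some_app._follow_class_defers(find, Document, {'id': 7})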
|
https://github.com/morepath/morepath/blob/09972904229f807da75c75d8825af1495057acdc/morepath/app.py#L571-L597
|
import dectate
from dectate import directive
import reg
from webob.exc import HTTPNotFound
from .request import Request
from .reify import reify
from .path import PathInfo
from .error import LinkError
from . import directive as action
def cached_key_lookup(key_lookup):
return reg.DictCachingKeyLookup(key_lookup)
def commit_if_needed(app):
if not app.is_committed():
app.commit()
def dispatch_method(*predicates, **kw):
kw.setdefault("get_key_lookup", cached_key_lookup)
kw.setdefault("first_invocation_hook", commit_if_needed)
return reg.dispatch_method(*predicates, **kw)
dispatch_method.__doc__ = reg.dispatch_method.__doc__
class App(dectate.App):
parent = None
request_class = Request
logger_name = "morepath.directive"
setting = directive(action.SettingAction)
setting_section = directive(action.SettingSectionAction)
predicate_fallback = directive(action.PredicateFallbackAction)
predicate = directive(action.PredicateAction)
method = directive(action.MethodAction)
converter = directive(action.ConverterAction)
_path = directive(action.PathAction)
path = directive(action.PathCompositeAction)
permission_rule = directive(action.PermissionRuleAction)
template_directory = directive(action.TemplateDirectoryAction)
template_loader = directive(action.TemplateLoaderAction)
template_render = directive(action.TemplateRenderAction)
view = directive(action.ViewAction)
json = directive(action.JsonAction)
html = directive(action.HtmlAction)
mount = directive(action.MountAction)
defer_links = directive(action.DeferLinksAction)
defer_class_links = directive(action.DeferClassLinksAction)
tween_factory = directive(action.TweenFactoryAction)
identity_policy = directive(action.IdentityPolicyAction)
verify_identity = directive(action.VerifyIdentityAction)
dump_json = directive(action.DumpJsonAction)
link_prefix = directive(action.LinkPrefixAction)
def __init__(self):
pass
def request(self, environ):
return self.request_class(environ, self)
def __call__(self, environ, start_response):
request = self.request(environ)
response = self.publish(request)
return response(environ, start_response)
@reify
def publish(self):
if not self.is_committed():
self.commit()
return self.config.tween_registry.wrap(self)
def ancestors(self):
app = self
while app is not None:
yield app
app = app.parent
@reify
def root(self):
return list(self.ancestors())[-1]
def child(self, app, **variables):
if isinstance(app, App):
result = app
if app.__class__ not in self.config.path_registry.mounted:
return None
else:
if isinstance(app, str):
factory = self.config.path_registry.named_mounted.get(app)
else:
factory = self.config.path_registry.mounted.get(app)
if factory is None:
return None
result = factory(**variables)
result.parent = self
return result
def sibling(self, app, **variables):
parent = self.parent
if parent is None:
return None
return parent.child(app, **variables)
@property
def settings(self):
return self.config.setting_registry
@classmethod
def mounted_app_classes(cls, callback=None):
discovery = set()
found = {cls}
while found:
discovery.update(found)
if callback is not None:
callback(*found)
found = {
c for a in found for c in a.config.path_registry.mounted
} - discovery
return discovery
@classmethod
def commit(cls):
return cls.mounted_app_classes(dectate.commit)
@classmethod
def init_settings(cls, settings):
def set_setting_section(section, section_settings):
cls.setting_section(section)(lambda: section_settings)
for section, section_settings in settings.items():
set_setting_section(section, section_settings)
@dispatch_method()
def get_view(self, obj, request):
return HTTPNotFound()
@dispatch_method("identity")
def _verify_identity(self, identity):
return False
@dispatch_method("identity", "obj", reg.match_class("permission"))
def _permits(self, identity, obj, permission):
return False
@dispatch_method("obj")
def _dump_json(self, obj, request):
return obj
def _link_prefix(self, request):
return request.application_url
@dispatch_method(reg.match_class("model"))
def _class_path(self, model, variables):
return None
@dispatch_method("obj")
def _path_variables(self, obj):
return self._default_path_variables(obj)
@dispatch_method("obj")
def _default_path_variables(self, obj):
return None
@dispatch_method("obj")
def _deferred_link_app(self, obj):
return None
@dispatch_method(reg.match_class("model"))
def _deferred_class_link_app(self, model, variables):
return None
@classmethod
def clean(cls):
reg.clean_dispatch_methods(cls)
def _identify(self, request):
return None
def remember_identity(self, response, request, identity):
pass
def forget_identity(self, response, request):
pass
def _get_path(self, obj):
return self._class_path(obj.__class__, self._path_variables(obj))
def _get_mounted_path(self, obj):
paths = []
parameters = {}
app = self
while app is not None:
info = app._get_path(obj)
if info is None:
return None
paths.append(info.path)
parameters.update(info.parameters)
obj = app
app = app.parent
paths.reverse()
return PathInfo("/".join(paths).strip("/"), parameters)
def _get_mounted_class_path(self, model, variables):
info = self._class_path(model, variables)
if info is None:
return None
if self.parent is None:
return info
mount_info = self.parent._get_mounted_path(self)
path = mount_info.path
if info.path:
path += "/" + info.path
parameters = info.parameters.copy()
parameters.update(mount_info.parameters)
return PathInfo(path, parameters)
def _get_deferred_mounted_path(self, obj):
def find(app, obj):
return app._get_mounted_path(obj)
return self._follow_defers(find, obj)
def _get_deferred_mounted_class_path(self, model, variables):
def find(app, model, variables):
return app._get_mounted_class_path(model, variables)
info, app = self._follow_class_defers(find, model, variables)
return info
def _follow_defers(self, find, obj):
seen = set()
app = self
while app is not None:
if app in seen:
raise LinkError("Circular defer. Cannot link to: %r" % obj)
result = find(app, obj)
if result is not None:
return result, app
seen.add(app)
next_app = app._deferred_link_app(obj)
if next_app is None:
variables = app._path_variables(obj)
if variables is not None:
next_app = app._deferred_class_link_app(
obj.__class__, variables
)
app = next_app
return None, app
|
BSD 3-Clause New or Revised License
|
reliaqualassociates/ramstk
|
src/ramstk/views/gtk3/preferences/panel.py
|
GeneralPreferencesPanel._do_load_panel
|
python
|
def _do_load_panel(self, configuration: RAMSTKUserConfiguration) -> None:
_positions = {"bottom": 1, "left": 2, "right": 3, "top": 4}
_papersize = {"a4": 1, "letter": 2}
self._configuration = configuration
self.cmbModuleBookTabPosition.do_update(
_positions[self._configuration.RAMSTK_TABPOS["modulebook"].lower()],
signal="changed",
)
self.cmbWorkBookTabPosition.do_update(
_positions[self._configuration.RAMSTK_TABPOS["workbook"].lower()],
signal="changed",
)
self.cmbListBookTabPosition.do_update(
_positions[self._configuration.RAMSTK_TABPOS["listbook"].lower()],
signal="changed",
)
self.cmbReportSize.do_update(
_papersize[self._configuration.RAMSTK_REPORT_SIZE.lower()], signal="changed"
)
self.txtFRMultiplier.do_update(
str(self._configuration.RAMSTK_HR_MULTIPLIER), signal="changed"
)
self.txtDecimalPlaces.do_update(
str(self._configuration.RAMSTK_DEC_PLACES), signal="changed"
)
self.txtMissionTime.do_update(
str(self._configuration.RAMSTK_MTIME), signal="changed"
)
self.btnConfDir.set_current_folder(self._configuration.RAMSTK_CONF_DIR)
self.btnDataDir.set_current_folder(self._configuration.RAMSTK_DATA_DIR)
self.btnIconDir.set_current_folder(self._configuration.RAMSTK_ICON_DIR)
self.btnLogDir.set_current_folder(self._configuration.RAMSTK_LOG_DIR)
|
Load the current preference values.
:return: None
:rtype: None
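Because the panel subscribes this handler to the "request_load_preferences" topic (see the accompanying __init__), it is normally triggered through the message bus rather than called directly. A hedged sketch, where user_configuration is a hypothetical RAMSTKUserConfiguration instance:
pub.sendMessage("request_load_preferences", configuration=user_configuration)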
|
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/views/gtk3/preferences/panel.py#L286-L326
|
from typing import Any, Dict, List
import toml
from pubsub import pub
from ramstk.configuration import RAMSTKUserConfiguration
from ramstk.utilities import string_to_boolean
from ramstk.views.gtk3 import Gdk, Gtk, _
from ramstk.views.gtk3.widgets import (
RAMSTKComboBox,
RAMSTKEntry,
RAMSTKFileChooserButton,
RAMSTKFixedPanel,
RAMSTKLabel,
RAMSTKTreePanel,
RAMSTKTreeView,
)
class GeneralPreferencesPanel(RAMSTKFixedPanel):
_select_msg = "succeed_get_preferences_attributes"
_tag = "preferences"
_title = _("General Preferences")
def __init__(self) -> None:
super().__init__()
self.btnConfDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Configuration File Directory")
)
self.btnDataDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Data Directory")
)
self.btnIconDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Icon Directory")
)
self.btnLogDir: RAMSTKFileChooserButton = RAMSTKFileChooserButton(
_("RAMSTK Log Directory")
)
self.cmbModuleBookTabPosition: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.cmbWorkBookTabPosition: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.cmbListBookTabPosition: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.cmbReportSize: RAMSTKComboBox = RAMSTKComboBox(simple=True)
self.txtFRMultiplier: RAMSTKEntry = RAMSTKEntry()
self.txtDecimalPlaces: RAMSTKEntry = RAMSTKEntry()
self.txtMissionTime: RAMSTKEntry = RAMSTKEntry()
self._configuration: RAMSTKUserConfiguration = RAMSTKUserConfiguration()
self.dic_attribute_widget_map: Dict[str, List[Any]] = {
"module_book_tab_pos": [
0,
self.cmbModuleBookTabPosition,
"changed",
self._on_changed_combo,
"",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Module Book Tab Position:"),
"gchararray",
],
"work_book_tab_pos": [
1,
self.cmbWorkBookTabPosition,
"changed",
self._on_changed_combo,
"",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Work Book Tab Position:"),
"gchararray",
],
"list_book_tab_pos": [
2,
self.cmbListBookTabPosition,
"changed",
self._on_changed_combo,
"",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("List Book Tab Position:"),
"gchararray",
],
"report_size": [
3,
self.cmbReportSize,
"changed",
super().on_changed_combo,
"",
"Letter",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Report Paper Size:"),
"gchararray",
],
"fr_multiplier": [
4,
self.txtFRMultiplier,
"changed",
super().on_changed_entry,
"",
6,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
"width": 75,
},
_("Failure Rate Multiplier:"),
"gfloat",
],
"decimals": [
5,
self.txtDecimalPlaces,
"changed",
super().on_changed_entry,
"",
3,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
"width": 75,
},
_("Decimal Places:"),
"gint",
],
"mission_time": [
6,
self.txtMissionTime,
"changed",
super().on_changed_entry,
"",
1.0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Reliability Mission Time:"),
"gfloat",
],
"config_file_path": [
7,
self.btnConfDir,
"file-set",
self._do_select_path,
"",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Configuration Files:"),
"gchararray",
],
"data_file_path": [
8,
self.btnDataDir,
"file-set",
self._do_select_path,
"",
1,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Data Files:"),
"gchararray",
],
"icon_file_path": [
9,
self.btnIconDir,
"file-set",
self._do_select_path,
"",
1,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Icon Files:"),
"gchararray",
],
"log_file_path": [
10,
self.btnLogDir,
"file-set",
self._do_select_path,
"",
2,
{
"bg_color": "#FFFFFF",
"editable": True,
"height": 30,
"fg_color": "#000000",
"select-action": Gtk.FileChooserAction.SELECT_FOLDER,
"visible": True,
},
_("Path to RAMSTK Log Files:"),
"gchararray",
],
}
super().do_set_properties()
super().do_make_panel()
self._do_load_comboboxes()
super().do_set_callbacks()
pub.subscribe(self._do_load_panel, "request_load_preferences")
|
BSD 3-Clause New or Revised License
|
has2k1/mizani
|
mizani/scale.py
|
scale_discrete.map
|
python
|
def map(cls, x, palette, limits, na_value=None):
n = len(limits)
pal = palette(n)[match(x, limits)]
try:
pal[pd.isnull(x)] = na_value
except TypeError:
pal = [v if not pd.isnull(v) else na_value for v in pal]
return pal
|
Map values to a discrete palette
Parameters
----------
x : array_like
    Discrete values to map onto the palette
palette : callable ``f(n)``
    Palette to use; it is called with the number of limits
limits : array_like
    Discrete limits (e.g. as returned by ``train``) that define the mapping
na_value : object
    Value to use for missing values.
Returns
-------
out : array_like
Values mapped onto a palette
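A small usage sketch (the colour values are placeholders; ``train`` supplies the limits):
import numpy as np
x = ['a', 'b', 'a', 'c']
limits = scale_discrete.train(x)                       # ['a', 'b', 'c']
palette = lambda n: np.array(['red', 'green', 'blue'])[:n]
scale_discrete.map(x, palette, limits, na_value='grey')
# roughly: array(['red', 'green', 'red', 'blue'], dtype='<U5')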
|
https://github.com/has2k1/mizani/blob/167a5f37c69c7ed8e094cbc487bd43267c15b42c/mizani/scale.py#L236-L261
|
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from .bounds import censor, rescale
from .utils import CONTINUOUS_KINDS, DISCRETE_KINDS, min_max, match
from .utils import get_categories
__all__ = ['scale_continuous', 'scale_discrete']
class scale_continuous:
@classmethod
def apply(cls, x, palette, na_value=None, trans=None):
if trans is not None:
x = trans.transform(x)
limits = cls.train(x)
return cls.map(x, palette, limits, na_value)
@classmethod
def train(cls, new_data, old=None):
if not len(new_data):
return old
if not hasattr(new_data, 'dtype'):
new_data = np.asarray(new_data)
if new_data.dtype.kind not in CONTINUOUS_KINDS:
raise TypeError(
"Discrete value supplied to continuous scale")
if old is not None:
new_data = np.hstack([new_data, old])
return min_max(new_data, na_rm=True, finite=True)
@classmethod
def map(cls, x, palette, limits, na_value=None, oob=censor):
x = oob(rescale(x, _from=limits))
pal = palette(x)
try:
pal[pd.isnull(x)] = na_value
except TypeError:
pal = [v if not pd.isnull(v) else na_value for v in pal]
return pal
class scale_discrete:
@classmethod
def apply(cls, x, palette, na_value=None):
limits = cls.train(x)
return cls.map(x, palette, limits, na_value)
@classmethod
def train(cls, new_data, old=None, drop=False, na_rm=False):
if not len(new_data):
return old
if old is None:
old = []
else:
old = list(old)
nan_bool_idx = pd.isnull(new_data)
has_na = np.any(nan_bool_idx)
if not hasattr(new_data, 'dtype'):
new_data = np.asarray(new_data)
new_data = new_data[~nan_bool_idx]
if new_data.dtype.kind not in DISCRETE_KINDS:
raise TypeError(
"Continuous value supplied to discrete scale")
if pdtypes.is_categorical_dtype(new_data):
categories = get_categories(new_data)
if drop:
present = set(new_data.drop_duplicates())
new = [i for i in categories if i in present]
else:
new = list(categories)
else:
new = np.unique(new_data)
new.sort()
old_set = set(old)
if pdtypes.is_categorical_dtype(new_data):
all_set = old_set | set(new)
ordered_cats = categories.union(old, sort=False)
limits = [c for c in ordered_cats if c in all_set]
else:
limits = old + [i for i in new if (i not in old_set)]
has_na_limits = any(pd.isnull(limits))
if not has_na_limits and not na_rm and has_na:
limits.append(np.nan)
return limits
@classmethod
|
BSD 3-Clause New or Revised License
|
openatx/facebook-wda
|
wda/__init__.py
|
BaseClient.implicitly_wait
|
python
|
def implicitly_wait(self, seconds):
assert isinstance(seconds, (int, float))
self.__timeout = seconds
|
set default element search timeout
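A short usage sketch (assumes the package's Client subclass of BaseClient and its default WDA URL):
import wda
c = wda.Client('http://localhost:8100')
c.implicitly_wait(10.0)    # element lookups now wait up to 10 seconds before failing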
|
https://github.com/openatx/facebook-wda/blob/9488109e5615c74fa41a0627ec16a11a0728218c/wda/__init__.py#L728-L733
|
from __future__ import print_function, unicode_literals
import base64
import contextlib
import copy
import enum
import functools
import io
import json
import logging
import os
import re
import shutil
import subprocess
import threading
import time
from collections import defaultdict, namedtuple
from typing import Callable, Optional, Union
from urllib.parse import urlparse
import requests
import retry
import six
from deprecated import deprecated
from . import requests_usbmux, xcui_element_types
from ._proto import *
from .exceptions import *
from .usbmux import Usbmux
from .utils import inject_call, limit_call_depth
try:
from functools import cached_property
except ImportError:
from cached_property import cached_property
try:
import sys
import logzero
if not (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
log_format = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s'
logzero.setup_default_logger(formatter=logzero.LogFormatter(
fmt=log_format))
logger = logzero.logger
except ImportError:
logger = logging.getLogger("facebook-wda")
DEBUG = False
HTTP_TIMEOUT = 180.0
DEVICE_WAIT_TIMEOUT = 180.0
LANDSCAPE = 'LANDSCAPE'
PORTRAIT = 'PORTRAIT'
LANDSCAPE_RIGHT = 'UIA_DEVICE_ORIENTATION_LANDSCAPERIGHT'
PORTRAIT_UPSIDEDOWN = 'UIA_DEVICE_ORIENTATION_PORTRAIT_UPSIDEDOWN'
class Status(enum.IntEnum):
UNKNOWN = 100
ERROR = 110
class Callback(str, enum.Enum):
ERROR = "::error"
HTTP_REQUEST_BEFORE = "::http-request-before"
HTTP_REQUEST_AFTER = "::http-request-after"
RET_RETRY = "::retry"
RET_ABORT = "::abort"
RET_CONTINUE = "::continue"
class AttrDict(dict):
def __getattr__(self, key):
if isinstance(key, str) and key in self:
return self[key]
raise AttributeError("Attribute key not found", key)
def convert(dictionary):
return AttrDict(dictionary)
def urljoin(*urls):
return '/'.join([u.strip("/") for u in urls])
def roundint(i):
return int(round(i, 0))
def namedlock(name):
if not hasattr(namedlock, 'locks'):
namedlock.locks = defaultdict(threading.Lock)
return namedlock.locks[name]
def httpdo(url, method="GET", data=None, timeout=None) -> AttrDict:
p = urlparse(url)
with namedlock(p.scheme + "://" + p.netloc):
return _unsafe_httpdo(url, method, data, timeout)
@functools.lru_cache(1024)
def _requests_session_pool_get(scheme, netloc):
return requests_usbmux.Session()
def _is_tmq_platform() -> bool:
return os.getenv("TMQ") == "true"
def _unsafe_httpdo(url, method='GET', data=None, timeout=None):
start = time.time()
if DEBUG:
body = json.dumps(data) if data else ''
print("Shell$ curl -X {method} -d '{body}' '{url}'".format(
method=method.upper(), body=body or '', url=url))
if timeout is None:
timeout = HTTP_TIMEOUT
try:
u = urlparse(url)
request_session = _requests_session_pool_get(u.scheme, u.netloc)
response = request_session.request(method,
url,
json=data,
timeout=timeout)
except (requests.ConnectionError, requests.ReadTimeout) as e:
raise
if response.status_code == 502:
raise WDABadGateway(response.status_code, response.text)
if DEBUG:
ms = (time.time() - start) * 1000
response_text = response.text
if url.endswith("/screenshot"):
response_text = response_text[:100] + "..."
print('Return ({:.0f}ms): {}'.format(ms, response_text))
try:
retjson = response.json()
retjson['status'] = retjson.get('status', 0)
r = convert(retjson)
if isinstance(r.value, dict) and r.value.get("error"):
status = Status.ERROR
value = r.value.copy()
value.pop("traceback", None)
for errCls in (WDAInvalidSessionIdError, WDAPossiblyCrashedError, WDAKeyboardNotPresentError, WDAUnknownError, WDAStaleElementReferenceError):
if errCls.check(value):
raise errCls(status, value)
raise WDARequestError(status, value)
return r
except JSONDecodeError:
if response.text == "":
raise WDAEmptyResponseError(method, url, data)
raise WDAError(method, url, response.text[:100] + "...")
except requests.ConnectionError as e:
raise WDAError("Failed to establish connection to to WDA")
class Rect(list):
def __init__(self, x, y, width, height):
super().__init__([x, y, width, height])
self.__dict__.update({
"x": x,
"y": y,
"width": width,
"height": height
})
def __str__(self):
return 'Rect(x={x}, y={y}, width={w}, height={h})'.format(
x=self.x, y=self.y, w=self.width, h=self.height)
def __repr__(self):
return str(self)
@property
def center(self):
return namedtuple('Point', ['x', 'y'])(self.x + self.width // 2,
self.y + self.height // 2)
@property
def origin(self):
return namedtuple('Point', ['x', 'y'])(self.x, self.y)
@property
def left(self):
return self.x
@property
def top(self):
return self.y
@property
def right(self):
return self.x + self.width
@property
def bottom(self):
return self.y + self.height
def _start_wda_xctest(udid: str, wda_bundle_id=None) -> bool:
xctool_path = shutil.which("tins2") or shutil.which("tidevice")
if not xctool_path:
return False
logger.info("WDA is not running, exec: {} xctest".format(xctool_path))
args = []
if udid:
args.extend(['-u', udid])
args.append('xctest')
if wda_bundle_id:
args.extend(['-B', wda_bundle_id])
p = subprocess.Popen([xctool_path] + args)
time.sleep(3)
if p.poll() is not None:
logger.warning("xctest launch failed")
return False
return True
class BaseClient(object):
def __init__(self, url=None, _session_id=None):
if not url:
url = os.environ.get('DEVICE_URL', 'http://localhost:8100')
assert re.match(r"^(http\+usbmux|https?)://", url), "Invalid URL: %r" % url
self.__wda_url = url
self.__session_id = _session_id
self.__is_app = bool(_session_id)
self.__timeout = 30.0
self.__callbacks = defaultdict(list)
self.__callback_depth = 0
self.__callback_running = False
if not _session_id:
self._init_callback()
def _callback_fix_invalid_session_id(self, err: WDAError):
if isinstance(err, WDAInvalidSessionIdError):
self.session_id = None
return Callback.RET_RETRY
if isinstance(err, WDAPossiblyCrashedError):
self.session_id = self.session().session_id
return Callback.RET_RETRY
""" 等待设备恢复上线 """
def _callback_wait_ready(self, err):
if isinstance(err, (ConnectionError, requests.ConnectionError,
requests.ReadTimeout, WDABadGateway)):
if not self.wait_ready(DEVICE_WAIT_TIMEOUT):
return Callback.RET_ABORT
return Callback.RET_RETRY
def _callback_tmq_before_send_keys(self, urlpath: str):
if urlpath.endswith("/wda/keys"):
if self.alert.exists:
self.alert.accept()
print("send_keys callback called")
def _callback_tmq_print_error(self, method, url, data, err):
if 'no such alert' in str(err):
return
logger.warning(
"HTTP Error happens, this message is printed for better debugging")
body = json.dumps(data) if data else ''
logger.warning("Shell$ curl -X {method} -d '{body}' '{url}'".format(
method=method.upper(), body=body or '', url=url))
logger.warning("Error: %s", err)
def _init_callback(self):
self.register_callback(Callback.ERROR,
self._callback_fix_invalid_session_id)
if _is_tmq_platform():
logger.info("register callbacks for tmq")
self.register_callback(Callback.ERROR, self._callback_wait_ready)
self.register_callback(Callback.HTTP_REQUEST_BEFORE,
self._callback_tmq_before_send_keys)
self.register_callback(Callback.ERROR,
self._callback_tmq_print_error)
def _callback_json_report(self, method, urlpath):
pass
def _set_output_report(self, filename: str):
self.register_callback(
Callback.HTTP_REQUEST_BEFORE, self._callback_json_report)
def is_ready(self) -> bool:
try:
self.http.get("status", timeout=3)
return True
except Exception as e:
return False
def wait_ready(self, timeout=120, noprint=False) -> bool:
deadline = time.time() + timeout
def _dprint(message: str):
if noprint:
return
print("facebook-wda", time.ctime(), message)
_dprint("Wait ready (timeout={:.1f})".format(timeout))
while time.time() < deadline:
if self.is_ready():
_dprint("device back online")
return True
else:
_dprint("{!r} wait_ready left {:.1f} seconds".format(self.__wda_url, deadline - time.time()))
time.sleep(1.0)
_dprint("device still offline")
return False
@retry.retry(exceptions=WDAEmptyResponseError, tries=3, delay=2)
def status(self):
res = self.http.get('status')
res["value"]['sessionId'] = res.get("sessionId")
return res.value
def register_callback(self, event_name: str, func: Callable, try_first: bool = False):
if try_first:
self.__callbacks[event_name].insert(0, func)
else:
self.__callbacks[event_name].append(func)
def unregister_callback(self,
event_name: Optional[str] = None,
func: Optional[Callable] = None):
if event_name is None:
self.__callbacks.clear()
elif func is None:
self.__callbacks[event_name].clear()
else:
self.__callbacks[event_name].remove(func)
def _run_callback(self, event_name, callbacks,
**kwargs) -> Union[None, Callback]:
if not callbacks:
return
self.__callback_running = True
try:
for fn in callbacks[event_name]:
ret = inject_call(fn, **kwargs)
if ret in [
Callback.RET_RETRY, Callback.RET_ABORT,
Callback.RET_CONTINUE
]:
return ret
finally:
self.__callback_running = False
@property
def callbacks(self):
return self.__callbacks
@limit_call_depth(4)
def _fetch(self,
method: str,
urlpath: str,
data: Optional[dict] = None,
with_session: bool = False,
timeout: Optional[float] = None) -> AttrDict:
urlpath = "/" + urlpath.lstrip("/")
callbacks = self.__callbacks
if self.__callback_running:
callbacks = None
url = urljoin(self.__wda_url, urlpath)
run_callback = functools.partial(self._run_callback,
callbacks=callbacks,
method=method,
url=url,
urlpath=urlpath,
with_session=with_session,
data=data,
client=self)
try:
if with_session:
url = urljoin(self.__wda_url, "session", self.session_id,
urlpath)
run_callback(Callback.HTTP_REQUEST_BEFORE)
response = httpdo(url, method, data, timeout)
run_callback(Callback.HTTP_REQUEST_AFTER, response=response)
return response
except Exception as err:
ret = run_callback(Callback.ERROR, err=err)
if ret == Callback.RET_RETRY:
return self._fetch(method, urlpath, data, with_session)
elif ret == Callback.RET_CONTINUE:
return
else:
raise
@property
def http(self):
return namedtuple("HTTPRequest", ['fetch', 'get', 'post'])(
self._fetch,
functools.partial(self._fetch, "GET"),
functools.partial(self._fetch, "POST"))
@property
def _session_http(self):
return namedtuple("HTTPSessionRequest", ['fetch', 'get', 'post', 'delete'])(
functools.partial(self._fetch, with_session=True),
functools.partial(self._fetch, "GET", with_session=True),
functools.partial(self._fetch, "POST", with_session=True),
functools.partial(self._fetch, "DELETE", with_session=True))
def home(self):
try:
self.http.post('/wda/homescreen')
except WDARequestError as e:
if "Timeout waiting until SpringBoard is visible" in str(e):
return
raise
def healthcheck(self):
return self.http.get('/wda/healthcheck')
def locked(self) -> bool:
return self.http.get("/wda/locked").value
def lock(self):
return self.http.post('/wda/lock')
def unlock(self):
return self.http.post('/wda/unlock')
def sleep(self, secs: float):
time.sleep(secs)
@retry.retry(WDAUnknownError, tries=3, delay=.5, jitter=.2)
def app_current(self) -> dict:
return self.http.get("/wda/activeAppInfo").value
def source(self, format='xml', accessible=False):
if accessible:
return self.http.get('/wda/accessibleSource').value
return self.http.get('source?format=' + format).value
def screenshot(self, png_filename=None, format='pillow'):
value = self.http.get('screenshot').value
raw_value = base64.b64decode(value)
png_header = b"\x89PNG\r\n\x1a\n"
if not raw_value.startswith(png_header) and png_filename:
raise WDARequestError(-1, "screenshot png format error")
if png_filename:
with open(png_filename, 'wb') as f:
f.write(raw_value)
if format == 'raw':
return raw_value
elif format == 'pillow':
from PIL import Image
buff = io.BytesIO(raw_value)
im = Image.open(buff)
return im.convert("RGB")
else:
raise ValueError("unknown format")
def session(self,
bundle_id=None,
arguments: Optional[list] = None,
environment: Optional[dict] = None,
alert_action: Optional[AlertAction] = None):
capabilities = {}
if bundle_id:
always_match = {
"bundleId": bundle_id,
"arguments": arguments or [],
"environment": environment or {},
"shouldWaitForQuiescence": False,
}
if alert_action:
assert alert_action in ["accept", "dismiss"]
capabilities["defaultAlertAction"] = alert_action
capabilities['alwaysMatch'] = always_match
payload = {
"capabilities": capabilities,
"desiredCapabilities": capabilities.get('alwaysMatch',
{}),
}
if self.locked():
self.unlock()
try:
res = self.http.post('session', payload)
except WDAEmptyResponseError:
res = self.session().app_state(bundle_id)
if res.value != 4:
raise
client = Client(self.__wda_url, _session_id=res.sessionId)
client.__timeout = self.__timeout
client.__callbacks = self.__callbacks
return client
def close(self):
try:
return self._session_http.delete('/')
except WDARequestError as e:
if not isinstance(e, (WDAInvalidSessionIdError, WDAPossiblyCrashedError)):
raise
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
    @deprecated(version="1.0.0", reason="Use session_id instead of id")
def id(self):
return self._get_session_id()
@property
def session_id(self) -> str:
if self.__session_id:
return self.__session_id
current_sid = self.status()['sessionId']
if current_sid:
self.__session_id = current_sid
return current_sid
return self.session().session_id
@session_id.setter
def session_id(self, value):
self.__session_id = value
def _get_session_id(self) -> str:
return self.session_id
@cached_property
def scale(self) -> int:
try:
return self._session_http.get("/wda/screen").value['scale']
except (KeyError, WDARequestError):
v = max(self.screenshot().size) / max(self.window_size())
return round(v)
@cached_property
def bundle_id(self):
v = self._session_http.get("/").value
return v['capabilities'].get('CFBundleIdentifier')
|
MIT License
|
pappasam/latexbuild
|
latexbuild/__init__.py
|
build_html
|
python
|
def build_html(path_jinja2, template_name, path_outfile, template_kwargs=None):
latex_template_object = LatexBuild(
path_jinja2,
template_name,
template_kwargs,
)
return latex_template_object.build_html(path_outfile)
|
Helper function for building an HTML file from a LaTeX Jinja2 template
:param path_jinja2: the root directory for latex jinja2 templates
:param template_name: the relative path, to path_jinja2, to the desired
jinja2 Latex template
:param path_outfile: the full path to the desired final output file
Must contain the same file extension as files generated by
cmd_wo_infile, otherwise the process will fail
:param template_kwargs: a dictionary of key/values for jinja2 variables
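Example: a minimal usage sketch; the template directory, template name, output path,
and keyword values below are placeholders, not taken from the source.
    from latexbuild import build_html

    build_html(
        path_jinja2="/path/to/templates",     # root directory of the Jinja2 LaTeX templates (placeholder)
        template_name="report.tex",           # relative to path_jinja2 (placeholder)
        path_outfile="/tmp/report.html",      # extension must match what the build step emits
        template_kwargs={"title": "Demo"},
    )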
|
https://github.com/pappasam/latexbuild/blob/596a2a0a4c42eaa5eb9503d64f9073ad5d0640d5/latexbuild/__init__.py#L40-L56
|
from .build import LatexBuild
from .jinja2_extension import render_latex_template
def build_pdf(path_jinja2, template_name, path_outfile, template_kwargs=None):
latex_template_object = LatexBuild(
path_jinja2,
template_name,
template_kwargs,
)
return latex_template_object.build_pdf(path_outfile)
|
MIT License
|
kylebebak/requester
|
deps/oauthlib/oauth2/rfc6749/clients/base.py
|
Client.prepare_authorization_request
|
python
|
def prepare_authorization_request(self, authorization_url, state=None,
redirect_url=None, scope=None, **kwargs):
if not is_secure_transport(authorization_url):
raise InsecureTransportError()
self.state = state or self.state_generator()
self.redirect_url = redirect_url or self.redirect_url
self.scope = scope or self.scope
auth_url = self.prepare_request_uri(
authorization_url, redirect_uri=self.redirect_url,
scope=self.scope, state=self.state, **kwargs)
return auth_url, FORM_ENC_HEADERS, ''
|
Prepare the authorization request.
This is the first step in many OAuth flows in which the user is
redirected to a certain authorization URL. This method adds
required parameters to the authorization URL.
:param authorization_url: Provider authorization endpoint URL.
:param state: CSRF protection string. Will be automatically created if
not provided. The generated state is available via the ``state``
attribute. Clients should verify that the state is unchanged and
present in the authorization response. This verification is done
automatically if using the ``authorization_response`` parameter
with ``prepare_token_request``.
:param redirect_url: Redirect URL to which the user will be returned
after authorization. Must be provided unless previously setup with
the provider. If provided then it must also be provided in the
token request.
:param kwargs: Additional parameters to be included in the request.
:returns: The prepared request tuple with (url, headers, body).
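Example: a hedged sketch using oauthlib's WebApplicationClient subclass; the client id,
endpoint, redirect URL, and scope below are placeholders.
    from oauthlib.oauth2 import WebApplicationClient

    client = WebApplicationClient("my-client-id")
    url, headers, body = client.prepare_authorization_request(
        "https://provider.example.com/oauth/authorize",
        redirect_url="https://app.example.com/callback",
        scope=["profile"],
    )
    # url now carries response_type, client_id, redirect_uri, scope and the generated state;
    # headers is FORM_ENC_HEADERS and body is empty at this step.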
|
https://github.com/kylebebak/requester/blob/92fe4bfb21eb446b519f08188e5a390c06a0e2b2/deps/oauthlib/oauth2/rfc6749/clients/base.py#L201-L236
|
from __future__ import absolute_import, unicode_literals
import time
import warnings
from oauthlib.common import generate_token
from oauthlib.oauth2.rfc6749 import tokens
from oauthlib.oauth2.rfc6749.errors import (InsecureTransportError,
TokenExpiredError)
from oauthlib.oauth2.rfc6749.parameters import (parse_token_response,
prepare_token_request,
prepare_token_revocation_request)
from oauthlib.oauth2.rfc6749.utils import is_secure_transport
AUTH_HEADER = 'auth_header'
URI_QUERY = 'query'
BODY = 'body'
FORM_ENC_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded'
}
class Client(object):
def __init__(self, client_id,
default_token_placement=AUTH_HEADER,
token_type='Bearer',
access_token=None,
refresh_token=None,
mac_key=None,
mac_algorithm=None,
token=None,
scope=None,
state=None,
redirect_url=None,
state_generator=generate_token,
**kwargs):
self.client_id = client_id
self.default_token_placement = default_token_placement
self.token_type = token_type
self.access_token = access_token
self.refresh_token = refresh_token
self.mac_key = mac_key
self.mac_algorithm = mac_algorithm
self.token = token or {}
self.scope = scope
self.state_generator = state_generator
self.state = state
self.redirect_url = redirect_url
self.code = None
self.expires_in = None
self._expires_at = None
self.populate_token_attributes(self.token)
@property
def token_types(self):
return {
'Bearer': self._add_bearer_token,
'MAC': self._add_mac_token
}
def prepare_request_uri(self, *args, **kwargs):
raise NotImplementedError("Must be implemented by inheriting classes.")
def prepare_request_body(self, *args, **kwargs):
raise NotImplementedError("Must be implemented by inheriting classes.")
    def parse_request_uri_response(self, *args, **kwargs):
        raise NotImplementedError("Must be implemented by inheriting classes.")
def add_token(self, uri, http_method='GET', body=None, headers=None,
token_placement=None, **kwargs):
if not is_secure_transport(uri):
raise InsecureTransportError()
token_placement = token_placement or self.default_token_placement
case_insensitive_token_types = dict(
(k.lower(), v) for k, v in self.token_types.items())
if not self.token_type.lower() in case_insensitive_token_types:
raise ValueError("Unsupported token type: %s" % self.token_type)
if not (self.access_token or self.token.get('access_token')):
raise ValueError("Missing access token.")
if self._expires_at and self._expires_at < time.time():
raise TokenExpiredError()
return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
headers, token_placement, **kwargs)
|
MIT License
|
blacktear23/py-servicebus
|
servicebus/pika/adapters/twisted_connection.py
|
IOLoopReactorAdapter.add_timeout
|
python
|
def add_timeout(self, deadline, callback_method):
return self.reactor.callLater(deadline, callback_method)
|
Add the callback_method to the IOLoop timer to fire after deadline
seconds. Returns a handle to the timeout. Do not confuse with
Tornado's timeout where you pass in the time you want to have your
callback called. Only pass in the seconds until it's to be called.
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
:rtype: twisted.internet.interfaces.IDelayedCall
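Example: a minimal sketch of scheduling a timer through the adapter. Passing
connection=None is only to illustrate the timer call (a real adapter wraps a pika
connection), the callback is a placeholder, and the reactor must be running for it to fire.
    from twisted.internet import reactor

    adapter = IOLoopReactorAdapter(connection=None, reactor=reactor)
    handle = adapter.add_timeout(5, lambda: print("5 seconds elapsed"))
    # handle is an IDelayedCall; a pending timer can be cancelled with handle.cancel()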
|
https://github.com/blacktear23/py-servicebus/blob/c3d6ccf0b2abf131ca1060d89f3c0d4ab08481e4/servicebus/pika/adapters/twisted_connection.py#L200-L211
|
import functools
from twisted.internet import defer, error, reactor
from twisted.python import log
from servicebus.pika import exceptions
from servicebus.pika.adapters import base_connection
class ClosableDeferredQueue(defer.DeferredQueue):
def __init__(self, size=None, backlog=None):
self.closed = None
super(ClosableDeferredQueue, self).__init__(size, backlog)
def put(self, obj):
if self.closed:
return defer.fail(self.closed)
return defer.DeferredQueue.put(self, obj)
def get(self):
if self.closed:
return defer.fail(self.closed)
return defer.DeferredQueue.get(self)
def close(self, reason):
self.closed = reason
while self.waiting:
self.waiting.pop().errback(reason)
self.pending = []
class TwistedChannel(object):
WRAPPED_METHODS = ('exchange_declare', 'exchange_delete', 'queue_declare',
'queue_bind', 'queue_purge', 'queue_unbind', 'basic_qos',
'basic_get', 'basic_recover', 'tx_select', 'tx_commit',
'tx_rollback', 'flow', 'basic_cancel')
def __init__(self, channel):
self.__channel = channel
self.__closed = None
self.__calls = set()
self.__consumers = {}
channel.add_on_close_callback(self.channel_closed)
def channel_closed(self, channel, reply_code, reply_text):
self.__closed = exceptions.ChannelClosed(reply_code, reply_text)
for d in self.__calls:
d.errback(self.__closed)
for consumers in self.__consumers.values():
for c in consumers:
c.close(self.__closed)
self.__calls = set()
self.__consumers = {}
def basic_consume(self, *args, **kwargs):
if self.__closed:
return defer.fail(self.__closed)
queue = ClosableDeferredQueue()
queue_name = kwargs['queue']
kwargs['consumer_callback'] = lambda *args: queue.put(args)
self.__consumers.setdefault(queue_name, set()).add(queue)
try:
consumer_tag = self.__channel.basic_consume(*args, **kwargs)
except:
return defer.fail()
return defer.succeed((queue, consumer_tag))
def queue_delete(self, *args, **kwargs):
wrapped = self.__wrap_channel_method('queue_delete')
queue_name = kwargs['queue']
d = wrapped(*args, **kwargs)
return d.addCallback(self.__clear_consumer, queue_name)
def basic_publish(self, *args, **kwargs):
if self.__closed:
return defer.fail(self.__closed)
return defer.succeed(self.__channel.basic_publish(*args, **kwargs))
def __wrap_channel_method(self, name):
method = getattr(self.__channel, name)
@functools.wraps(method)
def wrapped(*args, **kwargs):
if self.__closed:
return defer.fail(self.__closed)
d = defer.Deferred()
self.__calls.add(d)
d.addCallback(self.__clear_call, d)
def single_argument(*args):
if len(args) > 1:
d.callback(tuple(args))
else:
d.callback(*args)
kwargs['callback'] = single_argument
try:
method(*args, **kwargs)
except:
return defer.fail()
return d
return wrapped
def __clear_consumer(self, ret, queue_name):
self.__consumers.pop(queue_name, None)
return ret
def __clear_call(self, ret, d):
self.__calls.discard(d)
return ret
def __getattr__(self, name):
if name in self.WRAPPED_METHODS:
return self.__wrap_channel_method(name)
return getattr(self.__channel, name)
class IOLoopReactorAdapter(object):
def __init__(self, connection, reactor):
self.connection = connection
self.reactor = reactor
self.started = False
|
BSD 3-Clause New or Revised License
|
kivy/kivy-designer
|
designer/core/builder.py
|
Buildozer.rebuild
|
python
|
def rebuild(self, *args):
self.clean()
self.profiler.bind(on_clean=self._rebuild)
|
Update project dependencies and build the project again
|
https://github.com/kivy/kivy-designer/blob/20343184a28c2851faf0c1ab451d0286d147a441/designer/core/builder.py#L146-L150
|
import os
import shutil
import sys
import designer
from designer.uix.confirmation_dialog import ConfirmationDialog
from designer.utils import constants
from designer.utils.utils import (
get_current_project,
get_fs_encoding,
get_kd_data_dir,
ignore_proj_watcher)
from kivy.event import EventDispatcher
from kivy.properties import (
Clock,
ConfigParser,
ConfigParserProperty,
ObjectProperty,
StringProperty,
)
from kivy.uix.popup import Popup
class Builder(EventDispatcher):
def __init__(self, profiler):
self.profiler = profiler
self.designer = self.profiler.designer
self.designer_settings = self.designer.designer_settings
self.project_watcher = self.designer.project_watcher
self.proj_settings = self.designer.proj_settings
self.ui_creator = self.designer.ui_creator
self.run_command = self.ui_creator.kivy_console.run_command
self.can_run = False
self.last_command = None
if not self.profiler.pro_mode:
self.profiler.pro_mode = 'Debug'
class Buildozer(Builder):
def __init__(self, profiler):
super(Buildozer, self).__init__(profiler)
self.buildozer_path = ''
def _initialize(self):
if self.designer.popup:
self.can_run = False
self.profiler.dispatch('on_error', 'You must close all popups '
'before building your project')
return
self.buildozer_path = self.designer_settings.config_parser.getdefault(
'buildozer',
'buildozer_path',
''
)
if self.buildozer_path == '':
self.profiler.dispatch('on_error',
'Buildozer Path not specified.'
"\n\nUpdate it on File -> Settings")
self.can_run = False
return
envs = self.proj_settings.config_parser.getdefault(
'env variables',
'env',
''
)
for env in envs.split(' '):
self.ui_creator.kivy_console.environment[
env[:env.find('=')]] = env[env.find('=') + 1:]
if not os.path.isfile(os.path.join(self.profiler.project_path,
'buildozer.spec')):
confirm_dlg = ConfirmationDialog(
message='buildozer.spec not found.\n'
'Do you want to create it now?')
self.designer.popup = Popup(title='Buildozer',
content=confirm_dlg,
size_hint=(None, None),
size=('200pt', '150pt'),
auto_dismiss=False)
confirm_dlg.bind(on_ok=self._perform_create_spec,
on_cancel=self.designer.close_popup)
self.designer.popup.open()
self.can_run = False
return
self.can_run = True
def _perform_create_spec(self, *args):
templates_dir = os.path.join(get_kd_data_dir(),
constants.DIR_NEW_TEMPLATE)
shutil.copy(os.path.join(templates_dir, 'default.spec'),
os.path.join(self.profiler.project_path, 'buildozer.spec'))
self.designer.designer_content.update_tree_view(get_current_project())
self.designer.close_popup()
self.last_command()
def _create_command(self, extra):
self.project_watcher.pause_watching()
self._initialize()
self.ui_creator.tab_pannel.switch_to(
self.ui_creator.tab_pannel.tab_list[2])
cd = 'cd ' + self.profiler.project_path
args = [self.buildozer_path]
if self.profiler.pro_verbose:
args.append('--verbose')
args.append(self.profiler.pro_target.lower())
args += extra
return [cd, " ".join(args)]
def build(self, *args):
build_mode = self.profiler.pro_mode.lower()
cmd = self._create_command([build_mode])
if not self.can_run:
self.last_command = self.build
return
self.run_command(cmd)
self.profiler.dispatch('on_message', 'Building project...')
self.ui_creator.kivy_console.bind(on_command_list_done=self.on_build)
|
MIT License
|
carla-simulator/ros-bridge
|
carla_ros_bridge/src/carla_ros_bridge/traffic.py
|
Traffic.__init__
|
python
|
def __init__(self, uid, name, parent, node, carla_actor):
super(Traffic, self).__init__(uid=uid,
name=name,
parent=parent,
node=node,
carla_actor=carla_actor)
|
Constructor
:param uid: unique identifier for this object
:type uid: int
:param name: name identifying this object
:type name: string
:param parent: the parent of this object
:type parent: carla_ros_bridge.Parent
:param node: node-handle
:type node: CompatibleNode
:param carla_actor: carla actor object
:type carla_actor: carla.Actor
|
https://github.com/carla-simulator/ros-bridge/blob/dac9e729b70a3db9da665c1fdb843e96e7e25d04/carla_ros_bridge/src/carla_ros_bridge/traffic.py#L28-L47
|
from carla import TrafficLightState
import carla_common.transforms as trans
from carla_ros_bridge.actor import Actor
from carla_msgs.msg import CarlaTrafficLightStatus, CarlaTrafficLightInfo
class Traffic(Actor):
|
MIT License
|
wapm-packages/python
|
Python-3.6.7/Lib/idlelib/config.py
|
ConfigChanges.add_option
|
python
|
def add_option(self, config_type, section, item, value):
page = self[config_type]
value = str(value)
if section not in page:
page[section] = {}
page[section][item] = value
|
Add item/value pair for config_type and section.
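Example: a small sketch of recording a pending change; the section and option names are
illustrative only. Note that the value is stored as a string.
    changes = ConfigChanges()
    changes.add_option('main', 'EditorWindow', 'font-size', 12)
    assert changes['main']['EditorWindow']['font-size'] == '12'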
|
https://github.com/wapm-packages/python/blob/658c1822f430f6d604ecf2bcc388e469cedb2238/Python-3.6.7/Lib/idlelib/config.py#L825-L831
|
from configparser import ConfigParser
import os
import sys
from tkinter.font import Font
import idlelib
class InvalidConfigType(Exception): pass
class InvalidConfigSet(Exception): pass
class InvalidFgBg(Exception): pass
class InvalidTheme(Exception): pass
class IdleConfParser(ConfigParser):
def __init__(self, cfgFile, cfgDefaults=None):
self.file = cfgFile
ConfigParser.__init__(self, defaults=cfgDefaults, strict=False)
def Get(self, section, option, type=None, default=None, raw=False):
if not self.has_option(section, option):
return default
if type == 'bool':
return self.getboolean(section, option)
elif type == 'int':
return self.getint(section, option)
else:
return self.get(section, option, raw=raw)
def GetOptionList(self, section):
if self.has_section(section):
return self.options(section)
else:
return []
def Load(self):
if self.file:
self.read(self.file)
class IdleUserConfParser(IdleConfParser):
def SetOption(self, section, option, value):
if self.has_option(section, option):
if self.get(section, option) == value:
return False
else:
self.set(section, option, value)
return True
else:
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
return True
def RemoveOption(self, section, option):
if self.has_section(section):
return self.remove_option(section, option)
return False
def AddSection(self, section):
if not self.has_section(section):
self.add_section(section)
def RemoveEmptySections(self):
for section in self.sections():
if not self.GetOptionList(section):
self.remove_section(section)
def IsEmpty(self):
self.RemoveEmptySections()
return not self.sections()
def RemoveFile(self):
if os.path.exists(self.file):
os.remove(self.file)
def Save(self):
fname = self.file
if fname:
if not self.IsEmpty():
try:
cfgFile = open(fname, 'w')
except OSError:
os.unlink(fname)
cfgFile = open(fname, 'w')
with cfgFile:
self.write(cfgFile)
else:
self.RemoveFile()
class IdleConf:
def __init__(self, _utest=False):
self.config_types = ('main', 'highlight', 'keys', 'extensions')
self.defaultCfg = {}
self.userCfg = {}
self.cfg = {}
if not _utest:
self.CreateConfigHandlers()
self.LoadCfgFiles()
def CreateConfigHandlers(self):
if __name__ != '__main__':
idleDir = os.path.dirname(__file__)
else:
idleDir = os.path.abspath(sys.path[0])
self.userdir = userDir = self.GetUserCfgDir()
defCfgFiles = {}
usrCfgFiles = {}
for cfgType in self.config_types:
defCfgFiles[cfgType] = os.path.join(
idleDir, 'config-' + cfgType + '.def')
usrCfgFiles[cfgType] = os.path.join(
userDir, 'config-' + cfgType + '.cfg')
for cfgType in self.config_types:
self.defaultCfg[cfgType] = IdleConfParser(defCfgFiles[cfgType])
self.userCfg[cfgType] = IdleUserConfParser(usrCfgFiles[cfgType])
def GetUserCfgDir(self):
cfgDir = '.idlerc'
userDir = os.path.expanduser('~')
if userDir != '~':
if not os.path.exists(userDir):
warn = ('\n Warning: os.path.expanduser("~") points to\n ' +
userDir + ',\n but the path does not exist.')
try:
print(warn, file=sys.stderr)
except OSError:
pass
userDir = '~'
if userDir == "~":
userDir = os.getcwd()
userDir = os.path.join(userDir, cfgDir)
if not os.path.exists(userDir):
try:
os.mkdir(userDir)
except OSError:
warn = ('\n Warning: unable to create user config directory\n' +
userDir + '\n Check path and permissions.\n Exiting!\n')
if not idlelib.testing:
print(warn, file=sys.stderr)
raise SystemExit
return userDir
def GetOption(self, configType, section, option, default=None, type=None,
warn_on_default=True, raw=False):
try:
if self.userCfg[configType].has_option(section, option):
return self.userCfg[configType].Get(section, option,
type=type, raw=raw)
except ValueError:
warning = ('\n Warning: config.py - IdleConf.GetOption -\n'
' invalid %r value for configuration option %r\n'
' from section %r: %r' %
(type, option, section,
self.userCfg[configType].Get(section, option, raw=raw)))
_warn(warning, configType, section, option)
try:
if self.defaultCfg[configType].has_option(section,option):
return self.defaultCfg[configType].Get(
section, option, type=type, raw=raw)
except ValueError:
pass
if warn_on_default:
warning = ('\n Warning: config.py - IdleConf.GetOption -\n'
' problem retrieving configuration option %r\n'
' from section %r.\n'
' returning default value: %r' %
(option, section, default))
_warn(warning, configType, section, option)
return default
def SetOption(self, configType, section, option, value):
self.userCfg[configType].SetOption(section, option, value)
def GetSectionList(self, configSet, configType):
if not (configType in self.config_types):
raise InvalidConfigType('Invalid configType specified')
if configSet == 'user':
cfgParser = self.userCfg[configType]
elif configSet == 'default':
cfgParser=self.defaultCfg[configType]
else:
raise InvalidConfigSet('Invalid configSet specified')
return cfgParser.sections()
def GetHighlight(self, theme, element, fgBg=None):
if self.defaultCfg['highlight'].has_section(theme):
themeDict = self.GetThemeDict('default', theme)
else:
themeDict = self.GetThemeDict('user', theme)
fore = themeDict[element + '-foreground']
if element == 'cursor':
back = themeDict['normal-background']
else:
back = themeDict[element + '-background']
highlight = {"foreground": fore, "background": back}
if not fgBg:
return highlight
else:
if fgBg == 'fg':
return highlight["foreground"]
if fgBg == 'bg':
return highlight["background"]
else:
raise InvalidFgBg('Invalid fgBg specified')
def GetThemeDict(self, type, themeName):
if type == 'user':
cfgParser = self.userCfg['highlight']
elif type == 'default':
cfgParser = self.defaultCfg['highlight']
else:
raise InvalidTheme('Invalid theme type specified')
theme ={'normal-foreground':'#000000',
'normal-background':'#ffffff',
'keyword-foreground':'#000000',
'keyword-background':'#ffffff',
'builtin-foreground':'#000000',
'builtin-background':'#ffffff',
'comment-foreground':'#000000',
'comment-background':'#ffffff',
'string-foreground':'#000000',
'string-background':'#ffffff',
'definition-foreground':'#000000',
'definition-background':'#ffffff',
'hilite-foreground':'#000000',
'hilite-background':'gray',
'break-foreground':'#ffffff',
'break-background':'#000000',
'hit-foreground':'#ffffff',
'hit-background':'#000000',
'error-foreground':'#ffffff',
'error-background':'#000000',
'cursor-foreground':'#000000',
'stdout-foreground':'#000000',
'stdout-background':'#ffffff',
'stderr-foreground':'#000000',
'stderr-background':'#ffffff',
'console-foreground':'#000000',
'console-background':'#ffffff',
'context-foreground':'#000000',
'context-background':'#ffffff',
}
for element in theme:
if not cfgParser.has_option(themeName, element):
warning = ('\n Warning: config.IdleConf.GetThemeDict'
' -\n problem retrieving theme element %r'
'\n from theme %r.\n'
' returning default color: %r' %
(element, themeName, theme[element]))
_warn(warning, 'highlight', themeName, element)
theme[element] = cfgParser.Get(
themeName, element, default=theme[element])
return theme
def CurrentTheme(self):
return self.current_colors_and_keys('Theme')
def CurrentKeys(self):
return self.current_colors_and_keys('Keys')
def current_colors_and_keys(self, section):
cfgname = 'highlight' if section == 'Theme' else 'keys'
default = self.GetOption('main', section, 'default',
type='bool', default=True)
name = ''
if default:
name = self.GetOption('main', section, 'name2', default='')
if not name:
name = self.GetOption('main', section, 'name', default='')
if name:
source = self.defaultCfg if default else self.userCfg
if source[cfgname].has_section(name):
return name
return "IDLE Classic" if section == 'Theme' else self.default_keys()
@staticmethod
def default_keys():
if sys.platform[:3] == 'win':
return 'IDLE Classic Windows'
elif sys.platform == 'darwin':
return 'IDLE Classic OSX'
else:
return 'IDLE Modern Unix'
def GetExtensions(self, active_only=True,
editor_only=False, shell_only=False):
extns = self.RemoveKeyBindNames(
self.GetSectionList('default', 'extensions'))
userExtns = self.RemoveKeyBindNames(
self.GetSectionList('user', 'extensions'))
for extn in userExtns:
if extn not in extns:
extns.append(extn)
for extn in ('AutoComplete','CodeContext',
'FormatParagraph','ParenMatch'):
extns.remove(extn)
if active_only:
activeExtns = []
for extn in extns:
if self.GetOption('extensions', extn, 'enable', default=True,
type='bool'):
if editor_only or shell_only:
if editor_only:
option = "enable_editor"
else:
option = "enable_shell"
if self.GetOption('extensions', extn,option,
default=True, type='bool',
warn_on_default=False):
activeExtns.append(extn)
else:
activeExtns.append(extn)
return activeExtns
else:
return extns
def RemoveKeyBindNames(self, extnNameList):
return [n for n in extnNameList if not n.endswith(('_bindings', '_cfgBindings'))]
def GetExtnNameForEvent(self, virtualEvent):
extName = None
vEvent = '<<' + virtualEvent + '>>'
for extn in self.GetExtensions(active_only=0):
for event in self.GetExtensionKeys(extn):
if event == vEvent:
extName = extn
return extName
def GetExtensionKeys(self, extensionName):
keysName = extensionName + '_cfgBindings'
activeKeys = self.GetCurrentKeySet()
extKeys = {}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames = self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
event = '<<' + eventName + '>>'
binding = activeKeys[event]
extKeys[event] = binding
return extKeys
def __GetRawExtensionKeys(self,extensionName):
keysName = extensionName+'_cfgBindings'
extKeys = {}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames = self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
binding = self.GetOption(
'extensions', keysName, eventName, default='').split()
event = '<<' + eventName + '>>'
extKeys[event] = binding
return extKeys
def GetExtensionBindings(self, extensionName):
bindsName = extensionName + '_bindings'
extBinds = self.GetExtensionKeys(extensionName)
if self.defaultCfg['extensions'].has_section(bindsName):
eventNames = self.defaultCfg['extensions'].GetOptionList(bindsName)
for eventName in eventNames:
binding = self.GetOption(
'extensions', bindsName, eventName, default='').split()
event = '<<' + eventName + '>>'
extBinds[event] = binding
return extBinds
def GetKeyBinding(self, keySetName, eventStr):
eventName = eventStr[2:-2]
binding = self.GetOption('keys', keySetName, eventName, default='',
warn_on_default=False).split()
return binding
def GetCurrentKeySet(self):
result = self.GetKeySet(self.CurrentKeys())
if sys.platform == "darwin":
for k, v in result.items():
v2 = [ x.replace('<Alt-', '<Option-') for x in v ]
if v != v2:
result[k] = v2
return result
def GetKeySet(self, keySetName):
keySet = self.GetCoreKeys(keySetName)
activeExtns = self.GetExtensions(active_only=1)
for extn in activeExtns:
extKeys = self.__GetRawExtensionKeys(extn)
if extKeys:
for event in extKeys:
if extKeys[event] in keySet.values():
extKeys[event] = ''
keySet[event] = extKeys[event]
return keySet
def IsCoreBinding(self, virtualEvent):
return ('<<'+virtualEvent+'>>') in self.GetCoreKeys()
former_extension_events = {
'<<force-open-completions>>', '<<expand-word>>',
'<<force-open-calltip>>', '<<flash-paren>>', '<<format-paragraph>>',
'<<run-module>>', '<<check-module>>', '<<zoom-height>>'}
def GetCoreKeys(self, keySetName=None):
keyBindings={
'<<copy>>': ['<Control-c>', '<Control-C>'],
'<<cut>>': ['<Control-x>', '<Control-X>'],
'<<paste>>': ['<Control-v>', '<Control-V>'],
'<<beginning-of-line>>': ['<Control-a>', '<Home>'],
'<<center-insert>>': ['<Control-l>'],
'<<close-all-windows>>': ['<Control-q>'],
'<<close-window>>': ['<Alt-F4>'],
'<<do-nothing>>': ['<Control-x>'],
'<<end-of-file>>': ['<Control-d>'],
'<<python-docs>>': ['<F1>'],
'<<python-context-help>>': ['<Shift-F1>'],
'<<history-next>>': ['<Alt-n>'],
'<<history-previous>>': ['<Alt-p>'],
'<<interrupt-execution>>': ['<Control-c>'],
'<<view-restart>>': ['<F6>'],
'<<restart-shell>>': ['<Control-F6>'],
'<<open-class-browser>>': ['<Alt-c>'],
'<<open-module>>': ['<Alt-m>'],
'<<open-new-window>>': ['<Control-n>'],
'<<open-window-from-file>>': ['<Control-o>'],
'<<plain-newline-and-indent>>': ['<Control-j>'],
'<<print-window>>': ['<Control-p>'],
'<<redo>>': ['<Control-y>'],
'<<remove-selection>>': ['<Escape>'],
'<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
'<<save-window-as-file>>': ['<Alt-s>'],
'<<save-window>>': ['<Control-s>'],
'<<select-all>>': ['<Alt-a>'],
'<<toggle-auto-coloring>>': ['<Control-slash>'],
'<<undo>>': ['<Control-z>'],
'<<find-again>>': ['<Control-g>', '<F3>'],
'<<find-in-files>>': ['<Alt-F3>'],
'<<find-selection>>': ['<Control-F3>'],
'<<find>>': ['<Control-f>'],
'<<replace>>': ['<Control-h>'],
'<<goto-line>>': ['<Alt-g>'],
'<<smart-backspace>>': ['<Key-BackSpace>'],
'<<newline-and-indent>>': ['<Key-Return>', '<Key-KP_Enter>'],
'<<smart-indent>>': ['<Key-Tab>'],
'<<indent-region>>': ['<Control-Key-bracketright>'],
'<<dedent-region>>': ['<Control-Key-bracketleft>'],
'<<comment-region>>': ['<Alt-Key-3>'],
'<<uncomment-region>>': ['<Alt-Key-4>'],
'<<tabify-region>>': ['<Alt-Key-5>'],
'<<untabify-region>>': ['<Alt-Key-6>'],
'<<toggle-tabs>>': ['<Alt-Key-t>'],
'<<change-indentwidth>>': ['<Alt-Key-u>'],
'<<del-word-left>>': ['<Control-Key-BackSpace>'],
'<<del-word-right>>': ['<Control-Key-Delete>'],
'<<force-open-completions>>': ['<Control-Key-space>'],
'<<expand-word>>': ['<Alt-Key-slash>'],
'<<force-open-calltip>>': ['<Control-Key-backslash>'],
'<<flash-paren>>': ['<Control-Key-0>'],
'<<format-paragraph>>': ['<Alt-Key-q>'],
'<<run-module>>': ['<Key-F5>'],
'<<check-module>>': ['<Alt-Key-x>'],
'<<zoom-height>>': ['<Alt-Key-2>'],
}
if keySetName:
if not (self.userCfg['keys'].has_section(keySetName) or
self.defaultCfg['keys'].has_section(keySetName)):
warning = (
'\n Warning: config.py - IdleConf.GetCoreKeys -\n'
' key set %r is not defined, using default bindings.' %
(keySetName,)
)
_warn(warning, 'keys', keySetName)
else:
for event in keyBindings:
binding = self.GetKeyBinding(keySetName, event)
if binding:
keyBindings[event] = binding
elif event not in self.former_extension_events:
warning = (
'\n Warning: config.py - IdleConf.GetCoreKeys -\n'
' problem retrieving key binding for event %r\n'
' from key set %r.\n'
' returning default value: %r' %
(event, keySetName, keyBindings[event])
)
_warn(warning, 'keys', keySetName, event)
return keyBindings
def GetExtraHelpSourceList(self, configSet):
helpSources = []
if configSet == 'user':
cfgParser = self.userCfg['main']
elif configSet == 'default':
cfgParser = self.defaultCfg['main']
else:
raise InvalidConfigSet('Invalid configSet specified')
options=cfgParser.GetOptionList('HelpFiles')
for option in options:
value=cfgParser.Get('HelpFiles', option, default=';')
if value.find(';') == -1:
menuItem = ''
helpPath = ''
else:
value=value.split(';')
menuItem=value[0].strip()
helpPath=value[1].strip()
if menuItem and helpPath:
helpSources.append( (menuItem,helpPath,option) )
helpSources.sort(key=lambda x: x[2])
return helpSources
def GetAllExtraHelpSourcesList(self):
allHelpSources = (self.GetExtraHelpSourceList('default') +
self.GetExtraHelpSourceList('user') )
return allHelpSources
def GetFont(self, root, configType, section):
family = self.GetOption(configType, section, 'font', default='courier')
size = self.GetOption(configType, section, 'font-size', type='int',
default='10')
bold = self.GetOption(configType, section, 'font-bold', default=0,
type='bool')
if (family == 'TkFixedFont'):
f = Font(name='TkFixedFont', exists=True, root=root)
actualFont = Font.actual(f)
family = actualFont['family']
size = actualFont['size']
if size <= 0:
size = 10
bold = actualFont['weight'] == 'bold'
return (family, size, 'bold' if bold else 'normal')
def LoadCfgFiles(self):
for key in self.defaultCfg:
self.defaultCfg[key].Load()
self.userCfg[key].Load()
def SaveUserCfgFiles(self):
for key in self.userCfg:
self.userCfg[key].Save()
idleConf = IdleConf()
_warned = set()
def _warn(msg, *key):
key = (msg,) + key
if key not in _warned:
try:
print(msg, file=sys.stderr)
except OSError:
pass
_warned.add(key)
class ConfigChanges(dict):
def __init__(self):
self.pages = []
for config_type in idleConf.config_types:
self[config_type] = {}
self.pages.append(self[config_type])
|
Apache License 2.0
|
yelp/paasta
|
paasta_tools/mesos_tools.py
|
format_running_mesos_task_row
|
python
|
async def format_running_mesos_task_row(
task: Task, get_short_task_id: Callable[[str], str]
) -> Tuple[str, ...]:
short_task_id = get_short_task_id(task["id"])
short_hostname_future = asyncio.ensure_future(
results_or_unknown(get_short_hostname_from_task(task))
)
mem_usage_future = asyncio.ensure_future(results_or_unknown(get_mem_usage(task)))
cpu_usage_future = asyncio.ensure_future(results_or_unknown(get_cpu_usage(task)))
first_status_timestamp = get_first_status_timestamp_string(task)
await asyncio.wait([short_hostname_future, mem_usage_future, cpu_usage_future])
return (
short_task_id,
short_hostname_future.result(),
mem_usage_future.result(),
cpu_usage_future.result(),
first_status_timestamp,
)
|
Returns a pretty-formatted row (a tuple of strings) describing a running Mesos task's attributes
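Example: a hedged sketch of driving the coroutine; `task` is assumed to be a mesos Task
obtained elsewhere (e.g. from get_running_tasks_from_frameworks), and the short-task-id
mapper here is simply the identity function.
    import asyncio

    row = asyncio.run(format_running_mesos_task_row(task, lambda task_id: task_id))
    print(row)  # (short_task_id, hostname, mem usage, cpu usage, first status timestamp)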
|
https://github.com/yelp/paasta/blob/bc1716253bbe003cec01bd02016010910c2b039c/paasta_tools/mesos_tools.py#L403-L424
|
import asyncio
import datetime
import itertools
import json
import logging
import re
import socket
from collections import namedtuple
from pathlib import Path
from typing import Any
from typing import Awaitable
from typing import Callable
from typing import Collection
from typing import Dict
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
from urllib.parse import urlparse
import a_sync
import humanize
import requests
from kazoo.client import KazooClient
from mypy_extensions import TypedDict
import paasta_tools.mesos.cluster as cluster
import paasta_tools.mesos.exceptions as mesos_exceptions
from paasta_tools.async_utils import aiter_to_list
from paasta_tools.async_utils import async_timeout
from paasta_tools.async_utils import async_ttl_cache
from paasta_tools.long_running_service_tools import host_passes_blacklist
from paasta_tools.long_running_service_tools import host_passes_whitelist
from paasta_tools.mesos.cfg import load_mesos_config
from paasta_tools.mesos.exceptions import SlaveDoesNotExist
from paasta_tools.mesos.master import MesosMaster
from paasta_tools.mesos.master import MesosState
from paasta_tools.mesos.task import Task
from paasta_tools.utils import DeployBlacklist
from paasta_tools.utils import DeployWhitelist
from paasta_tools.utils import format_table
from paasta_tools.utils import get_user_agent
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import TimeoutError
MARATHON_FRAMEWORK_NAME_PREFIX = "marathon"
ZookeeperHostPath = namedtuple("ZookeeperHostPath", ["host", "path"])
SlaveTaskCount = namedtuple("SlaveTaskCount", ["count", "batch_count", "slave"])
DEFAULT_MESOS_CLI_CONFIG_LOCATION = "/nail/etc/mesos-cli.json"
TERMINAL_STATES = (
"TASK_ERROR",
"TASK_KILLED",
"TASK_FAILED",
"TASK_FINISHED",
"TASK_DROPPED",
"TASK_GONE",
"TASK_GONE_BY_OPERATOR",
)
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def get_mesos_config_path(
system_paasta_config: Optional[SystemPaastaConfig] = None,
) -> str:
if system_paasta_config is None:
system_paasta_config = load_system_paasta_config()
return system_paasta_config.get_mesos_cli_config().get(
"path", DEFAULT_MESOS_CLI_CONFIG_LOCATION
)
def get_mesos_config(mesos_config_path: Optional[str] = None) -> Dict:
if mesos_config_path is None:
mesos_config_path = get_mesos_config_path()
return load_mesos_config(mesos_config_path)
def get_mesos_master(
mesos_config_path: Optional[str] = None, **overrides: Any
) -> MesosMaster:
config = get_mesos_config(mesos_config_path)
for k, v in overrides.items():
config[k] = v
return MesosMaster(config)
MY_HOSTNAME = socket.getfqdn()
MESOS_MASTER_PORT = 5050
MESOS_SLAVE_PORT = "5051"
class MesosSlaveConnectionError(Exception):
pass
class MesosTailLines(NamedTuple):
stdout: List[str]
stderr: List[str]
error_message: str
def get_mesos_leader(mesos_config_path: Optional[str] = None) -> str:
try:
url = get_mesos_master(mesos_config_path).host
except mesos_exceptions.MasterNotAvailableException:
log.debug("mesos.cli failed to provide the master host")
raise
log.debug("mesos.cli thinks the master host is: %s" % url)
hostname = urlparse(url).hostname
log.debug("The parsed master hostname is: %s" % hostname)
if hostname:
try:
host = socket.gethostbyaddr(hostname)[0]
fqdn = socket.getfqdn(host)
except (socket.error, socket.herror, socket.gaierror, socket.timeout):
log.debug("Failed to convert mesos leader hostname to fqdn!")
raise
log.debug("Mesos Leader: %s" % fqdn)
return fqdn
else:
raise ValueError("Expected to receive a valid URL, got: %s" % url)
def is_mesos_leader(hostname: str = MY_HOSTNAME) -> bool:
return get_mesos_leader() == hostname
class MesosLeaderUnavailable(Exception):
pass
def find_mesos_leader(cluster):
master = (
load_system_paasta_config().get_cluster_fqdn_format().format(cluster=cluster)
)
if master is None:
raise ValueError("Mesos master is required to find leader")
url = f"http://{master}:{MESOS_MASTER_PORT}/redirect"
try:
response = requests.get(url, timeout=(5, 30))
except Exception as e:
raise MesosLeaderUnavailable(e)
hostname = urlparse(response.url).hostname
return f"{hostname}:{MESOS_MASTER_PORT}"
async def get_current_tasks(job_id: str) -> List[Task]:
mesos_master = get_mesos_master()
framework_tasks = await mesos_master.tasks(fltr=job_id, active_only=False)
return framework_tasks
def is_task_running(task: Task) -> bool:
return task["state"] == "TASK_RUNNING"
def filter_running_tasks(tasks: Collection[Task]) -> List[Task]:
return [task for task in tasks if is_task_running(task)]
def filter_not_running_tasks(tasks: Collection[Task]) -> List[Task]:
return [task for task in tasks if not is_task_running(task)]
async def get_running_tasks_from_frameworks(job_id=""):
active_framework_tasks = await get_current_tasks(job_id)
running_tasks = filter_running_tasks(active_framework_tasks)
return running_tasks
async def get_all_running_tasks() -> Collection[Task]:
framework_tasks = await get_current_tasks("")
mesos_master = get_mesos_master()
framework_tasks += await mesos_master.orphan_tasks()
running_tasks = filter_running_tasks(framework_tasks)
return running_tasks
@async_ttl_cache(ttl=600)
async def get_cached_list_of_all_current_tasks():
return await get_current_tasks("")
@async_ttl_cache(ttl=600)
async def get_cached_list_of_running_tasks_from_frameworks():
return [
task
for task in filter_running_tasks(await get_cached_list_of_all_current_tasks())
]
@async_ttl_cache(ttl=600)
async def get_cached_list_of_not_running_tasks_from_frameworks():
return [
task
for task in filter_not_running_tasks(
await get_cached_list_of_all_current_tasks()
)
]
def select_tasks_by_id(tasks: Collection[Task], job_id: str = "") -> List[Task]:
return [task for task in tasks if job_id in task["id"]]
async def get_non_running_tasks_from_frameworks(job_id: str = "") -> List[Task]:
active_framework_tasks = await get_current_tasks(job_id)
not_running_tasks = filter_not_running_tasks(active_framework_tasks)
return not_running_tasks
async def get_short_hostname_from_task(task: Task) -> str:
try:
slave_hostname = (await task.slave())["hostname"]
return slave_hostname.split(".")[0]
except (AttributeError, SlaveDoesNotExist):
return "Unknown"
def get_first_status_timestamp(task: Task) -> Optional[float]:
try:
start_time_string = task["statuses"][0]["timestamp"]
return float(start_time_string)
except (IndexError, SlaveDoesNotExist):
return None
def get_first_status_timestamp_string(task: Task) -> str:
first_status_timestamp = get_first_status_timestamp(task)
if first_status_timestamp is None:
return "Unknown"
else:
first_status_datetime = datetime.datetime.fromtimestamp(first_status_timestamp)
return "{} ({})".format(
first_status_datetime.strftime("%Y-%m-%dT%H:%M"),
humanize.naturaltime(first_status_datetime),
)
async def get_mem_usage(task: Task) -> str:
try:
task_mem_limit = await task.mem_limit()
task_rss = await task.rss()
if task_mem_limit == 0:
return "Undef"
mem_percent = task_rss / task_mem_limit * 100
mem_string = "%d/%dMB" % (
(task_rss / 1024 / 1024),
(task_mem_limit / 1024 / 1024),
)
if mem_percent > 90:
return PaastaColors.red(mem_string)
else:
return mem_string
except (AttributeError, SlaveDoesNotExist):
return "None"
except TimeoutError:
return "Timed Out"
async def get_cpu_shares(task: Task) -> float:
cpu_shares = await task.cpu_limit()
return cpu_shares - 0.1
async def get_cpu_usage(task: Task) -> str:
try:
start_time = round(task["statuses"][0]["timestamp"])
current_time = int(datetime.datetime.now().strftime("%s"))
duration_seconds = current_time - start_time
cpu_shares = await get_cpu_shares(task)
allocated_seconds = duration_seconds * cpu_shares
task_stats = await task.stats()
used_seconds = task_stats.get("cpus_system_time_secs", 0.0) + task_stats.get(
"cpus_user_time_secs", 0.0
)
if allocated_seconds == 0:
return "Undef"
percent = round(100 * (used_seconds / allocated_seconds), 1)
percent_string = "%s%%" % percent
if percent > 90:
return PaastaColors.red(percent_string)
else:
return percent_string
except (AttributeError, SlaveDoesNotExist):
return "None"
except TimeoutError:
return "Timed Out"
async def results_or_unknown(future: Awaitable[str]) -> str:
try:
return await future
except Exception:
return PaastaColors.red("Unknown")
|
Apache License 2.0
|
databiosphere/toil
|
src/toil/test/__init__.py
|
ToilTest._getSourceDistribution
|
python
|
def _getSourceDistribution(cls):
sdistPath = os.path.join(cls._projectRootPath(), 'dist', 'toil-%s.tar.gz' % distVersion)
assert os.path.isfile(sdistPath), "Can't find Toil source distribution at %s. Run 'make sdist'." % sdistPath
excluded = set(cls._run('git', 'ls-files', '--others', '-i', '--exclude-standard',
capture=True,
cwd=cls._projectRootPath()).splitlines())
dirty = cls._run('find', 'src', '-type', 'f', '-newer', sdistPath,
capture=True,
cwd=cls._projectRootPath()).splitlines()
assert all(path.startswith('src') for path in dirty)
dirty = set(dirty)
dirty.difference_update(excluded)
assert not dirty, "Run 'make clean_sdist sdist'. Files newer than {}: {!r}".format(sdistPath, list(dirty))
return sdistPath
|
Find the sdist tarball for this project, check whether it is up to date, and return the
path to it.
:rtype: str
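Example: a sketch of how a test might use it; the subclass and test names are illustrative,
and a prior `make sdist` in the checkout is required.
    class MyTest(ToilTest):
        def test_sdist_present(self):
            sdist_path = self._getSourceDistribution()
            self.assertTrue(sdist_path.endswith('.tar.gz'))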
|
https://github.com/databiosphere/toil/blob/eb2ae8365ae2ebdd50132570b20f7d480eb40cac/src/toil/test/__init__.py#L169-L188
|
import datetime
import logging
import os
import random
import re
import shutil
import signal
import subprocess
import tempfile
import threading
import time
import unittest
import uuid
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from inspect import getsource
from shutil import which
from textwrap import dedent
from unittest.util import strclass
from urllib.request import urlopen
import pytz
from toil import ApplianceImageNotFound, applianceSelf, toilPackageDirPath
from toil.lib.iterables import concat
from toil.lib.memoize import memoize
from toil.lib.threading import ExceptionalThread, cpu_count
from toil.provisioners.aws import running_on_ec2
from toil.version import distVersion
logger = logging.getLogger(__name__)
class ToilTest(unittest.TestCase):
_tempBaseDir = None
_tempDirs = None
def setup_method(self, method):
western = pytz.timezone('America/Los_Angeles')
california_time = western.localize(datetime.datetime.now())
timestamp = california_time.strftime("%b %d %Y %H:%M:%S:%f %Z")
print(f"\n\n[TEST] {strclass(self.__class__)}:{self._testMethodName} ({timestamp})\n\n")
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._tempDirs = []
tempBaseDir = os.environ.get('TOIL_TEST_TEMP', None)
if tempBaseDir is not None and not os.path.isabs(tempBaseDir):
tempBaseDir = os.path.abspath(os.path.join(cls._projectRootPath(), tempBaseDir))
os.makedirs(tempBaseDir, exist_ok=True)
cls._tempBaseDir = tempBaseDir
@classmethod
def tearDownClass(cls):
if cls._tempBaseDir is None:
while cls._tempDirs:
tempDir = cls._tempDirs.pop()
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
else:
cls._tempDirs = []
super().tearDownClass()
def setUp(self):
logger.info("Setting up %s ...", self.id())
super().setUp()
def tearDown(self):
super().tearDown()
logger.info("Tore down %s", self.id())
@classmethod
def awsRegion(cls):
return cls._region() if running_on_ec2() else 'us-west-2'
@classmethod
def _availabilityZone(cls):
zone = urlopen('http://169.254.169.254/latest/meta-data/placement/availability-zone').read()
return zone if not isinstance(zone, bytes) else zone.decode('utf-8')
@classmethod
@memoize
def _region(cls):
region = re.match(r'^([a-z]{2}-[a-z]+-[1-9][0-9]*)([a-z])$', cls._availabilityZone())
assert region
return region.group(1)
@classmethod
def _getUtilScriptPath(cls, script_name):
return os.path.join(toilPackageDirPath(), 'utils', script_name + '.py')
@classmethod
def _projectRootPath(cls):
assert re.search(r'__init__\.pyc?$', __file__)
projectRootPath = os.path.dirname(os.path.abspath(__file__))
packageComponents = __name__.split('.')
expectedSuffix = os.path.join('src', *packageComponents)
assert projectRootPath.endswith(expectedSuffix)
projectRootPath = projectRootPath[:-len(expectedSuffix)]
return projectRootPath
def _createTempDir(self, purpose=None):
return self._createTempDirEx(self._testMethodName, purpose)
@classmethod
def _createTempDirEx(cls, *names):
prefix = ['toil', 'test', strclass(cls)]
prefix.extend([_f for _f in names if _f])
prefix.append('')
temp_dir_path = os.path.realpath(tempfile.mkdtemp(dir=cls._tempBaseDir, prefix='-'.join(prefix)))
cls._tempDirs.append(temp_dir_path)
return temp_dir_path
def _getTestJobStorePath(self):
path = self._createTempDir(purpose='jobstore')
os.rmdir(path)
return path
@classmethod
|
Apache License 2.0
|
google-research/federated
|
utils/models/emnist_models.py
|
create_conv_dropout_model
|
python
|
def create_conv_dropout_model(only_digits: bool = True,
seed: Optional[int] = None):
data_format = 'channels_last'
if seed is not None:
tf.random.set_seed(seed)
initializer = tf.keras.initializers.GlorotNormal(seed=seed)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1),
kernel_initializer=initializer),
tf.keras.layers.Conv2D(
64,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
kernel_initializer=initializer),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25, seed=seed),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
128, activation='relu', kernel_initializer=initializer),
tf.keras.layers.Dropout(0.5, seed=seed),
tf.keras.layers.Dense(
10 if only_digits else 62,
activation=tf.nn.softmax,
kernel_initializer=initializer),
])
return model
|
Convolutional model with dropout for EMNIST experiments.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
seed: A random seed governing the model initialization and layer randomness.
If not `None`, then the global random seed will be set before constructing
the tensor initializer, in order to guarantee the same model is produced.
Returns:
A `tf.keras.Model`.
|
https://github.com/google-research/federated/blob/909953fa8945cfac01328e0a6d878e1dc0376c3c/utils/models/emnist_models.py#L22-L68
|
import functools
from typing import Optional
import tensorflow as tf
|
Apache License 2.0
|
rcbops/ansible-lxc-rpc
|
scripts/rpc-wheel-builder.py
|
new_setup
|
python
|
def new_setup(user_args, input_path):
LOG.info('Discovering input file(s)')
var_files = None
if os.path.isdir(user_args['input']):
var_files = get_file_names(path=input_path, ext='.yml')
else:
if not input_path.endswith(('.yml', '.yaml')):
error = (
'The file you specified, [ %s ] does not have a valid yaml'
' extension. Please check your file and try again.'
% input_path
)
_error_handler(msg=error)
else:
var_files = [input_path]
LOG.info('Building the package list')
for var_file in var_files:
package_dict(var_file=var_file)
ensure_consistency()
|
Discover all yaml files in the input directory.
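Example: a hedged sketch of calling it from the script's entry point; the input path is a
placeholder and the module-level LOG must already be configured by the script's main().
    user_args = {'input': '/etc/rpc_deployment/vars'}
    new_setup(user_args, input_path=user_args['input'])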
|
https://github.com/rcbops/ansible-lxc-rpc/blob/5b2b31d976596068a559b0a9a9bcd17032e9aeb1/scripts/rpc-wheel-builder.py#L526-L550
|
import argparse
import datetime
import json
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
import urlparse
from distutils import version
import requests
import yaml
from cloudlib import logger
PYTHON_PACKAGES = {
'base_release': dict(),
'known_release': dict(),
'from_git': dict(),
'required_packages': dict(),
'built_files': list()
}
GIT_REPOS = []
GIT_REQUIREMENTS_MAP = {
'github.com': 'https://raw.githubusercontent.com/%(path)s/%(branch)s'
'/%(file)s',
'openstack.org': 'https://git.openstack.org/cgit/%(path)s/plain'
'/%(file)s?id=%(branch)s'
}
VERSION_DESCRIPTORS = [
'>=', '<=', '==', '!=', '<', '>'
]
LOG = None
class IndicatorThread(object):
def __init__(self, note=None, system=True, debug=False, quiet=False):
self.quiet = quiet
self.debug = debug
self.system = system
self.note = note
if self.note is None:
self.note = 'Please Wait... '
self.job = None
def __enter__(self):
if all([self.debug is False, self.quiet is False]):
return self.indicator_thread()
def __exit__(self, exc_type, exc_val, exc_tb):
self.system = False
if all([self.debug is False, self.quiet is False]):
print('Done.')
self.job.terminate()
def indicator(self):
while self.system:
busy_chars = ['|', '/', '-', '\\']
for bc in busy_chars:
note = self.note
sys.stdout.write('\rProcessing - [ %s ] - %s' % (bc, note))
sys.stdout.flush()
time.sleep(.1)
self.system = self.system
def indicator_thread(self):
self.job = multiprocessing.Process(target=self.indicator)
self.job.start()
return self.job
class LoggerWriter(object):
@property
def fileno(self):
return LOG.handlers[0].stream.fileno
def get_file_names(path, ext=None):
paths = os.walk(os.path.abspath(path))
files = list()
for fpath, _, afiles in paths:
for afile in afiles:
if ext is not None:
if afile.endswith(ext):
files.append(os.path.join(fpath, afile))
else:
files.append(os.path.join(fpath, afile))
else:
return files
def requirements_parse(pkgs):
for pkg in pkgs:
LOG.debug('Parsing python dependencies: %s', pkg)
if '==' in pkg:
required_packages = PYTHON_PACKAGES['required_packages']
pkg_name = '-'.join(pkg.split('=='))
if pkg_name not in required_packages:
required_packages[pkg_name] = pkg
split_pkg = pkg.split(',')
for version_descriptor in VERSION_DESCRIPTORS:
if version_descriptor in split_pkg[0]:
name, ver = split_pkg[0].split(version_descriptor)
ver = '%s%s' % (version_descriptor, ver)
if len(split_pkg) > 1:
versions = split_pkg[1:]
versions.insert(0, ver)
else:
versions = [ver]
break
else:
name = split_pkg[0]
versions = None
base_release = PYTHON_PACKAGES['base_release']
if name in base_release:
saved_versions = base_release[name]
if versions is not None:
if '==' in versions:
_lv = version.LooseVersion
if _lv(versions) < _lv(saved_versions):
versions = saved_versions
LOG.debug(
'New version found for replacement: [ %s ]',
versions
)
if isinstance(versions, list):
base_release[name.lower()] = '%s%s' % (name, ','.join(versions))
elif versions is not None:
base_release[name.lower()] = '%s%s' % (name, versions)
else:
base_release[name.lower()] = name
def package_dict(var_file):
LOG.debug('Opening [ %s ]', var_file)
with open(var_file, 'rb') as f:
package_vars = yaml.safe_load(f.read())
pip_pkgs = package_vars.get('service_pip_dependencies')
if pip_pkgs:
requirements_parse(pkgs=pip_pkgs)
git_repo = package_vars.get('git_repo')
if git_repo:
if git_repo not in GIT_REPOS:
GIT_REPOS.append(git_repo)
LOG.debug('Building git type package [ %s ]', git_repo)
git_url = urlparse.urlsplit(git_repo)
repo_name = os.path.basename(git_url.path)
repo = PYTHON_PACKAGES['from_git'][repo_name] = {}
repo['branch'] = package_vars.get('git_install_branch', 'master')
repo['full_url'] = git_repo
repo['project'] = repo_name
setup_file = None
for k, v in GIT_REQUIREMENTS_MAP.iteritems():
if k in git_repo:
requirements_request = v % {
'path': git_url.path.lstrip('/'),
'file': package_vars.get(
'requirements_file', 'requirements.txt'
),
'branch': repo['branch']
}
req = requests.get(requirements_request)
if req.status_code == 200:
requirements = [
i.split()[0] for i in req.text.splitlines()
if i
if not i.startswith('#')
]
repo['requirements'] = requirements
requirements_parse(pkgs=requirements)
setup_request = v % {
'path': git_url.path.lstrip('/'),
'file': 'setup.py',
'branch': repo['branch']
}
setup = requests.head(setup_request)
if setup.status_code == 200:
setup_file = True
break
git_req = 'git+%s@%s'
known_release = PYTHON_PACKAGES['known_release']
if setup_file is True:
known_release[repo_name] = git_req % (
repo['full_url'], repo['branch']
)
git_repo_plugins = package_vars.get('git_repo_plugins')
if git_repo_plugins:
for grp in git_repo_plugins:
LOG.debug(
'Building git type package with plugins [ %s ]',
git_repo_plugins
)
plugin = '%s/%s' % (
grp['path'].strip('/'),
grp['package'].lstrip('/')
)
known_release[grp['package']] = git_req % (
git_url.geturl(),
'%s#egg=%s&subdirectory=%s' % (
repo['branch'],
grp['package'].strip('/'),
plugin
)
)
def retryloop(attempts, timeout=None, delay=None, backoff=1, obj=None):
starttime = time.time()
success = set()
for _ in range(attempts):
success.add(True)
yield success.clear
if success:
return
duration = time.time() - starttime
if timeout is not None and duration > timeout:
break
if delay:
time.sleep(delay)
delay *= backoff
error = (
'RetryError: FAILED TO PROCESS [ %s ] after [ %s ] Attempts' % (
obj,
attempts
)
)
_error_handler(msg=error)
def build_wheel(wheel_dir, build_dir, link_dir, dist=None, pkg_name=None,
make_opts=None):
command = [
'pip',
'wheel',
'--find-links',
link_dir,
'--timeout',
'120',
'--wheel-dir',
wheel_dir,
'--allow-all-external',
'--build',
build_dir
]
if make_opts is not None:
for make_opt in make_opts:
command.append(make_opt)
if dist is not None:
command.extend(['--requirement', dist])
elif pkg_name is not None:
command.append(pkg_name)
else:
raise SyntaxError('neither "dist" nor "pkg_name" was specified')
build_command = ' '.join(command)
LOG.info('Command: %s' % build_command)
output, unused_err = None, None
for retry in retryloop(3, obj=build_command, delay=2, backoff=1):
try:
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=LoggerWriter()
)
output, unused_err = process.communicate()
retcode = process.poll()
LOG.info('Command return code: [ %s ]', retcode)
if retcode:
raise subprocess.CalledProcessError(
retcode, build_command, output=output
)
except subprocess.CalledProcessError as exp:
LOG.warn(
'Process failure. stderr: [ %s ], stdout [ %s ], Exception'
' [ %s ]. Removing build directory for retry. Check log for'
' more details.',
unused_err,
output,
str(exp)
)
remove_dirs(directory=build_dir)
retry()
finally:
remove_dirs(directory=build_dir)
def remove_dirs(directory):
LOG.info('Removing directory [ %s ]', directory)
for file_name in get_file_names(path=directory):
LOG.debug('Removing file [ %s ]', file_name)
os.remove(file_name)
dir_names = []
for dir_name, _, _ in os.walk(directory):
dir_names.append(dir_name)
dir_names = sorted(dir_names, reverse=True)
for dir_name in dir_names:
try:
LOG.debug('Removing subdirectory [ %s ]', dir_name)
os.removedirs(dir_name)
except OSError:
pass
def copy_file(src, dst):
LOG.debug('Copying [ %s ] -> [ %s ]', src, dst)
with open(src, 'rb') as open_src:
with open(dst, 'wb') as open_dst:
while True:
buf = open_src.read(24 * 1024)
if not buf:
break
else:
open_dst.write(buf)
def _requirements_maker(name, wheel_dir, release, build_dir, make_opts,
link_dir=None, iterate=False):
if link_dir is None:
link_dir = wheel_dir
if iterate is True:
for pkg in sorted(release.values()):
build_wheel(
wheel_dir=wheel_dir,
build_dir=build_dir,
link_dir=link_dir,
pkg_name=pkg,
make_opts=make_opts
)
else:
requirements_file_lines = []
for value in sorted(set(release.values())):
requirements_file_lines.append('%s\n' % value)
requirements_file = os.path.join(wheel_dir, name)
with open(requirements_file, 'wb') as f:
f.writelines(requirements_file_lines)
build_wheel(
wheel_dir=wheel_dir,
build_dir=build_dir,
link_dir=link_dir,
dist=requirements_file,
make_opts=make_opts
)
def _make_wheels(wheel_dir, build_dir, temp_store_dir):
LOG.info('Building base packages')
_requirements_maker(
name='rpc_base_requirements.txt',
wheel_dir=temp_store_dir,
release=PYTHON_PACKAGES['base_release'],
build_dir=build_dir,
make_opts=None,
link_dir=wheel_dir
)
LOG.info('Building known absolute packages')
_requirements_maker(
name='rpc_known_requirements.txt',
wheel_dir=temp_store_dir,
release=PYTHON_PACKAGES['known_release'],
build_dir=build_dir,
make_opts=['--no-deps'],
link_dir=wheel_dir
)
LOG.info('Building required packages')
_requirements_maker(
name='rpc_required_requirements.txt',
wheel_dir=temp_store_dir,
release=PYTHON_PACKAGES['required_packages'],
build_dir=build_dir,
make_opts=None,
link_dir=wheel_dir,
iterate=True
)
built_wheels = get_file_names(temp_store_dir)
PYTHON_PACKAGES['built_files'] = [
os.path.basename(i) for i in built_wheels
]
LOG.info('Moving built packages into place')
for built_wheel in built_wheels:
wheel_file = os.path.join(wheel_dir, os.path.basename(built_wheel))
if os.path.exists(wheel_file):
if os.path.getsize(wheel_file) != os.path.getsize(built_wheel):
copy_file(src=built_wheel, dst=wheel_file)
else:
copy_file(src=built_wheel, dst=wheel_file)
def make_wheels(wheel_dir, build_dir):
temp_store_dir = os.path.join(
tempfile.mkdtemp(prefix='rpc_wheels_temp_storage')
)
_mkdirs(path=temp_store_dir)
try:
_make_wheels(
wheel_dir=wheel_dir,
build_dir=build_dir,
temp_store_dir=temp_store_dir
)
finally:
remove_dirs(directory=temp_store_dir)
remove_dirs(
directory=os.path.join(
tempfile.gettempdir(),
'pip_build_root'
)
)
def ensure_consistency():
LOG.info('Ensuring the package list is consistent')
for key in PYTHON_PACKAGES['known_release'].keys():
PYTHON_PACKAGES['base_release'].pop(key, None)
|
Apache License 2.0
|
awemulya/kobo-predict
|
kobocat/lib/python3.5/site-packages/pip/_vendor/cachecontrol/adapter.py
|
CacheControlAdapter.build_response
|
python
|
def build_response(self, request, response, from_cache=False):
if not from_cache and request.method == 'GET':
if response.status == 304:
cached_response = self.controller.update_cached_response(
request, response
)
if cached_response is not response:
from_cache = True
response.read(decode_content=False)
response.release_conn()
response = cached_response
elif response.status == 301:
self.controller.cache_response(request, response)
else:
if self.heuristic:
response = self.heuristic.apply(response)
response._fp = CallbackFileWrapper(
response._fp,
functools.partial(
self.controller.cache_response,
request,
response,
)
)
resp = super(CacheControlAdapter, self).build_response(
request, response
)
if request.method in self.invalidating_methods and resp.ok:
cache_url = self.controller.cache_url(request.url)
self.cache.delete(cache_url)
resp.from_cache = from_cache
return resp
|
Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response.
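A minimal usage sketch (assumptions: the vendored requests Session is used directly, the URL is illustrative, and whether the second call is answered from the cache depends on the response's cache headers):
import pprint
from pip._vendor import requests
from pip._vendor.cachecontrol.adapter import CacheControlAdapter

# Mount the caching adapter so GET requests flow through send()/build_response().
session = requests.Session()
adapter = CacheControlAdapter()
session.mount('http://', adapter)
session.mount('https://', adapter)

first = session.get('http://example.com/')    # network hit; response may be cached
second = session.get('http://example.com/')   # possibly served from the cache
pprint.pprint(second.from_cache)              # flag set by build_response()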
|
https://github.com/awemulya/kobo-predict/blob/f302d084e30fb637d43ec638c701e01a3dddc721/kobocat/lib/python3.5/site-packages/pip/_vendor/cachecontrol/adapter.py#L50-L113
|
import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
invalidating_methods = set(['PUT', 'DELETE'])
def __init__(self, cache=None,
cache_etags=True,
controller_class=None,
serializer=None,
heuristic=None,
*args, **kw):
super(CacheControlAdapter, self).__init__(*args, **kw)
self.cache = cache or DictCache()
self.heuristic = heuristic
controller_factory = controller_class or CacheController
self.controller = controller_factory(
self.cache,
cache_etags=cache_etags,
serializer=serializer,
)
def send(self, request, **kw):
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response,
from_cache=True)
request.headers.update(
self.controller.conditional_headers(request)
)
resp = super(CacheControlAdapter, self).send(request, **kw)
return resp
|
BSD 2-Clause Simplified License
|
adamlwgriffiths/pyglet
|
contrib/layout/layout/Plex/Traditional.py
|
REParser.lookahead
|
python
|
def lookahead(self, n):
j = self.i + n
if j < len(self.s):
return self.s[j]
else:
return ''
|
Look ahead n chars.
|
https://github.com/adamlwgriffiths/pyglet/blob/18bd86a8f235e4f5edd94b0d38073d0e5477a366/contrib/layout/layout/Plex/Traditional.py#L130-L136
|
from Regexps import *
from Errors import PlexError
class RegexpSyntaxError(PlexError):
pass
def re(s):
return REParser(s).parse_re()
class REParser:
def __init__(self, s):
self.s = s
self.i = -1
self.end = 0
self.next()
def parse_re(self):
re = self.parse_alt()
if not self.end:
self.error("Unexpected %s" % repr(self.c))
return re
def parse_alt(self):
re = self.parse_seq()
if self.c == '|':
re_list = [re]
while self.c == '|':
self.next()
re_list.append(self.parse_seq())
re = apply(Alt, tuple(re_list))
return re
def parse_seq(self):
re_list = []
while not self.end and not self.c in "|)":
re_list.append(self.parse_mod())
return apply(Seq, tuple(re_list))
def parse_mod(self):
re = self.parse_prim()
while not self.end and self.c in "*+?":
if self.c == '*':
re = Rep(re)
elif self.c == '+':
re = Rep1(re)
else:
re = Opt(re)
self.next()
return re
def parse_prim(self):
c = self.get()
if c == '.':
re = AnyBut("\n")
elif c == '^':
re = Bol
elif c == '$':
re = Eol
elif c == '(':
re = self.parse_alt()
self.expect(')')
elif c == '[':
re = self.parse_charset()
self.expect(']')
else:
if c == '\\':
c = self.get()
re = Char(c)
return re
def parse_charset(self):
char_list = []
invert = 0
if self.c == '^':
invert = 1
self.next()
if self.c == ']':
char_list.append(']')
self.next()
while not self.end and self.c <> ']':
c1 = self.get()
if self.c == '-' and self.lookahead(1) <> ']':
self.next()
c2 = self.get()
for a in xrange(ord(c1), ord(c2) + 1):
char_list.append(chr(a))
else:
char_list.append(c1)
chars = string.join(char_list, "")
if invert:
return AnyBut(chars)
else:
return Any(chars)
def next(self):
s = self.s
i = self.i = self.i + 1
if i < len(s):
self.c = s[i]
else:
self.c = ''
self.end = 1
def get(self):
if self.end:
self.error("Premature end of string")
c = self.c
self.next()
return c
|
BSD 3-Clause New or Revised License
|
kubevirt/client-python
|
kubevirt/models/v1_virtual_machine_instance_migration_condition.py
|
V1VirtualMachineInstanceMigrationCondition.status
|
python
|
def status(self):
return self._status
|
Gets the status of this V1VirtualMachineInstanceMigrationCondition.
:return: The status of this V1VirtualMachineInstanceMigrationCondition.
:rtype: str
|
https://github.com/kubevirt/client-python/blob/c9f9d3bc429f783076982b46b194d5f7669eab1b/kubevirt/models/v1_virtual_machine_instance_migration_condition.py#L107-L114
|
from pprint import pformat
from six import iteritems
import re
class V1VirtualMachineInstanceMigrationCondition(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, message=None, reason=None, status=None, type=None):
self._message = None
self._reason = None
self._status = None
self._type = None
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def message(self):
return self._message
@message.setter
def message(self, message):
self._message = message
@property
def reason(self):
return self._reason
@reason.setter
def reason(self, reason):
self._reason = reason
@property
|
Apache License 2.0
|
aiidateam/aiida-cp2k
|
aiida_cp2k/utils/input_generator.py
|
Cp2kInput._render_section
|
python
|
def _render_section(output, params, indent=0):
for key, val in sorted(params.items()):
if key.upper() != key:
raise ValueError(f"keyword '{key}' not upper case.")
if key.startswith(("@", "$")):
raise ValueError("CP2K preprocessor directives not supported.")
if isinstance(val, Mapping):
line = f"{' ' * indent}&{key}"
if "_" in val:
line += f" {val.pop('_')}"
output.append(line)
Cp2kInput._render_section(output, val, indent + 3)
output.append(f"{' ' * indent}&END {key}")
elif isinstance(val, Sequence) and not isinstance(val, str):
for listitem in val:
Cp2kInput._render_section(output, {key: listitem}, indent)
elif isinstance(val, bool):
val_str = '.TRUE.' if val else '.FALSE.'
output.append(f"{' ' * indent}{key} {val_str}")
else:
output.append(f"{' ' * indent}{key} {val}")
|
It takes a dictionary and recurses through it.
For each key-value pair it checks whether the value is a dictionary; if so, it prepends the key with & (CP2K section)
and passes the value to the same function, increasing the indentation. If the value is a list, it is assumed
that this is something the user wants to store repetitively,
eg:
.. highlight::
dict['KEY'] = ['val1', 'val2']
===>
KEY val1
KEY val2
or
dict['KIND'] = [{'_': 'Ba', 'ELEMENT':'Ba'},
{'_': 'Ti', 'ELEMENT':'Ti'},
{'_': 'O', 'ELEMENT':'O'}]
====>
&KIND Ba
ELEMENT Ba
&END KIND
&KIND Ti
ELEMENT Ti
&END KIND
&KIND O
ELEMENT O
&END KIND
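A small usage sketch of the public entry point (the parameter dictionary is made up for illustration and aiida-cp2k is assumed to be importable):
from aiida_cp2k.utils.input_generator import Cp2kInput

params = {
    "FORCE_EVAL": {
        "METHOD": "Quickstep",
        "SUBSYS": {
            "KIND": [
                {"_": "H", "ELEMENT": "H"},
                {"_": "O", "ELEMENT": "O"},
            ],
        },
    },
}
inp = Cp2kInput(params)
print(inp.render())
# !!! Generated by AiiDA !!!
# &FORCE_EVAL
#    METHOD Quickstep
#    &SUBSYS
#       &KIND H
#          ELEMENT H
#       &END KIND
#       &KIND O
#          ELEMENT O
#       &END KIND
#    &END SUBSYS
# &END FORCE_EVAL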
|
https://github.com/aiidateam/aiida-cp2k/blob/36644fdf69a20898f82da34206b0c02f97068390/aiida_cp2k/utils/input_generator.py#L119-L175
|
from copy import deepcopy
from collections.abc import Mapping, Sequence, MutableSequence
from aiida.orm import Dict
from aiida.engine import calcfunction
from .workchains import merge_dict
class Cp2kInput:
DISCLAIMER = "!!! Generated by AiiDA !!!"
def __init__(self, params=None):
if not params:
self._params = {}
else:
self._params = deepcopy(params)
def __getitem__(self, key):
return self._params[key]
def add_keyword(self, kwpath, value, override=True, conflicting_keys=None):
if isinstance(kwpath, str):
kwpath = kwpath.split("/")
Cp2kInput._add_keyword(kwpath, value, self._params, ovrd=override, cfct=conflicting_keys)
def render(self):
output = [self.DISCLAIMER]
self._render_section(output, deepcopy(self._params))
return "\n".join(output)
def param_iter(self, sections=True):
stack = [((k,), v) for k, v in self._params.items()]
while stack:
key, value = stack.pop(0)
if isinstance(value, Mapping):
if sections:
yield (key, value)
stack += [(key + (k,), v) for k, v in value.items()]
elif isinstance(value, MutableSequence):
for entry in value:
stack += [(key, entry)]
else:
yield (key, value)
@staticmethod
def _add_keyword(kwpath, value, params, ovrd, cfct):
conflicting_keys_present = []
if len(kwpath) == 1:
if cfct:
conflicting_keys_present = [key for key in cfct if key in params]
if ovrd:
params[kwpath[0]] = value
for key in conflicting_keys_present:
params.pop(key)
elif not conflicting_keys_present and kwpath[0] not in params:
params[kwpath[0]] = value
elif kwpath[0] not in params:
params[kwpath[0]] = {}
Cp2kInput._add_keyword(kwpath[1:], value, params[kwpath[0]], ovrd, cfct)
elif isinstance(params[kwpath[0]], Sequence) and not isinstance(params[kwpath[0]], str):
for element in params[kwpath[0]]:
Cp2kInput._add_keyword(kwpath[1:], value, element, ovrd, cfct)
elif not isinstance(params[kwpath[0]], Mapping):
if ovrd:
params[kwpath[0]] = {}
Cp2kInput._add_keyword(kwpath[1:], value, params[kwpath[0]], ovrd, cfct)
else:
Cp2kInput._add_keyword(kwpath[1:], value, params[kwpath[0]], ovrd, cfct)
@staticmethod
|
MIT License
|
openstack/zun
|
zun/image/glance/driver.py
|
GlanceDriver.delete_image_tar
|
python
|
def delete_image_tar(self, context, image):
repo = image.split(':')[0]
tag = image.split(':')[1]
image = self._search_image_on_host(context, repo, tag)
if image:
tarfile = image.get('path')
try:
os.unlink(tarfile)
except Exception as e:
LOG.exception('Cannot delete tar file %s', tarfile)
raise exception.ZunException(str(e))
|
Delete the image tar file that was pulled from glance.
|
https://github.com/openstack/zun/blob/7ed094696b75d2971d1a6d467bb95e2a641ad9ae/zun/image/glance/driver.py#L173-L184
|
import hashlib
import io
import os
import types
from oslo_log import log as logging
from oslo_utils import fileutils
from zun.common import exception
from zun.common.i18n import _
from zun.common import utils as common_utils
import zun.conf
from zun.image import driver
from zun.image.glance import utils
CONF = zun.conf.CONF
LOG = logging.getLogger(__name__)
class GlanceDriver(driver.ContainerImageDriver):
def __init__(self):
super(GlanceDriver, self).__init__()
def _search_image_on_host(self, context, repo, tag):
LOG.debug('Searching for image %s locally', repo)
images_directory = CONF.glance.images_directory
try:
image_meta = utils.find_image(context, repo, tag)
except exception.ImageNotFound:
return None
if image_meta:
out_path = os.path.join(images_directory,
image_meta.id + '.tar')
if os.path.isfile(out_path):
return {
'image': repo,
'path': out_path,
'checksum': image_meta.checksum}
else:
return None
def _verify_md5sum_for_image(self, image):
image_path = image['path']
image_checksum = image['checksum']
md5sum = hashlib.md5()
with open(image_path, 'rb') as fd:
while True:
data = fd.read(10 * 1024 * 1024)
if not data:
break
md5sum.update(data)
md5sum = md5sum.hexdigest()
if md5sum == image_checksum:
return True
return False
def pull_image(self, context, repo, tag, image_pull_policy, registry):
image_loaded = False
image = self._search_image_on_host(context, repo, tag)
if not common_utils.should_pull_image(image_pull_policy, bool(image)):
if image:
if self._verify_md5sum_for_image(image):
image_loaded = True
return image, image_loaded
else:
message = _('Image %s not present with pull policy of Never'
) % repo
raise exception.ImageNotFound(message)
LOG.debug('Pulling image from glance %s', repo)
try:
image_meta = utils.find_image(context, repo, tag)
LOG.debug('Image %s was found in glance, downloading now...', repo)
image_chunks = utils.download_image_in_chunks(context,
image_meta.id)
except exception.ImageNotFound:
LOG.error('Image %s was not found in glance', repo)
raise
except Exception as e:
msg = _('Cannot download image from glance: {0}')
raise exception.ZunException(msg.format(e))
try:
images_directory = CONF.glance.images_directory
fileutils.ensure_tree(images_directory)
out_path = os.path.join(images_directory, image_meta.id + '.tar')
with open(out_path, 'wb') as fd:
for chunk in image_chunks:
fd.write(chunk)
except Exception as e:
msg = _('Error occurred while writing image: {0}')
raise exception.ZunException(msg.format(e))
LOG.debug('Image %(repo)s was downloaded to path : %(path)s',
{'repo': repo, 'path': out_path})
image = {'image': image_meta.name, 'tags': image_meta.tags,
'path': out_path}
return image, image_loaded
def search_image(self, context, repo, tag, exact_match):
LOG.debug('Searching image in glance %s', repo)
try:
return utils.find_images(context, repo, tag, exact_match)
except Exception as e:
raise exception.ZunException(str(e))
def create_image(self, context, image_name):
LOG.debug('Creating a new image in glance %s', image_name)
try:
return utils.create_image(context, image_name)
except Exception as e:
raise exception.ZunException(str(e))
def update_image(self, context, img_id, disk_format='qcow2',
container_format='docker', tag=None):
LOG.debug('Updating an image %s in glance', img_id)
try:
tags = [tag] if tag else []
return utils.update_image(context, img_id, disk_format,
container_format, tags=tags)
except Exception as e:
raise exception.ZunException(str(e))
def upload_image_data(self, context, img_id, data):
LOG.debug('Uploading an image to glance %s', img_id)
try:
if isinstance(data, types.GeneratorType):
data = ''.encode("latin-1").join(data)
data = io.BytesIO(data)
return utils.upload_image_data(context, img_id, data)
except Exception as e:
raise exception.ZunException(str(e))
def delete_committed_image(self, context, img_id):
LOG.debug('Delete the committed image %s in glance', img_id)
try:
return utils.delete_image(context, img_id)
except Exception as e:
LOG.exception('Unknown exception occurred while deleting '
'image %s in glance: %s',
img_id,
str(e))
raise exception.ZunException(str(e))
|
Apache License 2.0
|
varianapis/pyesapi
|
pyesapi/stubs/Microsoft/CSharp.py
|
CSharpCodeProvider.GenerateCodeFromMember
|
python
|
def GenerateCodeFromMember(self, member, writer, options):
pass
|
GenerateCodeFromMember(self: CSharpCodeProvider, member: CodeTypeMember, writer: TextWriter, options: CodeGeneratorOptions)
Generates code for the specified class member using the specified text writer and code generator options.
member: A System.CodeDom.CodeTypeMember to generate code for.
writer: The System.IO.TextWriter to write to.
options: The System.CodeDom.Compiler.CodeGeneratorOptions to use when generating the code.
|
https://github.com/varianapis/pyesapi/blob/c7b1d2986cab9387e85dbb4331a44e5b743b86ea/pyesapi/stubs/Microsoft/CSharp.py#L63-L79
|
class CSharpCodeProvider(CodeDomProvider, IComponent, IDisposable):
def CreateCompiler(self):
pass
def CreateGenerator(self, *__args):
pass
def Dispose(self):
pass
|
MIT License
|
morganstanley/testplan
|
testplan/common/utils/strings.py
|
map_to_str
|
python
|
def map_to_str(value):
if isinstance(value, bytes):
return "".join(map(chr, value))
else:
return value
|
Convert bytes to str byte-by-byte
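Two illustrative calls:
assert map_to_str(b'abc') == 'abc'   # each byte is mapped through chr()
assert map_to_str('abc') == 'abc'    # non-bytes values are returned unchanged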
|
https://github.com/morganstanley/testplan/blob/8cb6a0ed0682698b2d6af82382fbb66d8d9e3ff7/testplan/common/utils/strings.py#L22-L30
|
import sys
import os
import re
import inspect
import uuid
import unicodedata
import textwrap
import colorama
colorama.init()
from termcolor import colored
from reportlab.pdfbase.pdfmetrics import stringWidth
_DESCRIPTION_CUTOFF_REGEX = re.compile(r"^(\s|\t)+")
|
Apache License 2.0
|
janpipek/physt
|
physt/histogram_collection.py
|
HistogramCollection.normalize_bins
|
python
|
def normalize_bins(self, inplace: bool = False) -> "HistogramCollection":
col = self if inplace else self.copy()
sums = self.sum().frequencies
for h in col.histograms:
h.set_dtype(float)
h._frequencies /= sums
h._errors2 /= sums ** 2
return col
|
Normalize each bin in the collection so that the sum is 1.0 for each bin.
Note: If a bin is zero in all collections, the result will be inf.
|
https://github.com/janpipek/physt/blob/bf6b05952b7d09bbbdae2b077f0989c392eac13e/physt/histogram_collection.py#L109-L120
|
from typing import Optional, Container, Tuple, Dict, Any, TYPE_CHECKING, cast
import numpy as np
from physt.histogram1d import Histogram1D, ObjectWithBinning
from physt.binnings import BinningBase, BinningLike, as_binning
from physt.typing_aliases import ArrayLike
if TYPE_CHECKING:
import physt
class HistogramCollection(Container[Histogram1D], ObjectWithBinning):
def __init__(
self,
*histograms: Histogram1D,
binning: Optional[BinningLike] = None,
title: Optional[str] = None,
name: Optional[str] = None,
):
self.histograms = list(histograms)
if histograms:
if binning:
raise ValueError(
"When creating collection from histograms, binning is deduced from them."
)
self._binning = histograms[0].binning
if not all(h.binning == self._binning for h in histograms):
raise ValueError("All histograms should share the same binning.")
else:
if binning is None:
raise ValueError("Either binning or at least one histogram must be provided.")
self._binning = as_binning(binning)
self.name = name
self.title = title or self.name
def __contains__(self, item):
try:
_ = self[item]
return True
except KeyError:
return False
def __iter__(self):
return iter(self.histograms)
def __len__(self):
return len(self.histograms)
def copy(self) -> "HistogramCollection":
binning_copy = self.binning.copy()
histograms = [h.copy() for h in self.histograms]
for histogram in histograms:
histogram._binning = binning_copy
return HistogramCollection(*histograms, title=self.title, name=self.name)
@property
def binning(self) -> BinningBase:
return self._binning
@property
def axis_name(self) -> str:
return self.histograms[0].axis_name if self.histograms else "axis0"
@property
def axis_names(self) -> Tuple[str]:
return (self.axis_name,)
def add(self, histogram: Histogram1D) -> None:
if self.binning and not self.binning == histogram.binning:
raise ValueError("Cannot add histogram with different binning.")
self.histograms.append(histogram)
def create(
self, name: str, values, *, weights=None, dropna: bool = True, **kwargs
) -> Histogram1D:
init_kwargs: Dict[str, Any] = {"axis_name": self.axis_name}
init_kwargs.update(kwargs)
histogram = Histogram1D(binning=self.binning, name=name, **init_kwargs)
histogram.fill_n(values, weights=weights, dropna=dropna)
self.histograms.append(histogram)
return histogram
def __getitem__(self, item) -> Histogram1D:
if isinstance(item, str):
candidates = [h for h in self.histograms if h.name == item]
if len(candidates) == 0:
raise KeyError(f"Collection does not contain histogram named '{item}'.")
return candidates[0]
else:
return self.histograms[item]
def __eq__(self, other) -> bool:
return (
(type(other) == HistogramCollection)
and (len(other) == len(self))
and all((h1 == h2) for h1, h2 in zip(self.histograms, other.histograms))
)
|
MIT License
|
coleifer/walrus
|
walrus/search/porter.py
|
PorterStemmer.cons
|
python
|
def cons(self, i):
if (self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or
self.b[i] == 'o' or self.b[i] == 'u'):
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
|
cons(i) is TRUE <=> b[i] is a consonant.
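A quick illustration that sets the stemmer's internal buffer directly (normally the stemming routine manages b, k0 and k itself):
p = PorterStemmer()
p.b = "toy"   # word buffer
p.k0 = 0      # start index of the word
p.k = 2       # end index of the word
assert p.cons(0) == 1   # 't' is a consonant
assert p.cons(1) == 0   # 'o' is a vowel
assert p.cons(2)        # 'y' after a vowel behaves as a consonant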
|
https://github.com/coleifer/walrus/blob/a66f3d7d1dbc27a7105284238634168b6762b633/walrus/search/porter.py#L54-L64
|
import sys
class PorterStemmer(object):
def __init__(self):
self.b = ""
self.k = 0
self.k0 = 0
self.j = 0
|
MIT License
|
criteo/criteo-python-marketing-sdk
|
criteo_marketing/models/policy_route_info.py
|
PolicyRouteInfo.__eq__
|
python
|
def __eq__(self, other):
if not isinstance(other, PolicyRouteInfo):
return False
return self.__dict__ == other.__dict__
|
Returns true if both objects are equal
|
https://github.com/criteo/criteo-python-marketing-sdk/blob/1093f86cf035cb6ce657b47f0f5e768c1fc2271c/criteo_marketing/models/policy_route_info.py#L181-L186
|
import pprint
import re
import six
class PolicyRouteInfo(object):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'route': 'str',
'method': 'str',
'controller_name': 'str',
'action_name': 'str'
}
attribute_map = {
'route': 'route',
'method': 'method',
'controller_name': 'controllerName',
'action_name': 'actionName'
}
def __init__(self, route=None, method=None, controller_name=None, action_name=None):
self._route = None
self._method = None
self._controller_name = None
self._action_name = None
self.discriminator = None
if route is not None:
self.route = route
if method is not None:
self.method = method
if controller_name is not None:
self.controller_name = controller_name
if action_name is not None:
self.action_name = action_name
@property
def route(self):
return self._route
@route.setter
def route(self, route):
self._route = route
@property
def method(self):
return self._method
@method.setter
def method(self, method):
self._method = method
@property
def controller_name(self):
return self._controller_name
@controller_name.setter
def controller_name(self, controller_name):
self._controller_name = controller_name
@property
def action_name(self):
return self._action_name
@action_name.setter
def action_name(self, action_name):
self._action_name = action_name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
|
Apache License 2.0
|
gawel/aiocron
|
aiocron/__init__.py
|
wrap_func
|
python
|
def wrap_func(func):
if isinstance(func, functools.partial):
_func = func.func
else:
_func = func
if not asyncio.iscoroutinefunction(_func):
@functools.wraps(func)
async def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return func
|
Wrap a plain callable in a coroutine function if it is not one already.
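A short sketch of the effect (the synchronous function below is illustrative):
import asyncio

def add(a, b):               # a plain, non-async function
    return a + b

wrapped = wrap_func(add)     # now a coroutine function
assert asyncio.iscoroutinefunction(wrapped)
assert asyncio.run(wrapped(1, 2)) == 3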
|
https://github.com/gawel/aiocron/blob/17de9d15fc3ed51ad0d2da64b11577dfe64db6c7/aiocron/__init__.py#L15-L26
|
from croniter.croniter import croniter
from datetime import datetime
from tzlocal import get_localzone
from uuid import uuid4
import time
import functools
import asyncio
async def null_callback(*args):
return args
|
MIT License
|
denik/vwoptimize
|
vwoptimizelib/third_party/hyperopt/mongoexp.py
|
MongoTrials.attachments
|
python
|
def attachments(self):
gfs = self.handle.gfs
query = {}
if self._exp_key:
query['exp_key'] = self._exp_key
class Attachments(object):
def __iter__(_self):
if query:
filenames = [fname
for fname in gfs.list()
if fname in _self]
else:
filenames = gfs.list()
return iter(filenames)
def __contains__(_self, name):
return gfs.exists(filename=name, **query)
def __getitem__(_self, name):
try:
rval = gfs.get_version(filename=name, **query).read()
return rval
except gridfs.NoFile:
raise KeyError(name)
def __setitem__(_self, name, value):
if gfs.exists(filename=name, **query):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
gfs.put(value, filename=name, encoding='utf-8', **query)
def __delitem__(_self, name):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
return Attachments()
|
Attachments to a Trials set (such as bandit args).
Supports load syntax: self.attachments[name]
Supports store syntax: self.attachments[name] = value
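A minimal sketch (the connection string, exp_key and attachment name are placeholders; a reachable MongoDB instance is assumed):
trials = MongoTrials('mongo://localhost:27017/mydb/jobs', exp_key='exp1')
trials.attachments['bandit_args'] = b'serialized arguments'   # store via GridFS put
blob = trials.attachments['bandit_args']                      # load the raw bytes back
assert 'bandit_args' in trials.attachments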
|
https://github.com/denik/vwoptimize/blob/854612d0372810461256ccf594a686da55af1194/vwoptimizelib/third_party/hyperopt/mongoexp.py#L916-L962
|
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
import copy
import six.moves.cPickle as pickle
import logging
import optparse
import os
import signal
import socket
import subprocess
import sys
import time
import urllib.parse
import warnings
import numpy
import pymongo
import gridfs
from bson import SON
from .base import JOB_STATES
from .base import (JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE,
JOB_STATE_ERROR)
from .base import Trials
from .base import InvalidTrial
from .base import Ctrl
from .base import SONify
from .base import spec_from_misc
from .utils import coarse_utcnow
from .utils import fast_isin
from .utils import get_most_recent_inds
from .utils import json_call
from .utils import working_dir, temp_dir
import six
from six.moves import map
from six.moves import range
__authors__ = ["James Bergstra", "Dan Yamins"]
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
standard_library.install_aliases()
logger = logging.getLogger(__name__)
class OperationFailure(Exception):
class Shutdown(Exception):
class WaitQuit(Exception):
class InvalidMongoTrial(InvalidTrial):
pass
class DomainSwapError(Exception):
class ReserveTimeout(Exception):
def read_pw():
username = 'hyperopt'
password = open(os.path.join(os.getenv('HOME'), ".hyperopt")).read()[:-1]
return dict(
username=username,
password=password)
def authenticate_for_db(db):
d = read_pw()
db.authenticate(d['username'], d['password'])
def parse_url(url, pwfile=None):
protocol = url[:url.find(':')]
ftp_url = 'ftp' + url[url.find(':'):]
tmp = urllib.parse.urlparse(ftp_url)
logger.info('PROTOCOL %s' % protocol)
logger.info('USERNAME %s' % tmp.username)
logger.info('HOSTNAME %s' % tmp.hostname)
logger.info('PORT %s' % tmp.port)
logger.info('PATH %s' % tmp.path)
try:
_, dbname, collection = tmp.path.split('/')
except:
print("Failed to parse '%s'" % (str(tmp.path)), file=sys.stderr)
raise
logger.info('DB %s' % dbname)
logger.info('COLLECTION %s' % collection)
if tmp.password is None:
if (tmp.username is not None) and pwfile:
password = open(pwfile).read()[:-1]
else:
password = None
else:
password = tmp.password
logger.info('PASS %s' % password)
port = int(float(tmp.port))
return (protocol, tmp.username, password, tmp.hostname, port, dbname, collection)
def connection_with_tunnel(host='localhost',
auth_dbname='admin', port=27017,
ssh=False, user='hyperopt', pw=None):
if ssh:
local_port = numpy.random.randint(low=27500, high=28000)
ssh_tunnel = subprocess.Popen(
['ssh', '-NTf', '-L',
'%i:%s:%i' % (local_port, '127.0.0.1', port),
host],
)
time.sleep(.5)
connection = pymongo.MongoClient('127.0.0.1', local_port,
document_class=SON, w=1, j=True)
else:
connection = pymongo.MongoClient(host, port, document_class=SON, w=1, j=True)
if user:
if user == 'hyperopt':
authenticate_for_db(connection[auth_dbname])
else:
raise NotImplementedError()
ssh_tunnel = None
return connection, ssh_tunnel
def connection_from_string(s):
protocol, user, pw, host, port, db, collection = parse_url(s)
if protocol == 'mongo':
ssh = False
elif protocol in ('mongo+ssh', 'ssh+mongo'):
ssh = True
else:
raise ValueError('unrecognized protocol for MongoJobs', protocol)
connection, tunnel = connection_with_tunnel(
ssh=ssh,
user=user,
pw=pw,
host=host,
port=port,
)
return connection, tunnel, connection[db], connection[db][collection]
class MongoJobs(object):
def __init__(self, db, jobs, gfs, conn, tunnel, config_name):
self.db = db
self.jobs = jobs
self.gfs = gfs
self.conn = conn
self.tunnel = tunnel
self.config_name = config_name
coll = property(lambda s: s.jobs)
@classmethod
def alloc(cls, dbname, host='localhost',
auth_dbname='admin', port=27017,
jobs_coll='jobs', gfs_coll='fs', ssh=False, user=None, pw=None):
connection, tunnel = connection_with_tunnel(
host, auth_dbname, port, ssh, user, pw)
db = connection[dbname]
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, db[jobs_coll], gfs, connection, tunnel)
@classmethod
def new_from_connection_str(cls, conn_str, gfs_coll='fs', config_name='spec'):
connection, tunnel, db, coll = connection_from_string(conn_str)
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, coll, gfs, connection, tunnel, config_name)
def __iter__(self):
return self.jobs.find()
def __len__(self):
try:
return self.jobs.count()
except:
return 0
def create_jobs_indexes(self):
jobs = self.db.jobs
for k in ['exp_key', 'result.loss', 'book_time']:
jobs.create_index(k)
def create_drivers_indexes(self):
drivers = self.db.drivers
drivers.create_index('exp_key', unique=True)
def create_indexes(self):
self.create_jobs_indexes()
self.create_drivers_indexes()
def jobs_complete(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_DONE))
return c if cursor else list(c)
def jobs_error(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_ERROR))
return c if cursor else list(c)
def jobs_running(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
rval = [r for r in rval if not r.get('MIA', False)]
return rval
def jobs_dead(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(filter=dict(state=JOB_STATE_RUNNING)))
rval = [r for r in rval if r.get('MIA', False)]
return rval
def jobs_queued(self, cursor=False):
c = self.jobs.find(filter=dict(state=JOB_STATE_NEW))
return c if cursor else list(c)
def insert(self, job):
try:
cpy = copy.deepcopy(job)
_id = self.jobs.insert(cpy, check_keys=True)
assert _id == cpy['_id']
return cpy
except pymongo.errors.OperationFailure as e:
raise OperationFailure(e)
def delete(self, job):
try:
self.jobs.remove(job)
except pymongo.errors.OperationFailure as e:
raise OperationFailure(e)
def delete_all(self, cond=None):
if cond is None:
cond = {}
try:
for d in self.jobs.find(filter=cond, projection=['_id', '_attachments']):
logger.info('deleting job %s' % d['_id'])
for name, file_id in d.get('_attachments', []):
try:
self.gfs.delete(file_id)
except gridfs.errors.NoFile:
logger.error('failed to remove attachment %s:%s' % (
name, file_id))
self.jobs.remove(d)
except pymongo.errors.OperationFailure as e:
raise OperationFailure(e)
def delete_all_error_jobs(self):
return self.delete_all(cond={'state': JOB_STATE_ERROR})
def reserve(self, host_id, cond=None, exp_key=None):
now = coarse_utcnow()
if cond is None:
cond = {}
else:
cond = copy.copy(cond)
if exp_key is not None:
cond['exp_key'] = exp_key
if cond.get('owner') is not None:
raise ValueError('refusing to reserve owned job')
else:
cond['owner'] = None
cond['state'] = JOB_STATE_NEW
try:
rval = self.jobs.find_and_modify(
cond,
{'$set':
{'owner': host_id,
'book_time': now,
'state': JOB_STATE_RUNNING,
'refresh_time': now,
}
},
new=True,
upsert=False)
except pymongo.errors.OperationFailure as e:
logger.error('Error during reserve_job: %s' % str(e))
rval = None
return rval
def refresh(self, doc):
self.update(doc, dict(refresh_time=coarse_utcnow()))
def update(self, doc, dct, collection=None, do_sanity_checks=True):
if collection is None:
collection = self.coll
dct = copy.deepcopy(dct)
if '_id' not in doc:
raise ValueError('doc must have an "_id" key to be updated')
if '_id' in dct:
if dct['_id'] != doc['_id']:
raise ValueError('cannot update the _id field')
del dct['_id']
if 'version' in dct:
if dct['version'] != doc['version']:
warnings.warn('Ignoring "version" field in update dictionary')
if 'version' in doc:
doc_query = dict(_id=doc['_id'], version=doc['version'])
dct['version'] = doc['version'] + 1
else:
doc_query = dict(_id=doc['_id'])
dct['version'] = 1
try:
collection.update(
doc_query,
{'$set': dct},
upsert=False,
multi=False,)
except pymongo.errors.OperationFailure as e:
raise OperationFailure(e)
doc.update(dct)
if do_sanity_checks:
server_doc = collection.find_one(
dict(_id=doc['_id'], version=doc['version']))
if server_doc is None:
raise OperationFailure('updated doc not found : %s'
% str(doc))
elif server_doc != doc:
if 0:
mismatching_keys = []
for k, v in list(server_doc.items()):
if k in doc:
if doc[k] != v:
mismatching_keys.append((k, v, doc[k]))
else:
mismatching_keys.append((k, v, '<missing>'))
for k, v in list(doc.items()):
if k not in server_doc:
mismatching_keys.append((k, '<missing>', v))
raise OperationFailure('local and server doc documents are out of sync: %s' %
repr((doc, server_doc, mismatching_keys)))
return doc
def attachment_names(self, doc):
def as_str(name_id):
assert isinstance(name_id[0], six.string_types), name_id
return str(name_id[0])
return list(map(as_str, doc.get('_attachments', [])))
def set_attachment(self, doc, blob, name, collection=None):
attachments = doc.get('_attachments', [])
name_matches = [a for a in attachments if a[0] == name]
new_file_id = self.gfs.put(blob, filename='%s_%s' % (doc['_id'], name))
logger.info('stored blob of %i bytes with id=%s and filename %s_%s' % (
len(blob), str(new_file_id), doc['_id'], name))
new_attachments = ([a for a in attachments if a[0] != name] +
[(name, new_file_id)])
try:
ii = 0
doc = self.update(doc, {'_attachments': new_attachments},
collection=collection)
while ii < len(name_matches):
self.gfs.delete(name_matches[ii][1])
ii += 1
except:
while ii < len(name_matches):
logger.warning("Leak during set_attachment: old_file_id=%s" % (
name_matches[ii][1]))
ii += 1
raise
assert len([n for n in self.attachment_names(doc) if n == name]) == 1
def get_attachment(self, doc, name):
attachments = doc.get('_attachments', [])
file_ids = [a[1] for a in attachments if a[0] == name]
if not file_ids:
raise OperationFailure('Attachment not found: %s' % name)
if len(file_ids) > 1:
raise OperationFailure('multiple name matches', (name, file_ids))
return self.gfs.get(file_ids[0]).read()
def delete_attachment(self, doc, name, collection=None):
attachments = doc.get('_attachments', [])
file_id = None
for i, a in enumerate(attachments):
if a[0] == name:
file_id = a[1]
break
if file_id is None:
raise OperationFailure('Attachment not found: %s' % name)
del attachments[i]
self.update(doc, {'_attachments': attachments}, collection=collection)
self.gfs.delete(file_id)
class MongoTrials(Trials):
async = True
def __init__(self, arg, exp_key=None, cmd=None, workdir=None,
refresh=True):
if isinstance(arg, MongoJobs):
self.handle = arg
else:
connection_string = arg
self.handle = MongoJobs.new_from_connection_str(connection_string)
self.handle.create_indexes()
self._exp_key = exp_key
self.cmd = cmd
self.workdir = workdir
if refresh:
self.refresh()
def view(self, exp_key=None, cmd=None, workdir=None, refresh=True):
rval = self.__class__(self.handle,
exp_key=self._exp_key if exp_key is None else exp_key,
cmd=self.cmd if cmd is None else cmd,
workdir=self.workdir if workdir is None else workdir,
refresh=refresh)
return rval
def refresh_tids(self, tids):
exp_key = self._exp_key
if exp_key != None:
query = {'exp_key': exp_key}
else:
query = {}
t0 = time.time()
query['state'] = {'$ne': JOB_STATE_ERROR}
if tids is not None:
query['tid'] = {'$in': list(tids)}
orig_trials = getattr(self, '_trials', [])
_trials = orig_trials[:]
if _trials:
db_data = list(self.handle.jobs.find(query,
projection=['_id', 'version']))
if db_data:
db_data = numpy.rec.array([(x['_id'], int(x['version']))
for x in db_data],
names=['_id', 'version'])
db_data.sort(order=['_id', 'version'])
db_data = db_data[get_most_recent_inds(db_data)]
existing_data = numpy.rec.array([(x['_id'],
int(x['version'])) for x in _trials],
names=['_id', 'version'])
existing_data.sort(order=['_id', 'version'])
db_in_existing = fast_isin(db_data['_id'], existing_data['_id'])
existing_in_db = fast_isin(existing_data['_id'], db_data['_id'])
_trials = [_trials[_ind] for _ind in existing_in_db.nonzero()[0]]
new_data = db_data[numpy.invert(db_in_existing)]
db_data = db_data[db_in_existing]
existing_data = existing_data[existing_in_db]
try:
assert len(db_data) == len(existing_data)
assert (existing_data['_id'] == db_data['_id']).all()
assert (existing_data['version'] <= db_data['version']).all()
except:
reportpath = os.path.join(os.getcwd(),
'hyperopt_refresh_crash_report_' +
str(numpy.random.randint(1e8)) + '.pkl')
logger.error('HYPEROPT REFRESH ERROR: writing error file to %s' % reportpath)
_file = open(reportpath, 'w')
pickle.dump({'db_data': db_data,
'existing_data': existing_data},
_file)
_file.close()
raise
same_version = existing_data['version'] == db_data['version']
_trials = [_trials[_ind] for _ind in same_version.nonzero()[0]]
version_changes = existing_data[numpy.invert(same_version)]
update_ids = new_data['_id'].tolist() + version_changes['_id'].tolist()
num_new = len(update_ids)
update_query = copy.deepcopy(query)
update_query['_id'] = {'$in': update_ids}
updated_trials = list(self.handle.jobs.find(update_query))
_trials.extend(updated_trials)
else:
num_new = 0
_trials = []
else:
_trials = list(self.handle.jobs.find(query))
if _trials:
_trials = [_trials[_i] for _i in get_most_recent_inds(_trials)]
num_new = len(_trials)
logger.debug('Refresh data download took %f seconds for %d ids' %
(time.time() - t0, num_new))
if tids is not None:
new_trials = _trials
tids_set = set(tids)
assert all(t['tid'] in tids_set for t in new_trials)
old_trials = [t for t in orig_trials if t['tid'] not in tids_set]
_trials = new_trials + old_trials
jarray = numpy.array([j['_id'] for j in _trials])
jobsort = jarray.argsort()
self._trials = [_trials[_idx] for _idx in jobsort]
self._specs = [_trials[_idx]['spec'] for _idx in jobsort]
self._results = [_trials[_idx]['result'] for _idx in jobsort]
self._miscs = [_trials[_idx]['misc'] for _idx in jobsort]
def refresh(self):
self.refresh_tids(None)
def _insert_trial_docs(self, docs):
rval = []
for doc in docs:
rval.append(self.handle.jobs.insert(doc))
return rval
def count_by_state_unsynced(self, arg):
exp_key = self._exp_key
if isinstance(arg, int):
if arg not in JOB_STATES:
raise ValueError('invalid state', arg)
query = dict(state=arg)
else:
assert hasattr(arg, '__iter__')
states = list(arg)
assert all([x in JOB_STATES for x in states])
query = dict(state={'$in': states})
if exp_key != None:
query['exp_key'] = exp_key
rval = self.handle.jobs.find(query).count()
return rval
def delete_all(self, cond=None):
if cond is None:
cond = {}
else:
cond = dict(cond)
if self._exp_key:
cond['exp_key'] = self._exp_key
self.handle.delete_all(cond)
gfs = self.handle.gfs
for filename in gfs.list():
try:
fdoc = gfs.get_last_version(filename=filename, **cond)
except gridfs.errors.NoFile:
continue
gfs.delete(fdoc._id)
self.refresh()
def new_trial_ids(self, N):
db = self.handle.db
query = {'a': 0}
doc = None
while doc is None:
doc = db.job_ids.find_and_modify(
query,
{'$inc': {'last_id': N}},
upsert=True)
if doc is None:
logger.warning('no last_id found, re-trying')
time.sleep(1.0)
lid = doc.get('last_id', 0)
return list(range(lid, lid + N))
def trial_attachments(self, trial):
class Attachments(object):
def __contains__(_self, name):
return name in self.handle.attachment_names(doc=trial)
def __len__(_self):
return len(self.handle.attachment_names(doc=trial))
def __iter__(_self):
return iter(self.handle.attachment_names(doc=trial))
def __getitem__(_self, name):
try:
return self.handle.get_attachment(
doc=trial,
name=name)
except OperationFailure:
raise KeyError(name)
def __setitem__(_self, name, value):
self.handle.set_attachment(
doc=trial,
blob=value,
name=name,
collection=self.handle.db.jobs)
def __delitem__(_self, name):
raise NotImplementedError('delete trial_attachment')
def keys(self):
return [k for k in self]
def values(self):
return [self[k] for k in self]
def items(self):
return [(k, self[k]) for k in self]
return Attachments()
@property
|
MIT License
|
gerritcodereview/git-repo
|
project.py
|
Project.UpdatePaths
|
python
|
def UpdatePaths(self, relpath, worktree, gitdir, objdir):
self.gitdir = gitdir.replace('\\', '/')
self.objdir = objdir.replace('\\', '/')
if worktree:
self.worktree = os.path.normpath(worktree).replace('\\', '/')
else:
self.worktree = None
self.relpath = relpath
self.config = GitConfig.ForRepository(gitdir=self.gitdir,
defaults=self.manifest.globalConfig)
if self.worktree:
self.work_git = self._GitGetByExec(self, bare=False, gitdir=self.gitdir)
else:
self.work_git = None
self.bare_git = self._GitGetByExec(self, bare=True, gitdir=self.gitdir)
self.bare_ref = GitRefs(self.gitdir)
self.bare_objdir = self._GitGetByExec(self, bare=True, gitdir=self.objdir)
|
Update paths used by this project
|
https://github.com/gerritcodereview/git-repo/blob/03ff276cd70e78639232d2e878d972f15ebcd461/project.py#L561-L580
|
import errno
import filecmp
import glob
import os
import random
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
import urllib.parse
from color import Coloring
from git_command import GitCommand, git_require
from git_config import GitConfig, IsId, GetSchemeFromUrl, GetUrlCookieFile, ID_RE
from error import GitError, UploadError, DownloadError
from error import ManifestInvalidRevisionError, ManifestInvalidPathError
from error import NoManifestException
import platform_utils
import progress
from repo_trace import IsTrace, Trace
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M, R_WORKTREE_M
MAXIMUM_RETRY_SLEEP_SEC = 3600.0
RETRY_JITTER_PERCENT = 0.1
def _lwrite(path, content):
lock = '%s.lock' % path
with open(lock, 'w', newline='\n') as fd:
fd.write(content)
try:
platform_utils.rename(lock, path)
except OSError:
platform_utils.remove(lock)
raise
def _error(fmt, *args):
msg = fmt % args
print('error: %s' % msg, file=sys.stderr)
def _warn(fmt, *args):
msg = fmt % args
print('warn: %s' % msg, file=sys.stderr)
def not_rev(r):
return '^' + r
def sq(r):
return "'" + r.replace("'", "'\''") + "'"
_project_hook_list = None
def _ProjectHooks():
global _project_hook_list
if _project_hook_list is None:
d = platform_utils.realpath(os.path.abspath(os.path.dirname(__file__)))
d = os.path.join(d, 'hooks')
_project_hook_list = [os.path.join(d, x) for x in platform_utils.listdir(d)]
return _project_hook_list
class DownloadedChange(object):
_commit_cache = None
def __init__(self, project, base, change_id, ps_id, commit):
self.project = project
self.base = base
self.change_id = change_id
self.ps_id = ps_id
self.commit = commit
@property
def commits(self):
if self._commit_cache is None:
self._commit_cache = self.project.bare_git.rev_list('--abbrev=8',
'--abbrev-commit',
'--pretty=oneline',
'--reverse',
'--date-order',
not_rev(self.base),
self.commit,
'--')
return self._commit_cache
class ReviewableBranch(object):
_commit_cache = None
_base_exists = None
def __init__(self, project, branch, base):
self.project = project
self.branch = branch
self.base = base
@property
def name(self):
return self.branch.name
@property
def commits(self):
if self._commit_cache is None:
args = ('--abbrev=8', '--abbrev-commit', '--pretty=oneline', '--reverse',
'--date-order', not_rev(self.base), R_HEADS + self.name, '--')
try:
self._commit_cache = self.project.bare_git.rev_list(*args)
except GitError:
if self.base_exists:
raise
self._commit_cache = []
return self._commit_cache
@property
def unabbrev_commits(self):
r = dict()
for commit in self.project.bare_git.rev_list(not_rev(self.base),
R_HEADS + self.name,
'--'):
r[commit[0:8]] = commit
return r
@property
def date(self):
return self.project.bare_git.log('--pretty=format:%cd',
'-n', '1',
R_HEADS + self.name,
'--')
@property
def base_exists(self):
if self._base_exists is None:
try:
self.project.bare_git.rev_parse('--verify', not_rev(self.base))
self._base_exists = True
except GitError:
self._base_exists = False
return self._base_exists
def UploadForReview(self, people,
dryrun=False,
auto_topic=False,
hashtags=(),
labels=(),
private=False,
notify=None,
wip=False,
dest_branch=None,
validate_certs=True,
push_options=None):
self.project.UploadForReview(branch=self.name,
people=people,
dryrun=dryrun,
auto_topic=auto_topic,
hashtags=hashtags,
labels=labels,
private=private,
notify=notify,
wip=wip,
dest_branch=dest_branch,
validate_certs=validate_certs,
push_options=push_options)
def GetPublishedRefs(self):
refs = {}
output = self.project.bare_git.ls_remote(
self.branch.remote.SshReviewUrl(self.project.UserEmail),
'refs/changes/*')
for line in output.split('\n'):
try:
(sha, ref) = line.split()
refs[sha] = ref
except ValueError:
pass
return refs
class StatusColoring(Coloring):
def __init__(self, config):
super().__init__(config, 'status')
self.project = self.printer('header', attr='bold')
self.branch = self.printer('header', attr='bold')
self.nobranch = self.printer('nobranch', fg='red')
self.important = self.printer('important', fg='red')
self.added = self.printer('added', fg='green')
self.changed = self.printer('changed', fg='red')
self.untracked = self.printer('untracked', fg='red')
class DiffColoring(Coloring):
def __init__(self, config):
super().__init__(config, 'diff')
self.project = self.printer('header', attr='bold')
self.fail = self.printer('fail', fg='red')
class Annotation(object):
def __init__(self, name, value, keep):
self.name = name
self.value = value
self.keep = keep
def __eq__(self, other):
if not isinstance(other, Annotation):
return False
return self.__dict__ == other.__dict__
def __lt__(self, other):
if not isinstance(other, Annotation):
raise ValueError('comparison is not between two Annotation objects')
if self.name == other.name:
if self.value == other.value:
return self.keep < other.keep
return self.value < other.value
return self.name < other.name
def _SafeExpandPath(base, subpath, skipfinal=False):
resep = re.compile(r'[/%s]' % re.escape(os.path.sep))
components = resep.split(subpath)
if skipfinal:
finalpart = components.pop()
path = base
for part in components:
if part in {'.', '..'}:
raise ManifestInvalidPathError(
'%s: "%s" not allowed in paths' % (subpath, part))
path = os.path.join(path, part)
if platform_utils.islink(path):
raise ManifestInvalidPathError(
'%s: traversing symlinks not allow' % (path,))
if os.path.exists(path):
if not os.path.isfile(path) and not platform_utils.isdir(path):
raise ManifestInvalidPathError(
'%s: only regular files & directories allowed' % (path,))
if skipfinal:
path = os.path.join(path, finalpart)
return path
class _CopyFile(object):
def __init__(self, git_worktree, src, topdir, dest):
self.git_worktree = git_worktree
self.topdir = topdir
self.src = src
self.dest = dest
def _Copy(self):
src = _SafeExpandPath(self.git_worktree, self.src)
dest = _SafeExpandPath(self.topdir, self.dest)
if platform_utils.isdir(src):
raise ManifestInvalidPathError(
'%s: copying from directory not supported' % (self.src,))
if platform_utils.isdir(dest):
raise ManifestInvalidPathError(
'%s: copying to directory not allowed' % (self.dest,))
if not os.path.exists(dest) or not filecmp.cmp(src, dest):
try:
if os.path.exists(dest):
platform_utils.remove(dest)
else:
dest_dir = os.path.dirname(dest)
if not platform_utils.isdir(dest_dir):
os.makedirs(dest_dir)
shutil.copy(src, dest)
mode = os.stat(dest)[stat.ST_MODE]
mode = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
os.chmod(dest, mode)
except IOError:
_error('Cannot copy file %s to %s', src, dest)
class _LinkFile(object):
def __init__(self, git_worktree, src, topdir, dest):
self.git_worktree = git_worktree
self.topdir = topdir
self.src = src
self.dest = dest
def __linkIt(self, relSrc, absDest):
if not platform_utils.islink(absDest) or (platform_utils.readlink(absDest) != relSrc):
try:
if os.path.lexists(absDest):
platform_utils.remove(absDest)
else:
dest_dir = os.path.dirname(absDest)
if not platform_utils.isdir(dest_dir):
os.makedirs(dest_dir)
platform_utils.symlink(relSrc, absDest)
except IOError:
_error('Cannot link file %s to %s', relSrc, absDest)
def _Link(self):
if self.src == '.':
src = self.git_worktree
else:
src = _SafeExpandPath(self.git_worktree, self.src)
if not glob.has_magic(src):
dest = _SafeExpandPath(self.topdir, self.dest, skipfinal=True)
relpath = os.path.relpath(src, os.path.dirname(dest))
self.__linkIt(relpath, dest)
else:
dest = _SafeExpandPath(self.topdir, self.dest)
if os.path.exists(dest) and not platform_utils.isdir(dest):
_error('Link error: src with wildcard, %s must be a directory', dest)
else:
for absSrcFile in glob.glob(src):
absSrcDir = os.path.dirname(absSrcFile)
relSrcDir = os.path.relpath(absSrcDir, dest)
srcFile = os.path.basename(absSrcFile)
absDest = os.path.join(dest, srcFile)
relSrc = os.path.join(relSrcDir, srcFile)
self.__linkIt(relSrc, absDest)
class RemoteSpec(object):
def __init__(self,
name,
url=None,
pushUrl=None,
review=None,
revision=None,
orig_name=None,
fetchUrl=None):
self.name = name
self.url = url
self.pushUrl = pushUrl
self.review = review
self.revision = revision
self.orig_name = orig_name
self.fetchUrl = fetchUrl
class Project(object):
shareable_files = ['description', 'info']
shareable_dirs = ['hooks', 'objects', 'rr-cache', 'svn']
working_tree_files = ['config', 'packed-refs', 'shallow']
working_tree_dirs = ['logs', 'refs']
def __init__(self,
manifest,
name,
remote,
gitdir,
objdir,
worktree,
relpath,
revisionExpr,
revisionId,
rebase=True,
groups=None,
sync_c=False,
sync_s=False,
sync_tags=True,
clone_depth=None,
upstream=None,
parent=None,
use_git_worktrees=False,
is_derived=False,
dest_branch=None,
optimized_fetch=False,
retry_fetches=0,
old_revision=None):
self.client = self.manifest = manifest
self.name = name
self.remote = remote
self.UpdatePaths(relpath, worktree, gitdir, objdir)
self.SetRevision(revisionExpr, revisionId=revisionId)
self.rebase = rebase
self.groups = groups
self.sync_c = sync_c
self.sync_s = sync_s
self.sync_tags = sync_tags
self.clone_depth = clone_depth
self.upstream = upstream
self.parent = parent
self.use_git_worktrees = use_git_worktrees
self.is_derived = is_derived
self.optimized_fetch = optimized_fetch
self.retry_fetches = max(0, retry_fetches)
self.subprojects = []
self.snapshots = {}
self.copyfiles = []
self.linkfiles = []
self.annotations = []
self.dest_branch = dest_branch
self.old_revision = old_revision
self.enabled_repo_hooks = []
def SetRevision(self, revisionExpr, revisionId=None):
self.revisionExpr = revisionExpr
if revisionId is None and revisionExpr and IsId(revisionExpr):
self.revisionId = self.revisionExpr
else:
self.revisionId = revisionId
|
Apache License 2.0
|
muxinc/mux-python
|
mux_python/api/live_streams_api.py
|
LiveStreamsApi.disable_live_stream_with_http_info
|
python
|
def disable_live_stream_with_http_info(self, live_stream_id, **kwargs):
local_var_params = locals()
all_params = [
'live_stream_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method disable_live_stream" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('live_stream_id' not in local_var_params or
local_var_params['live_stream_id'] is None):
raise ApiValueError("Missing the required parameter `live_stream_id` when calling `disable_live_stream`")
collection_formats = {}
path_params = {}
if 'live_stream_id' in local_var_params:
path_params['LIVE_STREAM_ID'] = local_var_params['live_stream_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['accessToken']
response_types_map = {
200: "DisableLiveStreamResponse",
}
return self.api_client.call_api(
'/video/v1/live-streams/{LIVE_STREAM_ID}/disable', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
|
Disable a live stream # noqa: E501
Disables a live stream, making it reject incoming RTMP streams until re-enabled. The API also ends the live stream recording immediately when it is active. Ending the live stream recording adds the `EXT-X-ENDLIST` tag to the HLS manifest, which notifies the player that this live stream is over. Mux also closes the encoder connection immediately. Any attempt from the encoder to re-establish a connection will fail until the live stream is re-enabled. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.disable_live_stream_with_http_info(live_stream_id, async_req=True)
>>> result = thread.get()
:param live_stream_id: The live stream ID (required)
:type live_stream_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without HTTP status code
                               and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
                          number is provided, it will be the total request
                          timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
                       request; this effectively ignores the authentication
                       in the spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DisableLiveStreamResponse, status_code(int), headers(HTTPHeaderDict))
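A minimal usage sketch for the call above (not part of the generated client; the credentials setup follows the usual mux-python basic-auth convention and the stream ID is a placeholder):

import os
import mux_python

configuration = mux_python.Configuration()
configuration.username = os.environ['MUX_TOKEN_ID']        # assumed environment variable names
configuration.password = os.environ['MUX_TOKEN_SECRET']

live_api = mux_python.LiveStreamsApi(mux_python.ApiClient(configuration))
# The *_with_http_info variant returns (data, status_code, headers) rather than just the data.
data, status, headers = live_api.disable_live_stream_with_http_info('HYPOTHETICAL_LIVE_STREAM_ID')
print(status)    # expected to be 200 on success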
|
https://github.com/muxinc/mux-python/blob/57c10a3002a0bc65a0dc8938f08176bd5b030a93/mux_python/api/live_streams_api.py#L910-L1013
|
from __future__ import absolute_import
import re
import six
from mux_python.api_client import ApiClient
from mux_python.exceptions import (
ApiTypeError,
ApiValueError
)
class LiveStreamsApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_live_stream(self, create_live_stream_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.create_live_stream_with_http_info(create_live_stream_request, **kwargs)
def create_live_stream_with_http_info(self, create_live_stream_request, **kwargs):
local_var_params = locals()
all_params = [
'create_live_stream_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_live_stream" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('create_live_stream_request' not in local_var_params or
local_var_params['create_live_stream_request'] is None):
raise ApiValueError("Missing the required parameter `create_live_stream_request` when calling `create_live_stream`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'create_live_stream_request' in local_var_params:
body_params = local_var_params['create_live_stream_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['accessToken']
response_types_map = {
201: "LiveStreamResponse",
}
return self.api_client.call_api(
'/video/v1/live-streams', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def create_live_stream_playback_id(self, live_stream_id, create_playback_id_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.create_live_stream_playback_id_with_http_info(live_stream_id, create_playback_id_request, **kwargs)
def create_live_stream_playback_id_with_http_info(self, live_stream_id, create_playback_id_request, **kwargs):
local_var_params = locals()
all_params = [
'live_stream_id',
'create_playback_id_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_live_stream_playback_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('live_stream_id' not in local_var_params or
local_var_params['live_stream_id'] is None):
raise ApiValueError("Missing the required parameter `live_stream_id` when calling `create_live_stream_playback_id`")
if self.api_client.client_side_validation and ('create_playback_id_request' not in local_var_params or
local_var_params['create_playback_id_request'] is None):
raise ApiValueError("Missing the required parameter `create_playback_id_request` when calling `create_live_stream_playback_id`")
collection_formats = {}
path_params = {}
if 'live_stream_id' in local_var_params:
path_params['LIVE_STREAM_ID'] = local_var_params['live_stream_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'create_playback_id_request' in local_var_params:
body_params = local_var_params['create_playback_id_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['accessToken']
response_types_map = {
201: "CreatePlaybackIDResponse",
}
return self.api_client.call_api(
'/video/v1/live-streams/{LIVE_STREAM_ID}/playback-ids', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def create_live_stream_simulcast_target(self, live_stream_id, create_simulcast_target_request, **kwargs):
kwargs['_return_http_data_only'] = True
return self.create_live_stream_simulcast_target_with_http_info(live_stream_id, create_simulcast_target_request, **kwargs)
def create_live_stream_simulcast_target_with_http_info(self, live_stream_id, create_simulcast_target_request, **kwargs):
local_var_params = locals()
all_params = [
'live_stream_id',
'create_simulcast_target_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_live_stream_simulcast_target" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('live_stream_id' not in local_var_params or
local_var_params['live_stream_id'] is None):
raise ApiValueError("Missing the required parameter `live_stream_id` when calling `create_live_stream_simulcast_target`")
if self.api_client.client_side_validation and ('create_simulcast_target_request' not in local_var_params or
local_var_params['create_simulcast_target_request'] is None):
raise ApiValueError("Missing the required parameter `create_simulcast_target_request` when calling `create_live_stream_simulcast_target`")
collection_formats = {}
path_params = {}
if 'live_stream_id' in local_var_params:
path_params['LIVE_STREAM_ID'] = local_var_params['live_stream_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'create_simulcast_target_request' in local_var_params:
body_params = local_var_params['create_simulcast_target_request']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['accessToken']
response_types_map = {
201: "SimulcastTargetResponse",
}
return self.api_client.call_api(
'/video/v1/live-streams/{LIVE_STREAM_ID}/simulcast-targets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_live_stream(self, live_stream_id, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_live_stream_with_http_info(live_stream_id, **kwargs)
def delete_live_stream_with_http_info(self, live_stream_id, **kwargs):
local_var_params = locals()
all_params = [
'live_stream_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_live_stream" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('live_stream_id' not in local_var_params or
local_var_params['live_stream_id'] is None):
raise ApiValueError("Missing the required parameter `live_stream_id` when calling `delete_live_stream`")
collection_formats = {}
path_params = {}
if 'live_stream_id' in local_var_params:
path_params['LIVE_STREAM_ID'] = local_var_params['live_stream_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
auth_settings = ['accessToken']
response_types_map = {}
return self.api_client.call_api(
'/video/v1/live-streams/{LIVE_STREAM_ID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_live_stream_playback_id(self, live_stream_id, playback_id, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_live_stream_playback_id_with_http_info(live_stream_id, playback_id, **kwargs)
def delete_live_stream_playback_id_with_http_info(self, live_stream_id, playback_id, **kwargs):
local_var_params = locals()
all_params = [
'live_stream_id',
'playback_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_live_stream_playback_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('live_stream_id' not in local_var_params or
local_var_params['live_stream_id'] is None):
raise ApiValueError("Missing the required parameter `live_stream_id` when calling `delete_live_stream_playback_id`")
if self.api_client.client_side_validation and ('playback_id' not in local_var_params or
local_var_params['playback_id'] is None):
raise ApiValueError("Missing the required parameter `playback_id` when calling `delete_live_stream_playback_id`")
collection_formats = {}
path_params = {}
if 'live_stream_id' in local_var_params:
path_params['LIVE_STREAM_ID'] = local_var_params['live_stream_id']
if 'playback_id' in local_var_params:
path_params['PLAYBACK_ID'] = local_var_params['playback_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
auth_settings = ['accessToken']
response_types_map = {}
return self.api_client.call_api(
'/video/v1/live-streams/{LIVE_STREAM_ID}/playback-ids/{PLAYBACK_ID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def delete_live_stream_simulcast_target(self, live_stream_id, simulcast_target_id, **kwargs):
kwargs['_return_http_data_only'] = True
return self.delete_live_stream_simulcast_target_with_http_info(live_stream_id, simulcast_target_id, **kwargs)
def delete_live_stream_simulcast_target_with_http_info(self, live_stream_id, simulcast_target_id, **kwargs):
local_var_params = locals()
all_params = [
'live_stream_id',
'simulcast_target_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_live_stream_simulcast_target" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('live_stream_id' not in local_var_params or
local_var_params['live_stream_id'] is None):
raise ApiValueError("Missing the required parameter `live_stream_id` when calling `delete_live_stream_simulcast_target`")
if self.api_client.client_side_validation and ('simulcast_target_id' not in local_var_params or
local_var_params['simulcast_target_id'] is None):
raise ApiValueError("Missing the required parameter `simulcast_target_id` when calling `delete_live_stream_simulcast_target`")
collection_formats = {}
path_params = {}
if 'live_stream_id' in local_var_params:
path_params['LIVE_STREAM_ID'] = local_var_params['live_stream_id']
if 'simulcast_target_id' in local_var_params:
path_params['SIMULCAST_TARGET_ID'] = local_var_params['simulcast_target_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
auth_settings = ['accessToken']
response_types_map = {}
return self.api_client.call_api(
'/video/v1/live-streams/{LIVE_STREAM_ID}/simulcast-targets/{SIMULCAST_TARGET_ID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def disable_live_stream(self, live_stream_id, **kwargs):
kwargs['_return_http_data_only'] = True
return self.disable_live_stream_with_http_info(live_stream_id, **kwargs)
|
MIT License
|
wbond/oscrypto
|
oscrypto/_openssl/symmetric.py
|
_setup_evp_encrypt_decrypt
|
python
|
def _setup_evp_encrypt_decrypt(cipher, data):
evp_cipher = {
'aes128': libcrypto.EVP_aes_128_cbc,
'aes192': libcrypto.EVP_aes_192_cbc,
'aes256': libcrypto.EVP_aes_256_cbc,
'rc2': libcrypto.EVP_rc2_cbc,
'rc4': libcrypto.EVP_rc4,
'des': libcrypto.EVP_des_cbc,
'tripledes_2key': libcrypto.EVP_des_ede_cbc,
'tripledes_3key': libcrypto.EVP_des_ede3_cbc,
}[cipher]()
if cipher == 'rc4':
buffer_size = len(data)
else:
block_size = {
'aes128': 16,
'aes192': 16,
'aes256': 16,
'rc2': 8,
'des': 8,
'tripledes_2key': 8,
'tripledes_3key': 8,
}[cipher]
buffer_size = block_size * int(math.ceil(len(data) / block_size))
return (evp_cipher, buffer_size)
|
Creates an EVP_CIPHER pointer object and determines the buffer size
necessary for the parameters specified.
:param cipher:
    A unicode string of "aes128", "aes192", "aes256", "des",
    "tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param data:
    The plaintext or ciphertext as a byte string
:return:
    A 2-element tuple with the first element being an EVP_CIPHER pointer
    and the second being an integer that is the required buffer size
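For context, a small sketch of how this helper is exercised through oscrypto's public padded-CBC functions (the key and plaintext are illustrative; the comment restates the buffer-rounding rule above):

import os
from oscrypto.symmetric import aes_cbc_pkcs7_encrypt, aes_cbc_pkcs7_decrypt

key = os.urandom(32)                                  # selects the 'aes256' cipher
plaintext = b'20 bytes of payload.'                   # len(plaintext) == 20
iv, ciphertext = aes_cbc_pkcs7_encrypt(key, plaintext, None)
# _setup_evp_encrypt_decrypt sizes the output buffer to whole blocks:
# 16 * ceil(20 / 16) == 32, which also matches the padded ciphertext length.
assert len(ciphertext) == 32
assert aes_cbc_pkcs7_decrypt(key, ciphertext, iv) == plaintext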
|
https://github.com/wbond/oscrypto/blob/d40c62577706682a0f6da5616ad09964f1c9137d/oscrypto/_openssl/symmetric.py#L790-L841
|
from __future__ import unicode_literals, division, absolute_import, print_function
import math
from .._errors import pretty_message
from .._ffi import new, null, is_null, buffer_from_bytes, bytes_from_buffer, deref
from ._libcrypto import libcrypto, LibcryptoConst, handle_openssl_error
from ..util import rand_bytes
from .._types import type_name, byte_cls
__all__ = [
'aes_cbc_no_padding_decrypt',
'aes_cbc_no_padding_encrypt',
'aes_cbc_pkcs7_decrypt',
'aes_cbc_pkcs7_encrypt',
'des_cbc_pkcs5_decrypt',
'des_cbc_pkcs5_encrypt',
'rc2_cbc_pkcs5_decrypt',
'rc2_cbc_pkcs5_encrypt',
'rc4_decrypt',
'rc4_encrypt',
'tripledes_cbc_pkcs5_decrypt',
'tripledes_cbc_pkcs5_encrypt',
]
def aes_cbc_no_padding_encrypt(key, data, iv):
cipher = _calculate_aes_cipher(key)
if not iv:
iv = rand_bytes(16)
elif len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
if len(data) % 16 != 0:
raise ValueError(pretty_message(
'''
data must be a multiple of 16 bytes long - is %s
''',
len(data)
))
return (iv, _encrypt(cipher, key, data, iv, False))
def aes_cbc_no_padding_decrypt(key, data, iv):
cipher = _calculate_aes_cipher(key)
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return _decrypt(cipher, key, data, iv, False)
def aes_cbc_pkcs7_encrypt(key, data, iv):
cipher = _calculate_aes_cipher(key)
if not iv:
iv = rand_bytes(16)
elif len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt(cipher, key, data, iv, True))
def aes_cbc_pkcs7_decrypt(key, data, iv):
cipher = _calculate_aes_cipher(key)
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return _decrypt(cipher, key, data, iv, True)
def _calculate_aes_cipher(key):
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
long - is %s
''',
len(key)
))
if len(key) == 16:
cipher = 'aes128'
elif len(key) == 24:
cipher = 'aes192'
elif len(key) == 32:
cipher = 'aes256'
return cipher
def rc4_encrypt(key, data):
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
return _encrypt('rc4', key, data, None, None)
def rc4_decrypt(key, data):
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
return _decrypt('rc4', key, data, None, None)
def rc2_cbc_pkcs5_encrypt(key, data, iv):
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt('rc2', key, data, iv, True))
def rc2_cbc_pkcs5_decrypt(key, data, iv):
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return _decrypt('rc2', key, data, iv, True)
def tripledes_cbc_pkcs5_encrypt(key, data, iv):
if len(key) != 16 and len(key) != 24:
raise ValueError(pretty_message(
'''
            key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
            iv must be 8 bytes long - is %s
''',
len(iv)
))
cipher = 'tripledes_3key'
if len(key) == 16:
key = key + key[0:8]
cipher = 'tripledes_2key'
return (iv, _encrypt(cipher, key, data, iv, True))
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
if len(key) != 16 and len(key) != 24:
raise ValueError(pretty_message(
'''
key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
cipher = 'tripledes_3key'
if len(key) == 16:
key = key + key[0:8]
cipher = 'tripledes_2key'
return _decrypt(cipher, key, data, iv, True)
def des_cbc_pkcs5_encrypt(key, data, iv):
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt('des', key, data, iv, True))
def des_cbc_pkcs5_decrypt(key, data, iv):
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return _decrypt('des', key, data, iv, True)
def _encrypt(cipher, key, data, iv, padding):
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != 'rc4' and not padding:
aes128_no_padding = (
cipher == 'aes128' and
padding is False and
len(data) % 16 == 0
)
aes192_no_padding = (
cipher == 'aes192' and
padding is False and
len(data) % 24 == 0
)
aes256_no_padding = (
cipher == 'aes256' and
padding is False and
len(data) % 32 == 0
)
if aes128_no_padding is False and aes192_no_padding is False and aes256_no_padding is False:
raise ValueError('padding must be specified')
evp_cipher_ctx = None
try:
evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
if is_null(evp_cipher_ctx):
handle_openssl_error(0)
evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
if iv is None:
iv = null()
if cipher in set(['rc2', 'rc4']):
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
handle_openssl_error(res)
res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
handle_openssl_error(res)
if cipher == 'rc2':
res = libcrypto.EVP_CIPHER_CTX_ctrl(
evp_cipher_ctx,
LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
len(key) * 8,
null()
)
handle_openssl_error(res)
evp_cipher = null()
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
handle_openssl_error(res)
if padding is not None:
res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
handle_openssl_error(res)
buffer = buffer_from_bytes(buffer_size)
output_length = new(libcrypto, 'int *')
res = libcrypto.EVP_EncryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
handle_openssl_error(res)
output = bytes_from_buffer(buffer, deref(output_length))
res = libcrypto.EVP_EncryptFinal_ex(evp_cipher_ctx, buffer, output_length)
handle_openssl_error(res)
output += bytes_from_buffer(buffer, deref(output_length))
return output
finally:
if evp_cipher_ctx:
libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)
def _decrypt(cipher, key, data, iv, padding):
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != 'rc4' and padding is None:
raise ValueError('padding must be specified')
evp_cipher_ctx = None
try:
evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
if is_null(evp_cipher_ctx):
handle_openssl_error(0)
evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
if iv is None:
iv = null()
if cipher in set(['rc2', 'rc4']):
res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
handle_openssl_error(res)
res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
handle_openssl_error(res)
if cipher == 'rc2':
res = libcrypto.EVP_CIPHER_CTX_ctrl(
evp_cipher_ctx,
LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
len(key) * 8,
null()
)
handle_openssl_error(res)
evp_cipher = null()
res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
handle_openssl_error(res)
if padding is not None:
res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
handle_openssl_error(res)
buffer = buffer_from_bytes(buffer_size)
output_length = new(libcrypto, 'int *')
res = libcrypto.EVP_DecryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
handle_openssl_error(res)
output = bytes_from_buffer(buffer, deref(output_length))
res = libcrypto.EVP_DecryptFinal_ex(evp_cipher_ctx, buffer, output_length)
handle_openssl_error(res)
output += bytes_from_buffer(buffer, deref(output_length))
return output
finally:
if evp_cipher_ctx:
libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)
|
MIT License
|
neuralmagic/sparseml
|
src/sparseml/tensorflow_v1/utils/variable.py
|
get_prunable_ops
|
python
|
def get_prunable_ops(
graph: tf_compat.Graph = None,
) -> List[Tuple[str, tf_compat.Operation]]:
if not graph:
graph = tf_compat.get_default_graph()
ops = []
for op in graph.get_operations():
if is_prunable_op(op):
ops.append((op.name, op))
return ops
|
Get the prunable operations from a TensorFlow graph.
:param graph: the graph to get the prunable operations from.
If not supplied, the default graph will be used
:return: a list containing the names and ops of the prunable operations
(MatMul, Conv1D, Conv2D, Conv3D)
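A small illustrative sketch, assuming a TensorFlow 1.x-style graph (which is what sparseml.tensorflow_v1 targets); the op names are invented:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    inp = tf.placeholder(tf.float32, shape=[None, 8], name="inp")
    weights = tf.Variable(tf.random.normal([8, 4]), name="fc/weights")
    out = tf.matmul(inp, weights, name="fc/matmul")   # a MatMul op, hence prunable

for name, op in get_prunable_ops(graph):
    print(name, op.type)                              # e.g. "fc/matmul MatMul"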
|
https://github.com/neuralmagic/sparseml/blob/e2dcb66bad713542158dfe54cba113a0cc02ed39/src/sparseml/tensorflow_v1/utils/variable.py#L168-L185
|
import re
from typing import List, Tuple, Union
import numpy
try:
import tensorflow.contrib.graph_editor as graph_editor
from tensorflow.contrib.graph_editor.util import ListView
tf_contrib_err = None
except Exception as err:
graph_editor = None
ListView = None
tf_contrib_err = err
from sparseml.tensorflow_v1.utils.helpers import tf_compat
__all__ = [
"VAR_INDEX_FROM_TRAINABLE",
"get_op_var_index",
"clean_tensor_name",
"get_op_input_var",
"get_tensor_var",
"is_prunable_op",
"get_prunable_ops",
"get_ops_and_inputs_by_name_or_regex",
"any_str_or_regex_matches_tensor_name",
"eval_tensor_density",
"eval_tensor_sparsity",
]
VAR_INDEX_FROM_TRAINABLE = "from_trainable"
def get_op_var_index(var_index: Union[str, int], op_inputs: ListView) -> int:
if isinstance(var_index, int):
if var_index < 0:
var_index += len(op_inputs)
return var_index
if var_index == "from_trainable":
weight_index = len(op_inputs) - 1
trainable_vars = [var.name for var in tf_compat.trainable_variables()]
for index, inp in enumerate(op_inputs):
expected_name = "{}:0".format(clean_tensor_name(inp.name))
if expected_name in trainable_vars:
return index
return weight_index
for index, inp in enumerate(op_inputs):
if var_index in inp.name:
return index
raise ValueError("unknown value given for var_index of {}".format(var_index))
def clean_tensor_name(var_tens: Union[str, tf_compat.Tensor]) -> str:
name = var_tens if isinstance(var_tens, str) else var_tens.name
name = re.sub(r"/read/_.+:[0-9]+$", "", name)
name = re.sub(r"/read:[0-9]+$", "", name)
name = re.sub(r":[0-9]+$", "", name)
return name
def get_op_input_var(
operation: tf_compat.Operation,
var_index: Union[str, int] = VAR_INDEX_FROM_TRAINABLE,
) -> tf_compat.Tensor:
if tf_contrib_err:
raise tf_contrib_err
op_sgv = graph_editor.sgv(operation)
var_index = get_op_var_index(var_index, op_sgv.inputs)
return op_sgv.inputs[var_index]
def get_tensor_var(tens: tf_compat.Tensor) -> tf_compat.Variable:
expected_name = "{}:0".format(clean_tensor_name(tens))
for var in tf_compat.global_variables():
if expected_name == var.name:
return var
raise ValueError(
"could not find a global variable that matched the tensor {}".format(tens)
)
def is_prunable_op(op: tf_compat.Operation):
return (
op.type in ["MatMul", "Conv1D", "Conv2D", "Conv3D", "DepthwiseConv2dNative"]
and "gradients/" not in op.name
and "_grad/" not in op.name
)
|
Apache License 2.0
|
goodmami/penman
|
penman/layout.py
|
node_contexts
|
python
|
def node_contexts(g: Graph) -> List[Union[Variable, None]]:
variables = g.variables()
stack = [g.top]
contexts: List[Union[Variable, None]] = [None] * len(g.triples)
for i, triple in enumerate(g.triples):
eligible: List[Variable] = [triple[0]]
if triple[1] != CONCEPT_ROLE and triple[2] in variables:
eligible.append(cast(Variable, triple[2]))
if stack[-1] not in eligible:
break
else:
contexts[i] = stack[-1]
pushed = get_pushed_variable(g, triple)
if pushed:
stack.append(pushed)
try:
for epi in g.epidata[triple]:
if isinstance(epi, Pop):
stack.pop()
except IndexError:
break
return contexts
|
Return the list of node contexts corresponding to triples in *g*.
If a node context is unknown, the value ``None`` is substituted.
Example:
>>> from penman import decode, layout
>>> g = decode('''
... (a / alpha
... :attr val
... :ARG0 (b / beta :ARG0 (g / gamma))
... :ARG0-of g)''')
>>> for ctx, trp in zip(layout.node_contexts(g), g.triples):
... print(ctx, ':', trp)
...
a : ('a', ':instance', 'alpha')
a : ('a', ':attr', 'val')
a : ('a', ':ARG0', 'b')
b : ('b', ':instance', 'beta')
b : ('b', ':ARG0', 'g')
g : ('g', ':instance', 'gamma')
a : ('g', ':ARG0', 'a')
|
https://github.com/goodmami/penman/blob/23df626456ac561ed71bcf6fb32bcb8125436f3b/penman/layout.py#L625-L672
|
from typing import Union, Mapping, Callable, Any, List, Set, cast
import copy
import logging
from penman.exceptions import LayoutError
from penman.types import (Variable, Role, BasicTriple, Branch, Node)
from penman.epigraph import Epidatum
from penman.surface import (Alignment, RoleAlignment)
from penman.tree import (Tree, is_atomic)
from penman.graph import (Graph, CONCEPT_ROLE)
from penman.model import Model
logger = logging.getLogger(__name__)
_default_model = Model()
_Nodemap = Mapping[Variable, Union[Node, None]]
class LayoutMarker(Epidatum):
class Push(LayoutMarker):
__slots__ = 'variable',
def __init__(self, variable):
self.variable = variable
def __repr__(self):
return f'Push({self.variable})'
class Pop(LayoutMarker):
__slots__ = ()
def __repr__(self):
return 'POP'
POP = Pop()
def interpret(t: Tree, model: Model = None) -> Graph:
if model is None:
model = _default_model
variables = {v for v, _ in t.nodes()}
top, triples, epidata = _interpret_node(t.node, variables, model)
epimap = {}
for triple, epis in epidata:
if triple in epimap:
logger.warning(
f'ignoring epigraph data for duplicate triple: {triple}'
)
else:
epimap[triple] = epis
g = Graph(triples, top=top, epidata=epimap, metadata=t.metadata)
logger.info('Interpreted: %s', g)
return g
def _interpret_node(t: Node, variables: Set[Variable], model: Model):
has_concept = False
triples = []
epidata = []
var, edges = t
for role, target in edges:
epis: List[Epidatum] = []
role, role_epis = _process_role(role)
epis.extend(role_epis)
has_concept |= role == CONCEPT_ROLE
if is_atomic(target):
target, target_epis = _process_atomic(target)
epis.extend(target_epis)
triple = (var, role, target)
if model.is_role_inverted(role):
if target in variables:
triple = model.invert(triple)
else:
logger.warning('cannot deinvert attribute: %r', triple)
triples.append(triple)
epidata.append((triple, epis))
else:
triple = model.deinvert((var, role, target[0]))
triples.append(triple)
epis.append(Push(target[0]))
epidata.append((triple, epis))
_, _triples, _epis = _interpret_node(target, variables, model)
triples.extend(_triples)
_epis[-1][1].append(POP)
epidata.extend(_epis)
if not has_concept:
instance = (var, CONCEPT_ROLE, None)
triples.insert(0, instance)
epidata.append((instance, []))
return var, triples, epidata
def _process_role(role):
epis = ()
if role == '/':
role = CONCEPT_ROLE
elif '~' in role:
role, _, alignment = role.partition('~')
epis = (RoleAlignment.from_string(alignment),)
return role, epis
def _process_atomic(target):
epis = ()
if target and '~' in target:
if target.startswith('"'):
pivot = target.rindex('"') + 1
if pivot < len(target):
epis = (Alignment.from_string(target[pivot:]),)
target = target[:pivot]
else:
target, _, alignment = target.partition('~')
epis = (Alignment.from_string(alignment),)
return target, epis
def configure(g: Graph,
top: Variable = None,
model: Model = None) -> Tree:
if model is None:
model = _default_model
node, data, nodemap = _configure(g, top, model)
while data and isinstance(data[-1], Pop):
data.pop()
skipped: List[BasicTriple] = []
while data:
_skipped, var, data = _find_next(data, nodemap)
skipped.extend(_skipped)
data_count = len(data)
if var is None or data_count == 0:
raise LayoutError('possibly disconnected graph')
_, surprising = _configure_node(var, data, nodemap, model)
if len(data) == data_count and surprising:
skipped.insert(0, data.pop())
elif len(data) >= data_count:
raise LayoutError('unknown configuration error')
else:
data = skipped + data
skipped.clear()
while data and isinstance(data[-1], Pop):
data.pop()
if skipped:
raise LayoutError('incomplete configuration')
_process_epigraph(node)
tree = Tree(node, metadata=g.metadata)
logger.debug('Configured: %s', tree)
return tree
def _configure(g, top, model):
if len(g.triples) == 0:
return (g.top, []), [], {}
nodemap: _Nodemap = {var: None for var in g.variables()}
if top is None:
top = g.top
if top not in nodemap:
raise LayoutError(f'top is not a variable: {top!r}')
nodemap[top] = (top, [])
data = list(reversed(_preconfigure(g, model)))
node, _ = _configure_node(top, data, nodemap, model)
return node, data, nodemap
def _preconfigure(g, model):
data = []
epidata = g.epidata
pushed = set()
for triple in g.triples:
var, role, target = triple
epis, push, pops = [], False, []
for epi in epidata.get(triple, []):
if isinstance(epi, Push):
pvar = epi.variable
if pvar in pushed:
logger.warning(
f"ignoring secondary node contexts for '{pvar}'"
)
continue
if pvar not in (var, target) or role == CONCEPT_ROLE:
logger.warning(
f"node context '{pvar}' invalid for triple: {triple!r}"
)
continue
if pvar == var:
triple = model.invert(triple)
pushed.add(pvar)
push = True
elif isinstance(epi, Pop):
pops.append(epi)
else:
epis.append(epi)
data.append((triple, push, epis))
data.extend(pops)
return data
def _configure_node(var, data, nodemap, model):
node = nodemap[var]
edges = node[1]
surprising = False
while data:
datum = data.pop()
if isinstance(datum, Pop):
break
triple, push, epis = datum
if triple[0] == var:
_, role, target = triple
elif triple[2] == var and triple[1] != CONCEPT_ROLE:
_, role, target = model.invert(triple)
push = False
surprising = True
else:
data.append(datum)
surprising = True
break
if role == CONCEPT_ROLE:
if not target:
continue
edges.insert(0, ('/', target, epis))
else:
if push:
nodemap[target] = (target, [])
target, _surprising = _configure_node(
target, data, nodemap, model
)
surprising &= _surprising
elif target in nodemap and nodemap[target] is None:
nodemap[target] = node
edges.append((role, target, epis))
return node, surprising
def _find_next(data, nodemap):
var = None
for i in range(len(data) - 1, -1, -1):
datum = data[i]
if isinstance(datum, Pop):
continue
source, _, target = datum[0]
if source in nodemap and _get_or_establish_site(source, nodemap):
var = source
break
elif target in nodemap and _get_or_establish_site(target, nodemap):
var = target
break
pivot = i + 1
return data[pivot:], var, data[:pivot]
def _get_or_establish_site(var, nodemap):
if nodemap[var] is not None:
_var, edges = nodemap[var]
if var != _var:
node = (var, [])
nodemap[var] = node
for i in range(len(edges)):
if edges[i][1] == var and edges[i][0] != '/':
edge = list(edges[i])
edge[1] = node
edges[i] = tuple(edge)
break
else:
pass
return True
return False
def _process_epigraph(node):
_, edges = node
for i, (role, target, epis) in enumerate(edges):
atomic_target = is_atomic(target)
for epi in epis:
if epi.mode == 1:
role = f'{role!s}{epi!s}'
elif epi.mode == 2 and atomic_target:
target = f'{target!s}{epi!s}'
else:
logger.warning('epigraphical marker ignored: %r', epi)
if not atomic_target:
_process_epigraph(target)
edges[i] = (role, target)
def reconfigure(g: Graph,
top: Variable = None,
model: Model = None,
key: Callable[[Role], Any] = None) -> Tree:
p = copy.deepcopy(g)
for epilist in p.epidata.values():
epilist[:] = [epi for epi in epilist
if not isinstance(epi, LayoutMarker)]
if key is not None:
def _key(triple):
return key(triple[1])
p.triples.sort(key=_key)
return configure(p, top=top, model=model)
def rearrange(t: Tree,
key: Callable[[Role], Any] = None,
attributes_first: bool = False) -> None:
if attributes_first:
variables = {node[0] for node in t.nodes()}
else:
variables = set()
def sort_key(branch: Branch):
role, target = branch
if is_atomic(target):
criterion1 = target in variables
else:
criterion1 = target[0] in variables
criterion2 = True if key is None else key(role)
return (criterion1, criterion2)
_rearrange(t.node, sort_key)
def _rearrange(node: Node, key: Callable[[Branch], Any]) -> None:
_, branches = node
if branches and branches[0][0] == '/':
first = branches[0:1]
rest = branches[1:]
else:
first = []
rest = branches[:]
for _, target in rest:
if not is_atomic(target):
_rearrange(target, key=key)
branches[:] = first + sorted(rest, key=key)
def get_pushed_variable(g: Graph,
triple: BasicTriple) -> Union[Variable, None]:
for epi in g.epidata[triple]:
if isinstance(epi, Push):
return epi.variable
return None
def appears_inverted(g: Graph, triple: BasicTriple) -> bool:
variables = g.variables()
if triple[1] == CONCEPT_ROLE or triple[2] not in variables:
return False
else:
variable = get_pushed_variable(g, triple)
if variable is not None:
return variable == triple[0]
else:
for variable, _triple in zip(node_contexts(g), g.triples):
if variable is None:
break
elif _triple == triple:
return triple[2] == variable
return False
|
MIT License
|
pymeasure/pymeasure
|
pymeasure/experiment/experiment.py
|
Experiment.plot
|
python
|
def plot(self, *args, **kwargs):
if self.wait_for_data():
kwargs['title'] = self.title
ax = self.data.plot(*args, **kwargs)
self.plots.append({'type': 'plot', 'args': args, 'kwargs': kwargs, 'ax': ax})
if ax.get_figure() not in self.figs:
self.figs.append(ax.get_figure())
self._user_interrupt = False
|
Plot the results from the experiment.data pandas DataFrame and store the
resulting plots in the plots list attribute.
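A hypothetical interactive session (the procedure class and column names are placeholders; plotting keyword arguments are forwarded to pandas.DataFrame.plot):

experiment = Experiment('iv_curve', MyIVProcedure())  # MyIVProcedure is an assumed Procedure subclass
experiment.start()
experiment.plot(x='Voltage (V)', y='Current (A)', kind='line')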
|
https://github.com/pymeasure/pymeasure/blob/658d8fb9a02bdb62f64cc3838875c0de12f49ca1/pymeasure/experiment/experiment.py#L170-L179
|
import logging
log = logging.getLogger()
log.addHandler(logging.NullHandler())
try:
from IPython import display
except ImportError:
log.warning("IPython could not be imported")
from .results import unique_filename
from .config import get_config, set_mpl_rcparams
from pymeasure.log import setup_logging, console_log
from pymeasure.experiment import Results, Worker
from .parameters import Measurable
import time, signal
import numpy as np
import pandas as pd
import tempfile
import gc
def get_array(start, stop, step):
step = np.sign(stop - start) * abs(step)
return np.arange(start, stop + step, step)
def get_array_steps(start, stop, numsteps):
return get_array(start, stop, (abs(stop - start) / numsteps))
def get_array_zero(maxval, step):
return np.concatenate((np.arange(0, maxval, step), np.arange(maxval, -maxval, -step),
np.arange(-maxval, 0, step)))
def create_filename(title):
config = get_config()
if 'Filename' in config._sections.keys():
filename = unique_filename(suffix='_%s' % title, **config._sections['Filename'])
else:
filename = tempfile.mktemp()
return filename
class Experiment(object):
def __init__(self, title, procedure, analyse=(lambda x: x)):
self.title = title
self.procedure = procedure
self.measlist = []
self.port = 5888
self.plots = []
self.figs = []
self._data = []
self.analyse = analyse
self._data_timeout = 10
config = get_config()
set_mpl_rcparams(config)
if 'Logging' in config._sections.keys():
self.scribe = setup_logging(log, **config._sections['Logging'])
else:
self.scribe = console_log(log)
self.scribe.start()
self.filename = create_filename(self.title)
log.info("Using data file: %s" % self.filename)
self.results = Results(self.procedure, self.filename)
log.info("Set up Results")
self.worker = Worker(self.results, self.scribe.queue, logging.DEBUG)
log.info("Create worker")
def start(self):
log.info("Starting worker...")
self.worker.start()
@property
def data(self):
self._data = self.analyse(self.results.data.copy())
return self._data
def wait_for_data(self):
t = time.time()
while self.data.empty:
time.sleep(.1)
if (time.time() - t) > self._data_timeout:
log.warning('Timeout, no data received for liveplot')
return False
return True
def plot_live(self, *args, **kwargs):
if self.wait_for_data():
if not (self.plots):
self.plot(*args, **kwargs)
while not self.worker.should_stop():
self.update_plot()
display.clear_output(wait=True)
if self.worker.is_alive():
self.worker.terminate()
self.scribe.stop()
|
MIT License
|
ngageoint/sarpy
|
sarpy/consistency/parsers.py
|
parse_bool
|
python
|
def parse_bool(elem):
return parse_bool_text(elem.text)
|
Gets a boolean from an element.
Parameters
----------
elem : lxml.etree.ElementTree.Element
Element to convert.
Returns
-------
val : bool
Boolean value of the `elem`'s text.
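A quick illustration, building elements ad hoc with lxml:

from lxml import etree

assert parse_bool(etree.fromstring('<Valid>true</Valid>')) is True
assert parse_bool(etree.fromstring('<Valid>0</Valid>')) is False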
|
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/consistency/parsers.py#L68-L83
|
__classification__ = "UNCLASSIFIED"
__author__ = "Nathan Bombaci, Valkyrie"
from typing import List, Callable
import numpy as np
def parse_text(elem):
for converter in (int, float, parse_bool_text, str):
try:
val = converter(elem.text)
break
except ValueError:
continue
return val
def parse_bool_text(text):
text = text.lower()
if text in ['true', '1']:
return True
if text in ['false', '0']:
return False
raise ValueError("Cannot parse bool from {}".format(text))
|
MIT License
|
microsoft/presidio
|
presidio-analyzer/presidio_analyzer/predefined_recognizers/uk_nhs_recognizer.py
|
NhsRecognizer.validate_result
|
python
|
def validate_result(self, pattern_text: str) -> bool:
text = self.__sanitize_value(pattern_text, self.replacement_pairs)
total = sum(
[int(c) * multiplier for c, multiplier in zip(text, reversed(range(11)))]
)
remainder = total % 11
check_remainder = remainder == 0
return check_remainder
|
Validate the pattern logic, e.g., by running a checksum on a detected pattern.
:param pattern_text: the text to be validated.
    Only the part of the text that was detected by the regex engine
:return: A bool indicating whether the validation was successful.
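A worked checksum example using the commonly cited NHS test number 943 476 5919 (digits weighted 10 down to 1, summed, then checked modulo 11):

recognizer = NhsRecognizer()
# 9*10 + 4*9 + 3*8 + 4*7 + 7*6 + 6*5 + 5*4 + 9*3 + 1*2 + 9*1 = 308 and 308 % 11 == 0
assert recognizer.validate_result("943 476 5919") is True
assert recognizer.validate_result("943 476 5918") is False   # checksum 307, not divisible by 11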
|
https://github.com/microsoft/presidio/blob/9d03112be79195937446daee4b583e07fa081667/presidio-analyzer/presidio_analyzer/predefined_recognizers/uk_nhs_recognizer.py#L54-L69
|
from typing import Optional, List, Tuple
from presidio_analyzer import Pattern, PatternRecognizer
class NhsRecognizer(PatternRecognizer):
PATTERNS = [
Pattern(
"NHS (medium)",
r"\b([0-9]{3})[- ]?([0-9]{3})[- ]?([0-9]{4})\b",
0.5,
),
]
CONTEXT = [
"national health service",
"nhs",
"health services authority",
"health authority",
]
def __init__(
self,
patterns: Optional[List[Pattern]] = None,
context: Optional[List[str]] = None,
supported_language: str = "en",
supported_entity: str = "UK_NHS",
replacement_pairs: Optional[List[Tuple[str, str]]] = None,
):
self.replacement_pairs = (
replacement_pairs if replacement_pairs else [("-", ""), (" ", "")]
)
context = context if context else self.CONTEXT
patterns = patterns if patterns else self.PATTERNS
super().__init__(
supported_entity=supported_entity,
patterns=patterns,
context=context,
supported_language=supported_language,
)
|
MIT License
|
demisto/demisto-sdk
|
demisto_sdk/commands/format/update_generic.py
|
BaseUpdate.set_fromVersion_of_generic_object
|
python
|
def set_fromVersion_of_generic_object(self, from_version=None):
if self.verbose:
click.echo('Setting fromVersion field of a generic object')
if from_version:
if LooseVersion(from_version) < LooseVersion(GENERIC_OBJECTS_DEFAULT_FROMVERSION):
click.echo(f'The given fromVersion value for generic entities should be'
                       f' {GENERIC_OBJECTS_DEFAULT_FROMVERSION} or above, given: {from_version}.\n'
f'Setting fromVersion field to {GENERIC_OBJECTS_DEFAULT_FROMVERSION}')
self.data[self.from_version_key] = GENERIC_OBJECTS_DEFAULT_FROMVERSION
else:
self.data[self.from_version_key] = from_version
else:
if LooseVersion(self.data.get(self.from_version_key, '0.0.0')) < LooseVersion(GENERIC_OBJECTS_DEFAULT_FROMVERSION):
self.data[self.from_version_key] = GENERIC_OBJECTS_DEFAULT_FROMVERSION
|
Sets the fromVersion key in a generic object file.
Args:
from_version: The specific from_version value.
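A sketch of the clamping rule in isolation (the default value below is assumed purely for illustration; the real constant is imported from format_constants):

from distutils.version import LooseVersion

GENERIC_OBJECTS_DEFAULT_FROMVERSION = '6.5.0'   # assumed value, for illustration only
requested = '6.0.0'
effective = (requested
             if LooseVersion(requested) >= LooseVersion(GENERIC_OBJECTS_DEFAULT_FROMVERSION)
             else GENERIC_OBJECTS_DEFAULT_FROMVERSION)
print(effective)   # '6.5.0' - requests below the generic-object minimum are bumped up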
|
https://github.com/demisto/demisto-sdk/blob/8d8767c2dfec77b67c35f4e1022e30ed2893e864/demisto_sdk/commands/format/update_generic.py#L215-L234
|
import os
import re
from copy import deepcopy
from distutils.version import LooseVersion
from typing import Dict, Optional, Set, Union
import click
import yaml
from ruamel.yaml import YAML
from demisto_sdk.commands.common.constants import INTEGRATION, PLAYBOOK
from demisto_sdk.commands.common.tools import (LOG_COLORS, get_dict_from_file,
get_pack_metadata,
get_remote_file,
is_file_from_content_repo)
from demisto_sdk.commands.format.format_constants import (
DEFAULT_VERSION, ERROR_RETURN_CODE, GENERIC_OBJECTS_DEFAULT_FROMVERSION,
GENERIC_OBJECTS_FILE_TYPES, NEW_FILE_DEFAULT_5_5_0_FROMVERSION,
OLD_FILE_DEFAULT_1_FROMVERSION, SKIP_RETURN_CODE, SUCCESS_RETURN_CODE,
VERSION_6_0_0)
from demisto_sdk.commands.validate.validate_manager import ValidateManager
ryaml = YAML()
ryaml.allow_duplicate_keys = True
ryaml.preserve_quotes = True
class BaseUpdate:
def __init__(self,
input: str = '',
output: str = '',
path: str = '',
from_version: str = '',
no_validate: bool = False,
verbose: bool = False,
assume_yes: bool = False,
deprecate: bool = False):
self.source_file = input
self.output_file = self.set_output_file_path(output)
self.verbose = verbose
_, self.relative_content_path = is_file_from_content_repo(self.output_file)
self.old_file = self.is_old_file(self.relative_content_path if self.relative_content_path
else self.output_file, self.verbose)
self.schema_path = path
self.from_version = from_version
self.no_validate = no_validate
self.assume_yes = assume_yes
self.updated_ids: Dict = {}
if not self.no_validate:
self.validate_manager = ValidateManager(silence_init_prints=True, skip_conf_json=True,
skip_dependencies=True, skip_pack_rn_validation=True,
check_is_unskipped=False, validate_id_set=False)
if not self.source_file:
raise Exception('Please provide <source path>, <optional - destination path>.')
try:
self.data, self.file_type = get_dict_from_file(self.source_file, use_ryaml=True)
except Exception:
raise Exception(F'Provided file {self.source_file} is not a valid file.')
self.from_version_key = self.set_from_version_key_name()
def set_output_file_path(self, output_file_path) -> str:
if not output_file_path:
source_dir = os.path.dirname(self.source_file)
file_name = os.path.basename(self.source_file)
if self.__class__.__name__ == 'PlaybookYMLFormat':
if "Pack" not in source_dir:
if not file_name.startswith('playbook-'):
file_name = F'playbook-{file_name}'
return os.path.join(source_dir, file_name)
else:
return output_file_path
def set_version_to_default(self, location=None):
if self.verbose:
click.echo(f'Setting JSON version to default: {DEFAULT_VERSION}')
if location:
location['version'] = DEFAULT_VERSION
else:
self.data['version'] = DEFAULT_VERSION
def remove_unnecessary_keys(self):
with open(self.schema_path, 'r') as file_obj:
schema = yaml.safe_load(file_obj)
extended_schema = self.recursive_extend_schema(schema, schema)
if self.verbose:
print('Removing Unnecessary fields from file')
if isinstance(extended_schema, dict):
self.recursive_remove_unnecessary_keys(extended_schema.get('mapping', {}), self.data)
@staticmethod
def recursive_extend_schema(current_schema: Union[str, bool, list, dict],
full_schema: dict) -> Union[str, bool, list, dict]:
if isinstance(current_schema, str) or isinstance(current_schema, bool):
return current_schema
if isinstance(current_schema, list):
return [BaseUpdate.recursive_extend_schema(value, full_schema) for value in current_schema]
if isinstance(current_schema, dict):
modified_schema = {}
for key, value in current_schema.items():
if key.startswith('schema;'):
continue
if isinstance(value, str) and key == 'include':
extended_schema: dict = full_schema.get(f'schema;{value}')
if extended_schema is None:
click.echo(f"Could not find sub-schema for {value}", LOG_COLORS.YELLOW)
return BaseUpdate.recursive_extend_schema(deepcopy(extended_schema), full_schema)
else:
modified_schema[key] = BaseUpdate.recursive_extend_schema(value, full_schema)
return modified_schema
def recursive_remove_unnecessary_keys(self, schema: dict, data: dict) -> None:
data_fields = set(data.keys())
for field in data_fields:
if field not in schema.keys():
matching_key = self.regex_matching_key(field, schema.keys())
if matching_key:
mapping = schema.get(matching_key, {}).get('mapping')
if mapping:
self.recursive_remove_unnecessary_keys(
schema.get(matching_key, {}).get('mapping'),
data.get(field, {})
)
else:
if self.verbose:
print(f'Removing {field} field')
data.pop(field, None)
else:
mapping = schema.get(field, {}).get('mapping')
if mapping:
self.recursive_remove_unnecessary_keys(
schema.get(field, {}).get('mapping'),
data.get(field, {})
)
else:
sequence = schema.get(field, {}).get('sequence', [])
if sequence and sequence[0].get('mapping'):
for list_element in data[field]:
self.recursive_remove_unnecessary_keys(
sequence[0].get('mapping'),
list_element
)
def regex_matching_key(self, field, schema_keys):
regex_keys = [regex_key for regex_key in schema_keys if 'regex;' in regex_key]
for reg in regex_keys:
if re.match(reg.split(';')[1], field):
return reg
return None
|
MIT License
|
kubevirt/client-python
|
kubevirt/models/v1_disk_target.py
|
V1DiskTarget.readonly
|
python
|
def readonly(self):
return self._readonly
|
Gets the readonly of this V1DiskTarget.
ReadOnly. Defaults to false.
:return: The readonly of this V1DiskTarget.
:rtype: bool
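A minimal construction example for this generated model (values are arbitrary):

target = V1DiskTarget(bus='virtio', readonly=True)
assert target.readonly is True
target.readonly = False   # attribute-style setter generated alongside the getter above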
|
https://github.com/kubevirt/client-python/blob/c9f9d3bc429f783076982b46b194d5f7669eab1b/kubevirt/models/v1_disk_target.py#L108-L116
|
from pprint import pformat
from six import iteritems
import re
class V1DiskTarget(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bus': 'str',
'pci_address': 'str',
'readonly': 'bool'
}
attribute_map = {
'bus': 'bus',
'pci_address': 'pciAddress',
'readonly': 'readonly'
}
def __init__(self, bus=None, pci_address=None, readonly=None):
self._bus = None
self._pci_address = None
self._readonly = None
if bus is not None:
self.bus = bus
if pci_address is not None:
self.pci_address = pci_address
if readonly is not None:
self.readonly = readonly
@property
def bus(self):
return self._bus
@bus.setter
def bus(self, bus):
self._bus = bus
@property
def pci_address(self):
return self._pci_address
@pci_address.setter
def pci_address(self, pci_address):
self._pci_address = pci_address
@property
|
Apache License 2.0
|
gregorch/ipet
|
ipet/TestRun.py
|
TestRun.getKeySet
|
python
|
def getKeySet(self):
if self.datadict != {}:
return list(self.datadict.keys())
else:
return set(self.data.columns)
|
Return a list or set of keys (which are the column headers of the data)
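A tiny sketch, populating datadict directly with the structure that addDataById would create (key names are invented):

tr = TestRun()
tr.datadict = {"Nodes": {0: 532}, "SolvingTime": {0: 1.7}}
print(sorted(tr.getKeySet()))   # ['Nodes', 'SolvingTime']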
|
https://github.com/gregorch/ipet/blob/e4135ff936d3aa447a960d854f9c51554e5ba7dc/ipet/TestRun.py#L195-L201
|
from ipet import Key
from ipet import misc
from pandas import DataFrame, notnull
import os, sys
import logging
from ipet.Key import CONTEXT_LOGFILE, CONTEXT_METAFILE
from ipet.validation import Validation
logger = logging.getLogger(__name__)
try:
import pickle as pickle
except:
import pickle
class TestRun:
FILE_EXTENSION = ".trn"
def __init__(self, filenames = []):
self.inputfromstdin = False
self.filenames = []
for filename in filenames:
self.appendFilename(filename)
self.data = DataFrame(dtype = object)
self.datadict = {}
self.currentproblemdata = {}
self.currentproblemid = 0
self.metadatadict = {}
self.historic_metadatadict = {}
self.parametervalues = {}
self.defaultparametervalues = {}
self.keyset = set()
self.validation = None
self.currentfileiterator = None
self.currentfile = None
self.consumedStdinput = []
def __iter__(self):
        if self.currentfile != "":
with open(self.currentfile, "r") as f:
for line in enumerate(f):
yield line
else:
for line in enumerate(self.consumedStdinput):
yield line
for line in enumerate(sys.stdin, len(self.consumedStdinput)):
yield line
def iterationPrepare(self):
filenames = sorted(self.filenames, key = lambda x:misc.sortingKeyContext(misc.filenameGetContext(x)))
self.currentfileiterator = iter(filenames)
def iterationNextFile(self):
try:
self.currentproblemid = 0
self.currentfile = next(self.currentfileiterator)
return True
except StopIteration:
return False
def iterationAddConsumedStdinput(self, consumedlines):
if self.currentfile == "":
for line in consumedlines:
self.consumedStdinput.append(line)
def iterationCleanUp(self):
self.currentfileiterator = None
def iterationGetCurrentFile(self):
return self.currentfile
def setInputFromStdin(self):
self.filenames.append("")
def setValidation(self, validation : Validation):
self.validation = validation
def appendFilename(self, filename):
filename = os.path.abspath(filename)
if filename not in self.filenames:
self.filenames.append(filename)
else:
return
extension = misc.filenameGetContext(filename)
if extension in [Key.CONTEXT_ERRFILE, Key.CONTEXT_LOGFILE]:
metafile = os.path.splitext(filename)[0] + ".meta"
if os.path.isfile(metafile) and (metafile not in self.filenames):
self.filenames.append(metafile)
def addDataByName(self, datakeys, data, problem):
for problemid, name in self.datadict.setdefault(Key.ProblemName, {}).items():
if name == problem:
self.addDataById(datakeys, data, problemid)
def addData(self, datakey, data):
logger.debug("TestRun %s receives data Datakey %s, %s" % (self.getName(), repr(datakey), repr(data)))
if type(datakey) is list and type(data) is list:
for key, datum in zip(datakey, data):
self.currentproblemdata[key] = datum
else:
self.currentproblemdata[datakey] = data
def getCurrentProblemData(self, datakey : str = None):
if datakey is None:
return self.currentproblemdata
else:
return self.currentproblemdata.get(datakey)
def addDataById(self, datakeys, data, problemid):
logger.debug("TestRun %s receives data Datakey %s, %s to problem %s" % (self.getName(), repr(datakeys), repr(data), problemid))
if not self.data.empty:
self.data.loc[problemid, datakeys] = data
else:
if type(datakeys) is list and type(data) is list:
for key, datum in zip(datakeys, data):
self.datadict.setdefault(key, {})[problemid] = datum
else:
self.datadict.setdefault(datakeys, {})[problemid] = data
def addParameterValue(self, paramname, paramval):
self.parametervalues[paramname] = paramval
def addDefaultParameterValue(self, paramname, defaultval):
self.defaultparametervalues[paramname] = defaultval
def getParameterData(self):
return (self.parametervalues, self.defaultparametervalues)
def getLogFile(self, fileextension = ".out"):
for filename in self.filenames:
if filename.endswith(fileextension):
return filename
return None
|
MIT License
|
gdquest/product-packager
|
programs/mavenseed/publish-lessons.py
|
delete_lessons
|
python
|
def delete_lessons(auth_token: str, args: Args) -> None:
def delete_lesson(auth_token: str, lesson_id: int) -> bool:
print(f"Deleting lesson {lesson_id} from course {lesson_id}.")
delete_url: str = f"{args.mavenseed_url}/{API_SLUG_LESSONS}/{lesson_id}"
response: requests.Response = requests.delete(
delete_url, headers={"Authorization": f"Bearer {auth_token}"}
)
if response.status_code != ResponseCodes.OK:
print(
f"Error deleting lesson {lesson_id}.\n"
f"Response status code: {response.status_code}.\n"
f"Response text: {response.text}"
)
return response.status_code == ResponseCodes.OK
cached_data: dict = {}
if CACHE_FILE.exists():
with open(CACHE_FILE) as f:
cached_data = json.load(f)
if not cached_data:
raise RuntimeError("Cache file empty.")
lessons_to_delete: List[int] = [get_lesson_title(f) for f in args.lesson_files]
course_to_update = cached_data[args.course]
course_chapters: dict = course_to_update["chapters"]
for chapter_name in course_chapters:
chapter_data: dict = course_chapters[chapter_name]
for lesson_title in chapter_data["lessons"]:
if lesson_title not in lessons_to_delete:
continue
lesson_data: dict = chapter_data["lessons"][lesson_title]
did_deletion_succeed: bool = delete_lesson(auth_token, lesson_data["id"])
if did_deletion_succeed:
del cached_data[course_to_update.title]["chapters"][chapter_name][
"lessons"
][lesson_title]
print("Saving changes to cache file.")
save_cache(cached_data)
|
Deletes the lessons corresponding to the given course and lesson_files
using the Mavenseed API.
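A hypothetical invocation (a sketch only; the token, course title and file path are placeholders, and a populated .cache/courses.json is assumed to exist):

    from pathlib import Path

    args = Args(
        course="Learn GDScript",                        # placeholder course title
        lesson_files=[Path("lessons/01.intro.html")],   # placeholder lesson file
        delete=True,
    )
    auth_token = "..."  # obtained from the /api/login endpoint elsewhere in the script
    delete_lessons(auth_token, args)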
|
https://github.com/gdquest/product-packager/blob/aa8b5da9813400fb8cecb4247d100999881f4693/programs/mavenseed/publish-lessons.py#L705-L749
|
import re
import json
import dotenv
import os
import sys
from enum import Enum
from dataclasses import dataclass
from pathlib import Path
from typing import List, Sequence, Set, Generator, Dict, Tuple
import requests
from datargs import arg, parse
dotenv.load_dotenv()
YOUR_MAVENSEED_URL: str = os.environ.get("MAVENSEED_URL", "")
YOUR_EMAIL: str = os.environ.get("MAVENSEED_EMAIL", "")
YOUR_PASSWORD: str = os.environ.get("MAVENSEED_PASSWORD", "")
API_SLUG_LOGIN: str = "/api/login"
API_SLUG_COURSES: str = "/api/v1/courses"
API_SLUG_CHAPTERS: str = "/api/v1/chapters"
API_SLUG_COURSE_CHAPTERS: str = "/api/v1/course_chapters"
API_SLUG_LESSONS: str = "/api/v1/lessons"
ERROR_NO_VALID_LESSON_FILES: int = 1
ERROR_COURSE_NOT_FOUND: int = 2
ERROR_CACHE_FILE_EMPTY: int = 3
CACHE_FILE: Path = Path(".cache") / "courses.json"
class Status(Enum):
DRAFT: int = 0
PUBLISHED: int = 1
class ResponseCodes(Enum):
OK: int = 200
CREATED: int = 201
NO_CONTENT: int = 204
BAD_REQUEST: int = 400
UNAUTHORIZED: int = 401
FORBIDDEN: int = 403
NOT_FOUND: int = 404
METHOD_NOT_ALLOWED: int = 405
CONFLICT: int = 409
UNPROCESSABLE_ENTITY: int = 422
INTERNAL_SERVER_ERROR: int = 500
NOT_IMPLEMENTED: int = 501
@dataclass
class Args:
course: str = arg(
positional=True,
help="The name or URL slug of the course to upload the lessons to.",
)
lesson_files: Sequence[Path] = arg(
positional=True,
default=None,
help="A sequence of paths to html files to upload to Mavenseed.",
)
overwrite: bool = arg(
default=True,
help="If set, overwrite existing lessons in the course. Otherwise, skip existing lessons.",
aliases=["-o"],
)
mavenseed_url: str = arg(
default=YOUR_MAVENSEED_URL,
help="""the url of your mavenseed website.
if you omit this option, the program tries to read it from the environment variable MAVENSEED_URL.
""",
aliases=["-u"],
)
email: str = arg(
default=YOUR_EMAIL,
help="""Your email to log into your Mavenseed's admin account.
if you omit this option, the program tries to read it from the environment variable MAVENSEED_EMAIL.
""",
aliases=["-e"],
)
password: str = arg(
default=YOUR_PASSWORD,
help="""Your password to log into your Mavenseed's admin account.
if you omit this option, the program tries to read it from the environment variable MAVENSEED_PASSWORD.
""",
aliases=["-p"],
)
list_courses: bool = arg(
default=False,
help="If true, list all courses on the Mavenseed website and exit.",
)
refresh_cache: bool = arg(
default=False,
aliases=["-r"],
help="If True, resets the cache file before running the program."
" This deletes thecache file and forces the program to re-query the Mavenseed API.",
)
delete: bool = arg(
default=False,
aliases=["-d"],
help="If True, deletes the lessons instead of uploading them.",
)
verbose: bool = arg(
default=False,
aliases=["-v"],
help="If True, prints more information about the process.",
)
@dataclass
class Course:
id: int
title: str
slug: str
status: str
created_at: str
updated_at: str
scheduled_at: str
published_at: str
excerpt: str
free: bool
questions_enabled: bool
signin_required: bool
view_count: int
metadata: dict
banner_data: object
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"slug": self.slug,
"status": self.status,
"created_at": self.created_at,
"updated_at": self.updated_at,
"scheduled_at": self.scheduled_at,
"published_at": self.published_at,
"excerpt": self.excerpt,
"free": self.free,
"questions_enabled": self.questions_enabled,
"signin_required": self.signin_required,
"view_count": self.view_count,
"metadata": self.metadata,
"banner_data": self.banner_data,
}
@dataclass
class Chapter:
id: int
course_id: int
title: str
content: str
created_at: str
updated_at: str
ordinal: int
def to_dict(self):
return {
"id": self.id,
"course_id": self.course_id,
"title": self.title,
"content": self.content,
"created_at": self.created_at,
"updated_at": self.updated_at,
"ordinal": self.ordinal,
}
@dataclass
class NewChapter:
title: str
course_id: int
ordinal: int
@dataclass
class NewLesson:
title: str
filepath: Path
ordinal: int
chapter_id: int = -1
def get_file_content(self) -> str:
with open(self.filepath, "r") as f:
return f.read()
def get_title(self, content: str) -> str:
title = re.search(r"<title>(.*?)</title>", content)
if title:
return title.group(1)
else:
return self.slug.replace("-", " ").title()
@dataclass
class Lesson:
id: int
lessonable_type: str
lessonable_id: int
title: str
slug: str
content: str
status: str
created_at: str
updated_at: str
ordinal: int
exercise_votes_threshold: int
exercise_type: int
free: bool
media_type: str
signin_required: bool
metadata: dict
embed_data: object
def to_dict(self):
return {
"id": self.id,
"lessonable_type": self.lessonable_type,
"lessonable_id": self.lessonable_id,
"title": self.title,
"slug": self.slug,
"status": self.status,
"created_at": self.created_at,
"updated_at": self.updated_at,
"ordinal": self.ordinal,
"exercise_votes_threshold": self.exercise_votes_threshold,
"exercise_type": self.exercise_type,
"free": self.free,
"media_type": self.media_type,
"signin_required": self.signin_required,
"metadata": self.metadata,
"embed_data": self.embed_data,
}
def get_lesson_title(filepath: Path) -> str:
lesson_title: str = ""
with open(filepath) as f:
while True:
line = f.readline()
if re.search(r"<title>", line):
lesson_title = re.search(r"<title>(.*)</title>", line).group(1)
break
if re.search(r"<h1.*?>", line):
lesson_title = re.search(r"<h1.*?>(.*)</h1>", line).group(1)
break
if not line:
break
assert lesson_title != "", (
f"Unable to find the <title>or <h1> HTML tag in file {filepath}.\n"
"This is required for the program to work."
)
return lesson_title.strip("'").replace("'", "")
def cache_all_courses(url: str, auth_token: str) -> dict:
output = dict()
def get_all_courses(api_url: str, auth_token: str) -> List[Course]:
response = requests.get(
api_url + API_SLUG_COURSES,
headers={"Authorization": "Bearer " + auth_token},
)
courses: List[Course] = [Course(**course) for course in response.json()]
return courses
def get_all_chapters_in_course(
api_url: str, auth_token: str, course_id: int
) -> List[Chapter]:
print(f"Getting chapters for course {course_id}.", end="\r")
response = requests.get(
f"{api_url}/{API_SLUG_COURSE_CHAPTERS}/{course_id}",
headers={"Authorization": "Bearer " + auth_token},
)
chapters: List[Chapter] = [Chapter(**data) for data in response.json()]
return chapters
def get_all_lessons(
api_url: str, auth_token: str, max_pages: int = -1
) -> Generator:
page: int = 0
while True:
print(f"Getting lessons {page * 20} to {(page + 1) * 20}", end="\r")
response = requests.get(
f"{api_url}/{API_SLUG_LESSONS}",
headers={"Authorization": "Bearer " + auth_token},
params={"page": page},
)
lessons: List[Lesson] = [Lesson(**data) for data in response.json()]
page += 1
if max_pages != -1 and page >= max_pages:
break
if not lessons:
break
yield lessons
print("Downloading all lessons, chapters, and course data. This may take a while.")
lessons_lists: List[List[Lesson]] = list(get_all_lessons(url, auth_token))
lessons: List[Lesson] = [
lesson for lesson_list in lessons_lists for lesson in lesson_list
]
courses: List[Course] = get_all_courses(url, auth_token)
for course in courses:
output[course.title] = {}
output[course.title]["data"] = course.to_dict()
output[course.title]["chapters"] = {}
chapters: Dict[str, Chapter] = {
chapter.title: chapter
for chapter in get_all_chapters_in_course(url, auth_token, course.id)
}
for chapter_title in chapters:
chapter: Chapter = chapters[chapter_title]
lessons_in_chapter_as_dict = [
lesson.to_dict()
for lesson in lessons
if lesson.lessonable_id == chapter.id
]
chapter_as_dict = chapter.to_dict()
chapter_as_dict["lessons"] = {
lesson["slug"]: lesson for lesson in lessons_in_chapter_as_dict
}
output[course.title]["chapters"][chapter_title] = chapter_as_dict
return output
def save_cache(cache: dict) -> None:
if not CACHE_FILE.parent.exists():
print("Creating .cache/ directory.")
CACHE_FILE.parent.mkdir()
print(f"Writing the data of {len(cache)} courses to {CACHE_FILE.as_posix()}.")
json.dump(cache, open(CACHE_FILE, "w"), indent=2)
def does_chapter_exist_in_course(
auth_token: str, api_url: str, chapter_id: int, course_id: int
) -> bool:
response = requests.get(
f"{api_url}/{API_SLUG_CHAPTERS}/{chapter_id}",
headers={"Authorization": "Bearer " + auth_token},
)
if response.status_code == ResponseCodes.OK.value:
data: dict = response.json()
return (
data is not None
and data["id"] == chapter_id
and data["course_id"] == course_id
)
return False
def create_lesson(auth_token: str, api_url: str, new_lesson: NewLesson) -> dict:
content: str = new_lesson.get_file_content()
print(f"Creating lesson {new_lesson.title}.")
response = requests.post(
f"{api_url}/{API_SLUG_LESSONS}",
headers={"Authorization": "Bearer " + auth_token},
json={
"lessonable_id": new_lesson.chapter_id,
"lessonable_type": "Chapter",
"title": new_lesson.title,
"content": content,
"ordinal": new_lesson.ordinal,
"status": Status.PUBLISHED.value,
},
)
if response.status_code != ResponseCodes.CREATED.value:
raise Exception(
f"Failed to create lesson {new_lesson.title}. Status code: {response.status_code}."
)
return response.json()
def update_lesson(auth_token: str, api_url: str, lesson: Lesson) -> dict:
print(f"Updating lesson {lesson.title}.")
response = requests.patch(
f"{api_url}/{API_SLUG_LESSONS}/{lesson.id}",
headers={"Authorization": "Bearer " + auth_token},
json={
"title": lesson.title,
"content": lesson.content,
"status": Status.PUBLISHED.value,
},
)
if response.status_code != ResponseCodes.OK.value:
raise Exception(
f"Failed to update lesson {lesson.title}. Status code: {response.status_code}."
)
return response.json()
def create_chapter(auth_token: str, api_url: str, new_chapter: NewChapter) -> dict:
print(f"Creating chapter {new_chapter.title}.")
response = requests.post(
f"{api_url}/{API_SLUG_CHAPTERS}",
headers={"Authorization": "Bearer " + auth_token},
json={
"course_id": new_chapter.course_id,
"title": new_chapter.title,
"ordinal": new_chapter.ordinal,
},
)
if response.status_code != 201:
raise Exception(
f"Failed to create chapter {new_chapter.title}. Status code: {response.status_code}."
)
return response.json()
def list_all_courses(auth_token: str, api_url: str) -> None:
print("Listing all courses.\n")
response = requests.get(
f"{api_url}/{API_SLUG_COURSES}",
headers={"Authorization": "Bearer " + auth_token},
)
if response.status_code != 200:
raise Exception(
f"Failed to list all courses. Status code: {response.status_code}."
)
data = response.json()
print(f"Found {len(data)} courses:\n")
for course in data:
print(f"- {course['title']} (id: {course['id']})")
def create_and_update_course(auth_token: str, args: Args) -> None:
def validate_files(args: Args) -> List[Path]:
def validate_lesson_files(files: Sequence[Path]) -> List[Path]:
def is_valid_file(filepath: Path) -> bool:
if not filepath.exists() and filepath.suffix.lower() == ".html":
return False
with filepath.open() as f:
content = f.read()
return (
re.search(r"<title>.*</title>", content) is not None
or re.search(r"<h1.*?>.*</h1>", content) is not None
)
return [filepath for filepath in files if is_valid_file(filepath)]
valid_files: List[Path] = validate_lesson_files(args.lesson_files)
if len(valid_files) != len(args.lesson_files):
invalid_files: Set[Path] = {
filepath
for filepath in args.lesson_files
if filepath not in valid_files
}
for filepath in invalid_files:
print(f"{filepath} is not a valid lesson file. It won't be uploaded.")
if len(valid_files) == 0:
print(
"No valid lesson files found to upload in the provided list. Exiting."
)
sys.exit(ERROR_NO_VALID_LESSON_FILES)
return valid_files
def validate_and_update_cache(args: Args, auth_token: str) -> dict:
cached_data: dict = {}
if args.refresh_cache:
print("Refreshing cache.")
if CACHE_FILE.exists():
CACHE_FILE.unlink()
if CACHE_FILE.exists():
with open(CACHE_FILE) as f:
cached_data = json.load(f)
else:
print("Downloading and caching all data from Mavenseed.")
cached_data = cache_all_courses(args.mavenseed_url, auth_token)
save_cache(cached_data)
if not cached_data:
print("Cache file is empty. Exiting.")
sys.exit(ERROR_CACHE_FILE_EMPTY)
return cached_data
def find_course_to_update(
args: Args, auth_token: str, cached_data: dict
) -> Tuple[dict, Course]:
course_to_update: Course = None
while not course_to_update:
course_to_update = next(
(
Course(**cached_data[course_title]["data"])
for course_title in cached_data.keys()
if course_title == args.course
),
None,
)
if not course_to_update:
print(
f"No cached course data found for the course named '{args.course}'. "
f"Do you want to update the course data? [y/N]"
)
if input().lower() != "y":
print("Course missing. Exiting.")
sys.exit(ERROR_COURSE_NOT_FOUND)
else:
print("Updating course data.")
cached_data = cache_all_courses(args.mavenseed_url, auth_token)
save_cache(cached_data)
return cached_data, course_to_update
def get_lessons_in_course(course_chapters: dict) -> List[Lesson]:
lessons_in_course: List[Lesson] = []
for chapter_name in course_chapters:
chapter_data: dict = course_chapters[chapter_name]
for lesson_title in chapter_data["lessons"]:
lesson_data: dict = chapter_data["lessons"][lesson_title]
lessons_in_course.append(Lesson(**lesson_data, content=""))
return lessons_in_course
def map_lessons_to_chapters(valid_files: List[Path]) -> dict:
lessons_map: dict = {}
for filepath in valid_files:
chapter_name: str = filepath.parent.name
chapter_name = re.sub(r"[\-_]", " ", chapter_name)
chapter_name = re.sub(r"\d+\.", "", chapter_name)
chapter_name = chapter_name.capitalize()
if not lessons_map.get(chapter_name):
lessons_map[chapter_name] = []
lesson_title: str = get_lesson_title(filepath)
lessons_map[chapter_name].append((lesson_title, filepath))
return lessons_map
valid_files: List[Path] = validate_files(args)
cached_data = validate_and_update_cache(args, auth_token)
cached_data, course_to_update = find_course_to_update(args, auth_token, cached_data)
course_chapters: dict = cached_data[course_to_update.title]["chapters"]
lessons_in_course: List[Lesson] = get_lessons_in_course(course_chapters)
lessons_map: dict = map_lessons_to_chapters(valid_files)
chapter_loopindex: int = 1
for chapter_name in lessons_map:
chapters_filter: filter = filter(
lambda k: k == chapter_name, course_chapters.keys()
)
matching_chapter_title: str = next(chapters_filter, "")
matching_chapter: dict = (
course_chapters[matching_chapter_title] if matching_chapter_title else {}
)
chapter_id: int = matching_chapter["id"] if matching_chapter else -1
if not matching_chapter:
new_chapter: NewChapter = NewChapter(
title=chapter_name,
course_id=course_to_update.id,
ordinal=chapter_loopindex,
)
new_chapter_data: dict = create_chapter(
auth_token, args.mavenseed_url, new_chapter
)
new_chapter_data["lessons"] = {}
cached_data[course_to_update.title]["chapters"][
chapter_name
] = new_chapter_data
chapter_id = new_chapter_data["id"]
chapter_loopindex += 1
lesson_loopindex: int = 1
for lesson_title, filepath in lessons_map[chapter_name]:
lessons_filter: filter = filter(
lambda l: l.title == lesson_title, lessons_in_course
)
lesson: Lesson = next(lessons_filter, None)
if not lesson:
if not does_chapter_exist_in_course(
auth_token, args.mavenseed_url, chapter_id, course_to_update.id
):
print(
f"Chapter {chapter_id} not found in course {course_to_update.title}.\n"
f"Skipping lesson {lesson_title}."
)
continue
new_lesson: NewLesson = NewLesson(
title=lesson_title,
filepath=filepath,
ordinal=lesson_loopindex,
chapter_id=chapter_id,
)
new_lesson_data: dict = create_lesson(
auth_token, args.mavenseed_url, new_lesson
)
del new_lesson_data["content"]
cached_data[course_to_update.title]["chapters"][chapter_name][
"lessons"
][new_lesson_data["title"]] = new_lesson_data
else:
content: str = ""
with open(filepath) as f:
content = f.read()
lesson.content = content
assert (
content != ""
), f"Empty lesson content found for lesson {lesson.title}."
update_lesson(auth_token, args.mavenseed_url, lesson)
lesson_loopindex += 1
print("Saving changes to cache file.")
save_cache(cached_data)
|
MIT License
|
treigerm/waternet
|
waterNet/geo_util.py
|
visualise_results
|
python
|
def visualise_results(results, tile_size, out_path, out_format="GeoTIFF"):
get_predictions = lambda (tiles, pos, path): (tiles[0], pos, path)
get_labels = lambda (tiles, pos, path): (tiles[1], pos, path)
get_false_positives = lambda (tiles, pos, path): (tiles[2], pos, path)
get_path = lambda (tiles, pos, path): path
sorted_by_path = sorted(results, key=get_path)
for path, result_tiles in itertools.groupby(sorted_by_path, get_path):
raster_dataset = rasterio.open(path)
dataset_shape = (raster_dataset.shape[0], raster_dataset.shape[1])
result_tiles = list(result_tiles)
predictions = map(get_predictions, result_tiles)
labels = map(get_labels, result_tiles)
false_positives = map(get_false_positives, result_tiles)
satellite_img_name = get_file_name(path)
file_extension = "tif" if out_format == "GeoTIFF" else "shp"
out_file_name = "{}_results.{}".format(satellite_img_name, file_extension)
out = os.path.join(out_path, out_file_name)
if out_format == "GeoTIFF":
for tiles, color in [(labels, 'blue'), (predictions, 'green'), (false_positives, 'red')]:
bitmap = image_from_tiles(tiles, tile_size, dataset_shape)
raster_dataset = overlay_bitmap(bitmap, raster_dataset, out, color=color)
elif out_format == "Shapefile":
bitmap = image_from_tiles(predictions, tile_size, dataset_shape)
create_shapefile(bitmap, raster_dataset, out)
|
Given the predictions, false positives and the labels of our model, visualise them on the satellite
image they belong to.
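A hypothetical call (sketch only; "results" is assumed to be produced by the model evaluation step elsewhere in the project, as tuples of ((prediction, label, false_positive), (row, col), geotiff_path), and the tile size and output path are placeholders):

    visualise_results(results, tile_size=64, out_path="/tmp/visualisations", out_format="GeoTIFF")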
|
https://github.com/treigerm/waternet/blob/5f30e796b03519b1d79be2ac1f148b873bf9e877/waterNet/geo_util.py#L144-L179
|
import rasterio
import rasterio.warp
import fiona
import os
import itertools
import numpy as np
from io_util import get_file_name
from config import WGS84_DIR
def reproject_dataset(geotiff_path):
dst_crs = 'EPSG:4326'
with rasterio.open(geotiff_path) as src:
transform, width, height = rasterio.warp.calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds)
kwargs = src.meta.copy()
kwargs.update({
'crs': dst_crs,
'transform': transform,
'width': width,
'height': height
})
satellite_img_name = get_file_name(geotiff_path)
out_file_name = "{}_wgs84.tif".format(satellite_img_name)
out_path = os.path.join(WGS84_DIR, out_file_name)
with rasterio.open(out_path, 'w', **kwargs) as dst:
for i in range(1, src.count + 1):
rasterio.warp.reproject(
source=rasterio.band(src, i),
destination=rasterio.band(dst, i),
src_transform=src.transform,
src_crs=src.crs,
dst_transform=transform,
dst_crs=dst_crs,
resampling=rasterio.warp.Resampling.nearest)
return rasterio.open(out_path), out_path
def create_tiles(bands_data, tile_size, path_to_geotiff):
rows, cols = bands_data.shape[0], bands_data.shape[1]
all_tiled_data = []
tile_indexes = itertools.product(
range(0, rows, tile_size), range(0, cols, tile_size))
for (row, col) in tile_indexes:
in_bounds = row + tile_size < rows and col + tile_size < cols
if in_bounds:
new_tile = bands_data[row:row + tile_size, col:col + tile_size]
all_tiled_data.append((new_tile, (row, col), path_to_geotiff))
return all_tiled_data
def image_from_tiles(tiles, tile_size, image_shape):
image = np.zeros(image_shape, dtype=np.uint8)
for tile, (row, col), _ in tiles:
tile = np.reshape(tile, (tile_size, tile_size))
image[row:row + tile_size, col:col + tile_size] = tile
return image
def overlay_bitmap(bitmap, raster_dataset, out_path, color='blue'):
colors = {
"red": (255, 0, 0),
"green": (0, 255, 0),
"blue": (0, 0, 255)
}
red, green, blue = raster_dataset.read()
red[bitmap == 1] = colors[color][0]
green[bitmap == 1] = colors[color][1]
blue[bitmap == 1] = colors[color][2]
profile = raster_dataset.profile
with rasterio.open(out_path, 'w', **profile) as dst:
dst.write(red, 1)
dst.write(green, 2)
dst.write(blue, 3)
return rasterio.open(out_path)
def create_shapefile(bitmap, raster_dataset, out_path):
shapes = rasterio.features.shapes(bitmap, transform=raster_dataset.transform)
records = map(lambda (geom, _): {"geometry": geom, "properties": {}}, shapes)
schema = {
"geometry": "Polygon",
"properties": {}
}
with fiona.open(out_path, 'w', driver="ESRI Shapefile", crs=raster_dataset.crs, schema=schema) as f:
f.writerecords(records)
def visualise_labels(labels, tile_size, out_path):
get_path = lambda (tiles, pos, path): path
sorted_by_path = sorted(labels, key=get_path)
for path, predictions in itertools.groupby(sorted_by_path, get_path):
raster_dataset = rasterio.open(path)
bitmap_shape = (raster_dataset.shape[0], raster_dataset.shape[1])
bitmap = image_from_tiles(predictions, tile_size, bitmap_shape)
satellite_img_name = get_file_name(path)
out_file_name = "{}.tif".format(satellite_img_name)
out = os.path.join(out_path, out_file_name)
overlay_bitmap(bitmap, raster_dataset, out)
|
MIT License
|
make-all/tuya-local
|
custom_components/tuya_local/dehumidifier/climate.py
|
GoldairDehumidifier.icon
|
python
|
def icon(self):
if self.tank_full_or_missing:
return "mdi:cup-water"
elif self.defrosting:
return "mdi:snowflake-melt"
elif (
self.hvac_mode is not HVAC_MODE_OFF
and self.preset_mode is PRESET_DRY_CLOTHES
):
return "mdi:tshirt-crew-outline"
elif (
self.hvac_mode is not HVAC_MODE_OFF and self.preset_mode is PRESET_AIR_CLEAN
):
return "mdi:air-purifier"
else:
return "mdi:air-humidifier"
|
Return the icon to use in the frontend based on the device state.
|
https://github.com/make-all/tuya-local/blob/636d0cd4cb2432676d862d290d2f6deea7328437/custom_components/tuya_local/dehumidifier/climate.py#L83-L99
|
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HUMIDITY,
ATTR_HVAC_MODE,
ATTR_PRESET_MODE,
FAN_HIGH,
FAN_LOW,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
)
from homeassistant.const import ATTR_TEMPERATURE, STATE_UNAVAILABLE
from ..device import TuyaLocalDevice
from .const import (
ATTR_AIR_CLEAN_ON,
ATTR_DEFROSTING,
ATTR_ERROR,
ATTR_ERROR_CODE,
ATTR_TARGET_HUMIDITY,
ERROR_CODE_TO_DPS_CODE,
ERROR_TANK,
FAN_MODE_TO_DPS_MODE,
HVAC_MODE_TO_DPS_MODE,
PRESET_AIR_CLEAN,
PRESET_DRY_CLOTHES,
PRESET_HIGH,
PRESET_LOW,
PRESET_MODE_TO_DPS_MODE,
PRESET_NORMAL,
PROPERTY_TO_DPS_ID,
)
SUPPORT_FLAGS = SUPPORT_TARGET_HUMIDITY | SUPPORT_PRESET_MODE | SUPPORT_FAN_MODE
class GoldairDehumidifier(ClimateEntity):
def __init__(self, device):
self._device = device
self._support_flags = SUPPORT_FLAGS
self._HUMIDITY_STEP = 5
self._HUMIDITY_LIMITS = {"min": 30, "max": 80}
@property
def supported_features(self):
return self._support_flags
@property
def should_poll(self):
return True
@property
def name(self):
return self._device.name
@property
def unique_id(self):
return self._device.unique_id
@property
def device_info(self):
return self._device.device_info
@property
|
MIT License
|
google/gazoo-device
|
gazoo_device/base_classes/auxiliary_device_base.py
|
AuxiliaryDeviceBase.get_property_names
|
python
|
def get_property_names(self):
|
Returns a list of all property names.
|
https://github.com/google/gazoo-device/blob/f333b386f5993c8d4c9e12c89ebb620a0c4f5506/gazoo_device/base_classes/auxiliary_device_base.py#L127-L128
|
import abc
from typing import Optional
from gazoo_device import console_config
class AuxiliaryDeviceBase(abc.ABC):
COMMUNICATION_TYPE = None
DETECT_MATCH_CRITERIA = None
DEVICE_TYPE = None
_COMMUNICATION_KWARGS = {}
_OWNER_EMAIL = ""
_RECOVERY_ATTEMPTS = 1
@abc.abstractproperty
def alias(self):
@abc.abstractproperty
def commands(self):
@abc.abstractproperty
def communication_address(self):
@abc.abstractproperty
def connected(self):
@abc.abstractmethod
def get_console_configuration(
self) -> Optional[console_config.ConsoleConfiguration]:
@abc.abstractproperty
def health_checks(self):
@abc.abstractproperty
def model(self):
@abc.abstractproperty
def name(self):
@abc.abstractproperty
def regexes(self):
@abc.abstractproperty
def serial_number(self):
@abc.abstractproperty
def timeouts(self):
@abc.abstractmethod
def check_device_ready(self):
@abc.abstractmethod
def close(self, force: bool = False):
@abc.abstractmethod
def get_detection_info(self):
@abc.abstractmethod
def get_dynamic_properties(self):
@abc.abstractmethod
def get_optional_properties(self):
@abc.abstractmethod
def get_persistent_properties(self):
@abc.abstractmethod
|
Apache License 2.0
|
hypothesis/h
|
h/services/search_index/service.py
|
SearchIndexService.add_annotation
|
python
|
def add_annotation(self, annotation):
if annotation.deleted:
return
body = AnnotationSearchIndexPresenter(annotation, self._request).asdict()
self._index_annotation_body(annotation.id, body, refresh=False)
|
Add an annotation into the search index.
A new annotation document will be created in the search index or,
if the index already contains an annotation document with the same ID
as the given annotation, it will be updated.
:param annotation: Annotation object to index
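A hypothetical usage from within the web application (sketch only; the service lookup name and request attributes are assumptions, not taken from this file):

    search_index = request.find_service(name="search_index")
    annotation = storage.fetch_annotation(request.db, annotation_id)
    if annotation is not None:
        search_index.add_annotation(annotation)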
|
https://github.com/hypothesis/h/blob/1bf1fe34fd471f26a216e682d15ce986dd400fdb/h/services/search_index/service.py#L53-L68
|
from h_pyramid_sentry import report_exception
from h import storage
from h.presenters import AnnotationSearchIndexPresenter
from h.tasks import indexer
class SearchIndexService:
REINDEX_SETTING_KEY = "reindex.new_index"
def __init__(
self, request, es_client, session, settings, queue
):
self._request = request
self._es = es_client
self._db = session
self._settings = settings
self._queue = queue
def add_annotation_by_id(self, annotation_id):
annotation = storage.fetch_annotation(self._db, annotation_id)
if not annotation or annotation.deleted:
return
self.add_annotation(annotation)
if annotation.is_reply:
self.add_annotation_by_id(annotation.thread_root_id)
|
BSD 2-Clause Simplified License
|
mne-tools/mne-nirs
|
mne_nirs/channels/_short.py
|
get_short_channels
|
python
|
def get_short_channels(raw, max_dist=0.01):
short_chans = raw.copy().load_data()
_validate_type(short_chans, BaseRaw, 'raw')
picks = mne.pick_types(short_chans.info, meg=False, eeg=False, fnirs=True,
exclude=[])
if not len(picks):
raise RuntimeError('Short channel extraction for NIRS signals only.')
dists = source_detector_distances(short_chans.info, picks=picks)
short_chans.pick(picks[dists < max_dist])
return short_chans
|
Return channels with a short source-detector separation.
Parameters
----------
raw : instance of Raw
Raw instance containing fNIRS data.
max_dist : number
Maximum distance of returned channels (m).
Returns
-------
raw : instance of Raw
Raw instance with only short channels.
|
https://github.com/mne-tools/mne-nirs/blob/09e17454fc78e1d386c872399ad7813e7a80ddf2/mne_nirs/channels/_short.py#L11-L39
|
from mne.preprocessing.nirs import source_detector_distances
from mne.utils import _validate_type
from mne.io import BaseRaw
import mne
|
BSD 3-Clause New or Revised License
|
softlayer/softlayer-python
|
SoftLayer/managers/network.py
|
NetworkManager.assign_global_ip
|
python
|
def assign_global_ip(self, global_ip_id, target):
return self.client['Network_Subnet_IpAddress_Global'].route(
target, id=global_ip_id)
|
Assigns a global IP address to a specified target.
:param int global_ip_id: The ID of the global IP being assigned
:param string target: The IP address to assign
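A hypothetical call (sketch only; the global IP id and target address are placeholders):

    import SoftLayer
    from SoftLayer.managers.network import NetworkManager

    client = SoftLayer.create_client_from_env()   # credentials read from the environment / config file
    mgr = NetworkManager(client)
    mgr.assign_global_ip(global_ip_id=12345, target="10.0.0.25")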
|
https://github.com/softlayer/softlayer-python/blob/98feac7db01b50eddeeb45769182ab978ebeefc3/SoftLayer/managers/network.py#L219-L226
|
import collections
import json
import logging
from SoftLayer.decoration import retry
from SoftLayer import exceptions
from SoftLayer import utils
from SoftLayer.managers import event_log
LOGGER = logging.getLogger(__name__)
DEFAULT_SUBNET_MASK = ','.join(['hardware',
'datacenter',
'networkVlanId',
'ipAddressCount',
'virtualGuests',
'id',
'networkIdentifier',
'cidr',
'subnetType',
'gateway',
'broadcastAddress',
'usableIpAddressCount',
'note',
'tagReferences[tag]',
'networkVlan[id,networkSpace]'])
DEFAULT_VLAN_MASK = ','.join([
'firewallInterfaces',
'hardwareCount',
'primaryRouter[id, fullyQualifiedDomainName, datacenter]',
'subnetCount',
'totalPrimaryIpAddressCount',
'virtualGuestCount',
'networkSpace',
'networkVlanFirewall[id,fullyQualifiedDomainName,primaryIpAddress]',
'attachedNetworkGateway[id,name,networkFirewall]',
])
DEFAULT_GET_VLAN_MASK = ','.join([
'firewallInterfaces',
'primaryRouter[id, fullyQualifiedDomainName, datacenter]',
'totalPrimaryIpAddressCount',
'networkSpace',
'billingItem',
'hardware',
'subnets',
'virtualGuests',
'networkVlanFirewall[id,fullyQualifiedDomainName,primaryIpAddress]',
'attachedNetworkGateway[id,name,networkFirewall]',
])
class NetworkManager(object):
def __init__(self, client):
self.client = client
self.account = client['Account']
self.vlan = client['Network_Vlan']
self.subnet = client['Network_Subnet']
self.network_storage = self.client['Network_Storage']
self.security_group = self.client['Network_SecurityGroup']
def add_global_ip(self, version=4, test_order=False):
return self.add_subnet('global', version=version,
test_order=test_order)
def add_securitygroup_rule(self, group_id, remote_ip=None,
remote_group=None, direction=None,
ethertype=None, port_max=None,
port_min=None, protocol=None):
rule = {'direction': direction}
if ethertype is not None:
rule['ethertype'] = ethertype
if port_max is not None:
rule['portRangeMax'] = port_max
if port_min is not None:
rule['portRangeMin'] = port_min
if protocol is not None:
rule['protocol'] = protocol
if remote_ip is not None:
rule['remoteIp'] = remote_ip
if remote_group is not None:
rule['remoteGroupId'] = remote_group
return self.add_securitygroup_rules(group_id, [rule])
def add_securitygroup_rules(self, group_id, rules):
if not isinstance(rules, list):
raise TypeError("The rules provided must be a list of dictionaries")
return self.security_group.addRules(rules, id=group_id)
def add_subnet(self, subnet_type, quantity=None, endpoint_id=None, version=4,
test_order=False):
package = self.client['Product_Package']
category = 'sov_sec_ip_addresses_priv'
desc = ''
if version == 4:
if subnet_type == 'global':
quantity = 0
category = "global_ipv4"
elif subnet_type == 'public':
category = "sov_sec_ip_addresses_pub"
elif subnet_type == 'static':
category = "static_sec_ip_addresses"
else:
category = 'static_ipv6_addresses'
if subnet_type == 'global':
quantity = 0
category = 'global_ipv6'
desc = 'Global'
elif subnet_type == 'public':
desc = 'Portable'
elif subnet_type == 'static':
desc = 'Static'
price_id = None
quantity_str = str(quantity)
package_items = package.getItems(id=0, mask='mask[prices[packageReferences[package[keyName]]]]')
for item in package_items:
category_code = utils.lookup(item, 'itemCategory', 'categoryCode')
if all([category_code == category,
item.get('capacity') == quantity_str,
version == 4 or (version == 6 and
desc in item['description'])]):
price_id = self.get_subnet_item_price(item, subnet_type, version)
break
order = {
'packageId': 0,
'prices': [{'id': price_id}],
'quantity': 1,
'complexType': 'SoftLayer_Container_Product_Order_Network_Subnet',
}
if subnet_type == 'static':
order['endPointIpAddressId'] = endpoint_id
elif subnet_type != 'global' and subnet_type != 'static':
order['endPointVlanId'] = endpoint_id
if test_order:
return self.client['Product_Order'].verifyOrder(order)
else:
return self.client['Product_Order'].placeOrder(order)
@staticmethod
def get_subnet_item_price(item, subnet_type, version):
price_id = None
if version == 4 and subnet_type == 'static':
for item_price in item['prices']:
for package_reference in item_price['packageReferences']:
if subnet_type.upper() in package_reference['package']['keyName']:
price_id = item_price['id']
else:
price_id = item['prices'][0]['id']
return price_id
|
MIT License
|
amzn/mxfusion
|
mxfusion/components/variables/var_trans.py
|
Softplus.transform
|
python
|
def transform(self, var, F=None, dtype=None):
F = get_default_MXNet_mode() if F is None else F
return F.Activation(var, act_type='softrelu') + self._offset
|
Forward transformation.
:param var: Variable to be transformed.
:type var: mx.ndarray or mx.sym
:param F: Mode to run MxNet in.
:type F: mxnet.ndarray or mxnet.symbol
:param dtype: data type.
:type dtype: e.g. np.float32
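Example (a minimal sketch; the offset and input values are for illustration, and mx.nd is assumed to be the default MXNet mode):

    import mxnet as mx
    from mxfusion.components.variables.var_trans import Softplus

    trans = Softplus(offset=1e-6)
    var = mx.nd.array([-2.0, 0.0, 3.0])
    print(trans.transform(var))   # softplus(var) + offset, element-wise and strictly positive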
|
https://github.com/amzn/mxfusion/blob/af6223e9636b055d029d136dd7ae023b210b4560/mxfusion/components/variables/var_trans.py#L63-L75
|
from abc import ABC, abstractmethod
import numpy as np
from ...common.config import get_default_MXNet_mode
class VariableTransformation(ABC):
@abstractmethod
def transform(self, var, F=None, dtype=None):
pass
@abstractmethod
def inverseTransform(self, out_var, F=None, dtype=None):
pass
class Softplus(VariableTransformation):
def __init__(self, offset):
self._offset = offset
|
Apache License 2.0
|
timoschick/self-debiasing
|
generation.py
|
SelfDebiasingGPT2LMHeadModel.sample
|
python
|
def sample(self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None,
logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs) -> Union[
SampleOutput, torch.LongTensor]:
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
max_length = max_length if max_length is not None else self.config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
output_scores = output_scores if output_scores is not None else self.config.output_scores
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate
)
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
sequence_lengths, unfinished_sequences, cur_len = self._init_sequence_length_for_generation(
input_ids, max_length
)
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = outputs.logits[:, -1, :]
next_token_scores = logits_processor(input_ids, next_token_logits)
next_token_scores = logits_warper(input_ids, next_token_scores)
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
probs = F.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
if self.logits_processor is not None:
batch_size = next_tokens.shape[0] // (1 + self.logits_processor.num_debiasing_prefixes)
regular_sentence_indices = range(batch_size)
for regular_sentence_idx in regular_sentence_indices:
debiasing_sentence_indices = self.logits_processor._get_bias_indices(regular_sentence_idx, batch_size)
for debiasing_sentence_idx in debiasing_sentence_indices:
next_tokens[debiasing_sentence_idx] = next_tokens[regular_sentence_idx]
if eos_token_id is not None:
assert pad_token_id is not None, "If eos_token_id is defined, make sure that pad_token_id is defined."
next_tokens = next_tokens * unfinished_sequences + (pad_token_id) * (1 - unfinished_sequences)
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
cur_len = cur_len + 1
if eos_token_id is not None:
sequence_lengths, unfinished_sequences = self._update_seq_length_for_generation(
sequence_lengths, unfinished_sequences, cur_len, next_tokens == eos_token_id
)
if unfinished_sequences.max() == 0:
break
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return SampleEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return SampleDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
|
This is a verbatim copy of the original implementation by huggingface, with a single modification to ensure that a text and all
corresponding self-debiasing inputs always choose the same token to generate next. This modification is enclosed by the texts
"BEGIN MODIFICATIONS" and "END MODIFICATIONS", respectively.
|
https://github.com/timoschick/self-debiasing/blob/762f1811cc361014e92f0b6f7ffdd7f3e8d3200d/generation.py#L116-L252
|
from typing import List, Optional, Union, Tuple
import torch
import torch.nn.functional as F
from transformers import GPT2LMHeadModel, LogitsProcessorList, LogitsProcessor, PreTrainedTokenizer
from transformers.generation_utils import GenerationMixin, SampleOutput, SampleEncoderDecoderOutput, SampleDecoderOnlyOutput
class SelfDebiasingLogitsProcessor(LogitsProcessor):
def __init__(self, num_debiasing_prefixes: int, decay_constant: float = 50, epsilon: float = 0.01, debug: bool = False,
tokenizer: Optional[PreTrainedTokenizer] = None):
assert not debug or tokenizer, "If debug=True, a tokenizer must be passed to SelfDebiasingLogitsProcessor()"
self.num_debiasing_prefixes = num_debiasing_prefixes
self.decay_constant = decay_constant
self.epsilon = epsilon
self.debug = debug
self.tokenizer = tokenizer
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
batch_size = scores.shape[0] // (1 + self.num_debiasing_prefixes)
regular_sentence_indices = range(batch_size)
for regular_sentence_idx in regular_sentence_indices:
bias_indices = self._get_bias_indices(regular_sentence_idx, batch_size)
if bias_indices:
self._debias_scores(scores, regular_sentence_idx, bias_indices)
return scores
def _get_bias_indices(self, regular_sentence_idx: int, batch_size: int) -> List[int]:
return [regular_sentence_idx + (prefix_idx + 1) * batch_size for prefix_idx in range(self.num_debiasing_prefixes)]
def _debias_scores(self, scores: torch.FloatTensor, regular_sent_idx: int, bias_indices: List[int]) -> None:
logits_biased = [scores[bias_idx] for bias_idx in bias_indices]
mask = self._generate_decay_mask(scores[regular_sent_idx], logits_biased)
scores[regular_sent_idx] = torch.log(self._apply_decay_mask(scores[regular_sent_idx], mask))
for debiasing_sent_idx in bias_indices:
scores[debiasing_sent_idx] = scores[regular_sent_idx]
def _apply_decay_mask(self, logits: torch.Tensor, decay_mask: torch.Tensor) -> torch.Tensor:
probabilities = logits.softmax(dim=-1)
decay_mask = torch.exp(- decay_mask * self.decay_constant)
decay_mask = torch.max(decay_mask, torch.tensor([self.epsilon], device=decay_mask.device))
probabilities = probabilities * decay_mask
probabilities = probabilities / probabilities.sum(dim=-1)
return probabilities
def _generate_decay_mask(self, logits_regular: torch.FloatTensor, logits_biased_list: List[torch.FloatTensor]) -> torch.Tensor:
p_regular = logits_regular.softmax(dim=-1)
p_biased = None
for logits_biased in logits_biased_list:
if p_biased is None:
p_biased = logits_biased.softmax(dim=-1)
else:
p_biased = torch.max(p_biased, logits_biased.softmax(dim=-1))
if self.debug:
print(f'== Before Debiasing ==\n'
f'Top 5 predictions (regular): {self._get_most_likely_tokens(p_regular, k=5)}\n'
f'Top 5 predictions (biased): {self._get_most_likely_tokens(p_biased, k=5)}')
mask = torch.max(p_biased - p_regular, torch.tensor([0.], device=p_regular.device))
if self.debug:
p_regular = self._apply_decay_mask(logits_regular, mask)
print(f'== After Debiasing ==\n'
f'Top 5 predictions (regular): {self._get_most_likely_tokens(p_regular, k=5)}')
return mask
def _get_most_likely_tokens(self, probabilities_tensor: torch.Tensor, k: int) -> List[Tuple[str, float]]:
assert len(probabilities_tensor.shape) == 1
values, indices = torch.topk(probabilities_tensor, k=k, dim=-1)
tokens = self.tokenizer.convert_ids_to_tokens(indices)
return list(zip(tokens, [pv.item() for pv in values]))
class SelfDebiasingGPT2LMHeadModel(GPT2LMHeadModel, GenerationMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logits_processor = None
def init_logits_processor(self, *args, **kwargs):
self.logits_processor = SelfDebiasingLogitsProcessor(*args, **kwargs)
def _get_logits_processor(self, *args, **kwargs) -> LogitsProcessorList:
logits_processor = super()._get_logits_processor(*args, **kwargs)
if self.logits_processor is not None:
logits_processor.append(self.logits_processor)
return logits_processor
def beam_sample(self, *args, **kwargs):
raise NotImplementedError("Beam sampling is not implemented for self-debiasing models")
|
Apache License 2.0
|
fichtefoll/csscheme
|
my_sublime_lib/view/_view.py
|
base_scope
|
python
|
def base_scope(view):
return view.scope_name(0).split(' ', 1)[0]
|
Returns the view's base scope.
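A hypothetical usage inside a Sublime Text plugin command (sketch only; the command name is made up):

    import sublime
    import sublime_plugin

    class ShowBaseScopeCommand(sublime_plugin.TextCommand):
        def run(self, edit):
            # e.g. "source.python" or "text.html.basic", depending on the open file
            sublime.status_message(base_scope(self.view))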
|
https://github.com/fichtefoll/csscheme/blob/6575b53d2c40a64839f86e624af33bf86f6b8e34/my_sublime_lib/view/_view.py#L129-L132
|
from contextlib import contextmanager
from sublime import Region, View
from .. import Settings
from ..edit import Edit
__all__ = ['ViewSettings', 'unset_read_only', 'append', 'clear', 'set_text',
'has_sels', 'has_file_ext', 'base_scope', 'rowcount', 'rowwidth',
'relative_point', 'coorded_region', 'coorded_substr', 'get_text',
'get_viewport_point', 'get_viewport_coords', 'set_viewport',
'extract_selector']
class ViewSettings(Settings):
def __init__(self, view, none_erases=False):
if not isinstance(view, View):
raise ValueError("Invalid view")
settings = view.settings()
if not settings:
raise ValueError("Could not resolve view.settings()")
super(ViewSettings, self).__init__(settings, none_erases)
@contextmanager
def unset_read_only(view):
read_only_before = view.is_read_only()
if read_only_before:
view.set_read_only(False)
yield read_only_before
if read_only_before:
view.set_read_only(True)
def append(view, text, scroll=None):
size = view.size()
scroll = scroll or (scroll is not False and len(view.sel()) == 1 and
view.sel()[0] == Region(size))
with Edit(view) as edit:
edit.insert(size, text)
if scroll:
view.show(view.size())
def clear(view):
with Edit(view) as edit:
edit.erase(Region(0, view.size()))
def set_text(view, text, scroll=False):
with Edit(view) as edit:
edit.erase(Region(0, view.size()))
edit.insert(0, text)
if scroll:
view.show(view.size())
else:
view.sel().clear()
view.sel().add(Region(0, 0))
def has_sels(view):
return len(view.sel()) > 0
def has_file_ext(view, ext):
if not view.file_name() or not ext.strip().replace('.', ''):
return False
if not ext.startswith('.'):
ext = '.' + ext
return view.file_name().endswith(ext)
|
MIT License
|
kriaga/health-checker
|
HealthChecker/venv/Lib/site-packages/nltk/sem/evaluate.py
|
demo
|
python
|
def demo(num=0, trace=None):
demos = {
1: propdemo,
2: folmodel,
3: foldemo,
4: satdemo}
try:
demos[num](trace=trace)
except KeyError:
for num in demos:
demos[num](trace=trace)
|
Run existing demos.
- num = 1: propositional logic demo
- num = 2: first order model demo (only if trace is set)
- num = 3: first order sentences demo
- num = 4: satisfaction of open formulas demo
- any other value: run all the demos
:param trace: trace = 1, or trace = 2 for more verbose tracing
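Example (follows directly from the documented interface; all output goes to stdout):

    from nltk.sem.evaluate import demo

    demo(num=3)            # first order sentences demo only
    demo(num=4, trace=2)   # satisfaction demo with verbose tracing
    demo()                 # num=0 matches no demo, so every demo runs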
|
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/nltk/sem/evaluate.py#L764-L786
|
from __future__ import print_function, unicode_literals
from pprint import pformat
import inspect
import textwrap
import re
import sys
from six import string_types
from nltk.decorators import decorator
from nltk.compat import python_2_unicode_compatible
from nltk.sem.logic import (AbstractVariableExpression, AllExpression, Expression,
AndExpression, ApplicationExpression, EqualityExpression,
ExistsExpression, IffExpression, ImpExpression,
IndividualVariableExpression, LambdaExpression,
NegatedExpression, OrExpression,
Variable, is_indvar)
class Error(Exception): pass
class Undefined(Error): pass
def trace(f, *args, **kw):
if sys.version_info[0] >= 3:
argspec = inspect.getfullargspec(f)
else:
argspec = inspect.getargspec(f)
d = dict(zip(argspec[0], args))
if d.pop('trace', None):
print()
for item in d.items():
print("%s => %s" % item)
return f(*args, **kw)
def is_rel(s):
if len(s) == 0:
return True
elif all(isinstance(el, tuple) for el in s) and len(max(s))==len(min(s)):
return True
else:
raise ValueError("Set %r contains sequences of different lengths" % s)
def set2rel(s):
new = set()
for elem in s:
if isinstance(elem, string_types):
new.add((elem,))
elif isinstance(elem, int):
new.add((str(elem,)))
else:
new.add(elem)
return new
def arity(rel):
if len(rel) == 0:
return 0
return len(list(rel)[0])
@python_2_unicode_compatible
class Valuation(dict):
def __init__(self, xs):
super(Valuation, self).__init__()
for (sym, val) in xs:
if isinstance(val, string_types) or isinstance(val, bool):
self[sym] = val
elif isinstance(val, set):
self[sym] = set2rel(val)
else:
msg = textwrap.fill("Error in initializing Valuation. "
"Unrecognized value for symbol '%s':\n%s" % (sym, val), width=66)
raise ValueError(msg)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
raise Undefined("Unknown expression: '%s'" % key)
def __str__(self):
return pformat(self)
@property
def domain(self):
dom = []
for val in self.values():
if isinstance(val, string_types):
dom.append(val)
elif not isinstance(val, bool):
dom.extend([elem for tuple_ in val for elem in tuple_ if elem is not None])
return set(dom)
@property
def symbols(self):
return sorted(self.keys())
@classmethod
def fromstring(cls, s):
return read_valuation(s)
_VAL_SPLIT_RE = re.compile(r'\s*=+>\s*')
_ELEMENT_SPLIT_RE = re.compile(r'\s*,\s*')
_TUPLES_RE = re.compile(r"""\s*
(\([^)]+\)) # tuple-expression
\s*""", re.VERBOSE)
def _read_valuation_line(s):
pieces = _VAL_SPLIT_RE.split(s)
symbol = pieces[0]
value = pieces[1]
if value.startswith('{'):
value = value[1:-1]
tuple_strings = _TUPLES_RE.findall(value)
if tuple_strings:
set_elements = []
for ts in tuple_strings:
ts = ts[1:-1]
element = tuple(_ELEMENT_SPLIT_RE.split(ts))
set_elements.append(element)
else:
set_elements = _ELEMENT_SPLIT_RE.split(value)
value = set(set_elements)
return symbol, value
def read_valuation(s, encoding=None):
if encoding is not None:
s = s.decode(encoding)
statements = []
for linenum, line in enumerate(s.splitlines()):
line = line.strip()
if line.startswith('#') or line=='': continue
try:
statements.append(_read_valuation_line(line))
except ValueError:
raise ValueError('Unable to parse line %s: %s' % (linenum, line))
return Valuation(statements)
@python_2_unicode_compatible
class Assignment(dict):
def __init__(self, domain, assign=None):
super(Assignment, self).__init__()
self.domain = domain
if assign:
for (var, val) in assign:
assert val in self.domain, "'%s' is not in the domain: %s" % (val, self.domain)
assert is_indvar(var), "Wrong format for an Individual Variable: '%s'" % var
self[var] = val
self.variant = None
self._addvariant()
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
raise Undefined("Not recognized as a variable: '%s'" % key)
def copy(self):
new = Assignment(self.domain)
new.update(self)
return new
def purge(self, var=None):
if var:
del self[var]
else:
self.clear()
self._addvariant()
return None
def __str__(self):
gstring = "g"
variant = sorted(self.variant)
for (val, var) in variant:
gstring += "[%s/%s]" % (val, var)
return gstring
def _addvariant(self):
list_ = []
for item in self.items():
pair = (item[1], item[0])
list_.append(pair)
self.variant = list_
return None
def add(self, var, val):
assert val in self.domain, "%s is not in the domain %s" % (val, self.domain)
assert is_indvar(var), "Wrong format for an Individual Variable: '%s'" % var
self[var] = val
self._addvariant()
return self
@python_2_unicode_compatible
class Model(object):
def __init__(self, domain, valuation):
assert isinstance(domain, set)
self.domain = domain
self.valuation = valuation
if not domain.issuperset(valuation.domain):
raise Error("The valuation domain, %s, must be a subset of the model's domain, %s" % (valuation.domain, domain))
def __repr__(self):
return "(%r, %r)" % (self.domain, self.valuation)
def __str__(self):
return "Domain = %s,\nValuation = \n%s" % (self.domain, self.valuation)
def evaluate(self, expr, g, trace=None):
try:
parsed = Expression.fromstring(expr)
value = self.satisfy(parsed, g, trace=trace)
if trace:
print()
print("'%s' evaluates to %s under M, %s" % (expr, value, g))
return value
except Undefined:
if trace:
print()
print("'%s' is undefined under M, %s" % (expr, g))
return 'Undefined'
def satisfy(self, parsed, g, trace=None):
if isinstance(parsed, ApplicationExpression):
function, arguments = parsed.uncurry()
if isinstance(function, AbstractVariableExpression):
funval = self.satisfy(function, g)
argvals = tuple(self.satisfy(arg, g) for arg in arguments)
return argvals in funval
else:
funval = self.satisfy(parsed.function, g)
argval = self.satisfy(parsed.argument, g)
return funval[argval]
elif isinstance(parsed, NegatedExpression):
return not self.satisfy(parsed.term, g)
elif isinstance(parsed, AndExpression):
return self.satisfy(parsed.first, g) and self.satisfy(parsed.second, g)
elif isinstance(parsed, OrExpression):
return self.satisfy(parsed.first, g) or self.satisfy(parsed.second, g)
elif isinstance(parsed, ImpExpression):
return (not self.satisfy(parsed.first, g)) or self.satisfy(parsed.second, g)
elif isinstance(parsed, IffExpression):
return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
elif isinstance(parsed, EqualityExpression):
return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
elif isinstance(parsed, AllExpression):
new_g = g.copy()
for u in self.domain:
new_g.add(parsed.variable.name, u)
if not self.satisfy(parsed.term, new_g):
return False
return True
elif isinstance(parsed, ExistsExpression):
new_g = g.copy()
for u in self.domain:
new_g.add(parsed.variable.name, u)
if self.satisfy(parsed.term, new_g):
return True
return False
elif isinstance(parsed, LambdaExpression):
cf = {}
var = parsed.variable.name
for u in self.domain:
val = self.satisfy(parsed.term, g.add(var, u))
cf[u] = val
return cf
else:
return self.i(parsed, g, trace)
def i(self, parsed, g, trace=False):
if parsed.variable.name in self.valuation.symbols:
return self.valuation[parsed.variable.name]
elif isinstance(parsed, IndividualVariableExpression):
return g[parsed.variable.name]
else:
raise Undefined("Can't find a value for %s" % parsed)
def satisfiers(self, parsed, varex, g, trace=None, nesting=0):
spacer = ' '
indent = spacer + (spacer * nesting)
candidates = []
if isinstance(varex, string_types):
var = Variable(varex)
else:
var = varex
if var in parsed.free():
if trace:
print()
print((spacer * nesting) + "Open formula is '%s' with assignment %s" % (parsed, g))
for u in self.domain:
new_g = g.copy()
new_g.add(var.name, u)
if trace and trace > 1:
lowtrace = trace-1
else:
lowtrace = 0
value = self.satisfy(parsed, new_g, lowtrace)
if trace:
print(indent + "(trying assignment %s)" % new_g)
if value == False:
if trace:
print(indent + "value of '%s' under %s is False" % (parsed, new_g))
else:
candidates.append(u)
if trace:
print(indent + "value of '%s' under %s is %s" % (parsed, new_g, value))
result = set(c for c in candidates)
else:
raise Undefined("%s is not free in %s" % (var.name, parsed))
return result
mult = 30
def propdemo(trace=None):
global val1, dom1, m1, g1
val1 = Valuation([('P', True), ('Q', True), ('R', False)])
dom1 = set([])
m1 = Model(dom1, val1)
g1 = Assignment(dom1)
print()
print('*' * mult)
print("Propositional Formulas Demo")
print('*' * mult)
print('(Propositional constants treated as nullary predicates)')
print()
print("Model m1:\n", m1)
print('*' * mult)
sentences = [
'(P & Q)',
'(P & R)',
'- P',
'- R',
'- - P',
'- (P & R)',
'(P | R)',
'(R | P)',
'(R | R)',
'(- P | R)',
'(P | - P)',
'(P -> Q)',
'(P -> R)',
'(R -> P)',
'(P <-> P)',
'(R <-> R)',
'(P <-> R)',
]
for sent in sentences:
if trace:
print()
m1.evaluate(sent, g1, trace)
else:
print("The value of '%s' is: %s" % (sent, m1.evaluate(sent, g1)))
def folmodel(quiet=False, trace=None):
global val2, v2, dom2, m2, g2
v2 = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'), ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), ('dog', set(['d1'])),
('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
val2 = Valuation(v2)
dom2 = val2.domain
m2 = Model(dom2, val2)
g2 = Assignment(dom2, [('x', 'b1'), ('y', 'g2')])
if not quiet:
print()
print('*' * mult)
print("Models Demo")
print("*" * mult)
print("Model m2:\n", "-" * 14,"\n", m2)
print("Variable assignment = ", g2)
exprs = ['adam', 'boy', 'love', 'walks', 'x', 'y', 'z']
parsed_exprs = [Expression.fromstring(e) for e in exprs]
print()
for parsed in parsed_exprs:
try:
print("The interpretation of '%s' in m2 is %s" % (parsed, m2.i(parsed, g2)))
except Undefined:
print("The interpretation of '%s' in m2 is Undefined" % parsed)
applications = [('boy', ('adam',)), ('walks', ('adam',)), ('love', ('adam', 'y')), ('love', ('y', 'adam'))]
for (fun, args) in applications:
try:
funval = m2.i(Expression.fromstring(fun), g2)
argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args)
print("%s(%s) evaluates to %s" % (fun, args, argsval in funval))
except Undefined:
print("%s(%s) evaluates to Undefined" % (fun, args))
def foldemo(trace=None):
folmodel(quiet=True)
print()
print('*' * mult)
print("FOL Formulas Demo")
print('*' * mult)
formulas = [
'love (adam, betty)',
'(adam = mia)',
'\\x. (boy(x) | girl(x))',
'\\x. boy(x)(adam)',
'\\x y. love(x, y)',
'\\x y. love(x, y)(adam)(betty)',
'\\x y. love(x, y)(adam, betty)',
'\\x y. (boy(x) & love(x, y))',
'\\x. exists y. (boy(x) & love(x, y))',
'exists z1. boy(z1)',
'exists x. (boy(x) & -(x = adam))',
'exists x. (boy(x) & all y. love(y, x))',
'all x. (boy(x) | girl(x))',
'all x. (girl(x) -> exists y. boy(y) & love(x, y))',
'exists x. (boy(x) & all y. (girl(y) -> love(y, x)))',
'exists x. (boy(x) & all y. (girl(y) -> love(x, y)))',
'all x. (dog(x) -> - girl(x))',
'exists x. exists y. (love(x, y) & love(x, y))'
]
for fmla in formulas:
g2.purge()
if trace:
m2.evaluate(fmla, g2, trace)
else:
print("The value of '%s' is: %s" % (fmla, m2.evaluate(fmla, g2)))
def satdemo(trace=None):
print()
print('*' * mult)
print("Satisfiers Demo")
print('*' * mult)
folmodel(quiet=True)
formulas = [
'boy(x)',
'(x = x)',
'(boy(x) | girl(x))',
'(boy(x) & girl(x))',
'love(adam, x)',
'love(x, adam)',
'-(x = adam)',
'exists z22. love(x, z22)',
'exists y. love(y, x)',
'all y. (girl(y) -> love(x, y))',
'all y. (girl(y) -> love(y, x))',
'all y. (girl(y) -> (boy(x) & love(y, x)))',
'(boy(x) & all y. (girl(y) -> love(x, y)))',
'(boy(x) & all y. (girl(y) -> love(y, x)))',
'(boy(x) & exists y. (girl(y) & love(y, x)))',
'(girl(x) -> dog(x))',
'all y. (dog(y) -> (x = y))',
'exists y. love(y, x)',
'exists y. (love(adam, y) & love(y, x))'
]
if trace:
print(m2)
for fmla in formulas:
print(fmla)
Expression.fromstring(fmla)
parsed = [Expression.fromstring(fmla) for fmla in formulas]
for p in parsed:
g2.purge()
print("The satisfiers of '%s' are: %s" % (p, m2.satisfiers(p, 'x', g2, trace)))
|
MIT License
|
google/mentornet
|
code/utils.py
|
mentornet
|
python
|
def mentornet(epoch,
loss,
labels,
loss_p_percentile,
example_dropout_rates,
burn_in_epoch=18,
fixed_epoch_after_burn_in=True,
loss_moving_average_decay=0.9,
debug=False):
with tf.variable_scope('mentor_inputs'):
loss_moving_avg = tf.get_variable(
'cumulative', [], initializer=tf.zeros_initializer(), trainable=False)
if not fixed_epoch_after_burn_in:
cur_epoch = epoch
else:
cur_epoch = tf.to_int32(tf.minimum(epoch, burn_in_epoch))
v_ones = tf.ones(tf.shape(loss), tf.float32)
v_zeros = tf.zeros(tf.shape(loss), tf.float32)
upper_bound = tf.cond(cur_epoch < (burn_in_epoch - 1), lambda: v_ones,
lambda: v_zeros)
this_dropout_rate = tf.squeeze(
tf.nn.embedding_lookup(example_dropout_rates, cur_epoch))
this_percentile = tf.squeeze(
tf.nn.embedding_lookup(loss_p_percentile, cur_epoch))
percentile_loss = tf.contrib.distributions.percentile(
loss, this_percentile * 100)
percentile_loss = tf.convert_to_tensor(percentile_loss)
loss_moving_avg = loss_moving_avg.assign(
loss_moving_avg * loss_moving_average_decay +
(1 - loss_moving_average_decay) * percentile_loss)
slim.summaries.add_scalar_summary(percentile_loss, 'debug/percentile_loss')
slim.summaries.add_scalar_summary(this_dropout_rate, 'debug/dropout_rate')
slim.summaries.add_scalar_summary(cur_epoch, 'debug/epoch_step')
slim.summaries.add_scalar_summary(loss_moving_avg,
'debug/loss_moving_percentile')
ones = tf.ones([tf.shape(loss)[0], 1], tf.float32)
epoch_vec = tf.scalar_mul(tf.to_float(cur_epoch), ones)
lossdiff = loss - tf.scalar_mul(loss_moving_avg, ones)
input_data = tf.squeeze(tf.stack([loss, lossdiff, labels, epoch_vec], 1))
v = tf.nn.sigmoid(mentornet_nn(input_data), name='v')
v = tf.maximum(v, upper_bound, 'v_bound')
v_dropout = tf.py_func(probabilistic_sample,
[v, this_dropout_rate, 'random'], tf.float32)
v_dropout = tf.reshape(v_dropout, [-1, 1], name='v_dropout')
if debug:
v_dropout = tf.Print(
v_dropout,
data=[cur_epoch, loss_moving_avg, percentile_loss],
summarize=64,
message='epoch, loss_moving_avg, percentile_loss')
v_dropout = tf.Print(
v_dropout, data=[lossdiff], summarize=64, message='loss_diff')
v_dropout = tf.Print(v_dropout, data=[v], summarize=64, message='v')
v_dropout = tf.Print(
v_dropout, data=[v_dropout], summarize=64, message='v_dropout')
return v_dropout
|
The MentorNet to train with the StudentNet.
The details are in:
Jiang, Lu, et al. "MentorNet: Learning Data-Driven Curriculum for Very Deep
Neural Networks on Corrupted Labels." ICML. 2018.
http://proceedings.mlr.press/v80/jiang18c/jiang18c.pdf
Args:
epoch: a tensor [batch_size, 1] representing the training percentage. Each
epoch is an integer between 0 and 99.
loss: a tensor [batch_size, 1] representing the sample loss.
labels: a tensor [batch_size, 1] representing the label. Every label is set
to 0 in the current version.
loss_p_percentile: a 1-d tensor of size 100, where each element is the
p-percentile at that epoch to compute the moving average.
example_dropout_rates: a 1-d tensor of size 100, where each element is the
dropout rate at that epoch. Dropping out means setting a sample's weight to
zero with that probability, following Liang, Junwei, et al. "Learning to
Detect Concepts from Webly-Labeled Video Data." IJCAI. 2016.
burn_in_epoch: the number of burn-in epochs. During the first burn_in_epoch
epochs, all samples have a weight of 1.0.
fixed_epoch_after_burn_in: whether to fix the epoch after the burn-in.
loss_moving_average_decay: the decay factor to compute the moving average.
debug: whether to print the weight information for debugging purposes.
Returns:
v: [batch_size, 1] weight vector.
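A minimal, hypothetical usage sketch (not from the repository) of how the returned weights could scale a per-example loss in a TF1 graph. The batch size, placeholder names, the constant curriculum tensors and the weighted-loss reduction are all assumptions, and the full code/utils.py (including probabilistic_sample) must be importable for mentornet to build.
import tensorflow as tf
batch_size = 32                                              # mentornet_nn needs a static batch size
loss = tf.placeholder(tf.float32, [batch_size, 1])           # per-example StudentNet loss
labels = tf.zeros([batch_size, 1], tf.float32)               # labels are 0 in this version
epoch = tf.placeholder(tf.int32, [])                         # integer in [0, 99]
loss_p_percentile = tf.constant([0.7] * 100, tf.float32)     # assumed curriculum
example_dropout_rates = tf.constant([0.5] * 100, tf.float32) # assumed curriculum
v = mentornet(epoch, loss, labels, loss_p_percentile, example_dropout_rates)
# one common choice: down-weight the StudentNet loss by the weights, treated as constants
weighted_loss = tf.reduce_sum(tf.stop_gradient(v) * loss) / float(batch_size)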
|
https://github.com/google/mentornet/blob/76d6be2db1be39714dec6db6bb3bcbb77855ce6e/code/utils.py#L163-L259
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
def summarize_data_utilization(v, tf_global_step, batch_size, epsilon=0.001):
nonzero_v = tf.get_variable('data_util/nonzero_v', [],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
rounded_v = tf.maximum(v - epsilon, tf.to_float(0))
nonzero_v = tf.assign_add(nonzero_v, tf.count_nonzero(
rounded_v, dtype=tf.float32))
data_util = (nonzero_v) / tf.to_float(batch_size) / (
tf.to_float(tf_global_step) + 2)
data_util = tf.minimum(data_util, 1)
tf.stop_gradient(data_util)
slim.summaries.add_scalar_summary(data_util, 'data_util/data_util')
slim.summaries.add_scalar_summary(tf.reduce_sum(v), 'data_util/batch_sum_v')
return data_util
def parse_dropout_rate_list(str_list):
str_list = np.array(str_list)
values = str_list[np.arange(0, len(str_list), 2)]
indexes = str_list[np.arange(1, len(str_list), 2)]
values = [float(t) for t in values]
indexes = [int(t) for t in indexes]
assert len(values) == len(indexes) and np.sum(indexes) == 100
for t in values:
assert t >= 0.0 and t <= 1.0
result = []
for t in range(len(str_list) // 2):
result.extend([values[t]] * indexes[t])
return result
def mentornet_nn(input_features,
label_embedding_size=2,
epoch_embedding_size=5,
num_fc_nodes=20):
batch_size = int(input_features.get_shape()[0])
losses = tf.reshape(input_features[:, 0], [-1, 1])
loss_diffs = tf.reshape(input_features[:, 1], [-1, 1])
labels = tf.to_int32(tf.reshape(input_features[:, 2], [-1, 1]))
epochs = tf.to_int32(tf.reshape(input_features[:, 3], [-1, 1]))
epochs = tf.minimum(epochs, tf.ones([batch_size, 1], dtype=tf.int32) * 99)
if len(losses.get_shape()) <= 1:
num_steps = 1
else:
num_steps = int(losses.get_shape()[1])
with tf.variable_scope('mentornet'):
label_embedding = tf.get_variable('label_embedding',
[2, label_embedding_size])
epoch_embedding = tf.get_variable(
'epoch_embedding', [100, epoch_embedding_size], trainable=False)
lstm_inputs = tf.stack([losses, loss_diffs], axis=1)
lstm_inputs = tf.squeeze(lstm_inputs)
lstm_inputs = [lstm_inputs]
forward_cell = tf.contrib.rnn.BasicLSTMCell(1, forget_bias=0.0)
backward_cell = tf.contrib.rnn.BasicLSTMCell(1, forget_bias=0.0)
_, out_state_fw, out_state_bw = tf.contrib.rnn.static_bidirectional_rnn(
forward_cell,
backward_cell,
inputs=lstm_inputs,
dtype=tf.float32,
sequence_length=np.ones(batch_size) * num_steps)
label_inputs = tf.squeeze(tf.nn.embedding_lookup(label_embedding, labels))
epoch_inputs = tf.squeeze(tf.nn.embedding_lookup(epoch_embedding, epochs))
h = tf.concat([out_state_fw[0], out_state_bw[0]], 1)
feat = tf.concat([label_inputs, epoch_inputs, h], 1)
feat_dim = int(feat.get_shape()[1])
fc_1 = tf.add(
tf.matmul(feat, tf.Variable(tf.random_normal([feat_dim,
num_fc_nodes]))),
tf.Variable(tf.random_normal([num_fc_nodes])))
fc_1 = tf.nn.tanh(fc_1)
out_layer = tf.add(
tf.matmul(fc_1, tf.Variable(tf.random_normal([num_fc_nodes, 1]))),
tf.Variable(tf.random_normal([1])))
return out_layer
|
Apache License 2.0
|
google/clusterfuzz
|
src/clusterfuzz/_internal/platforms/android/adb.py
|
create_directory_if_needed
|
python
|
def create_directory_if_needed(device_directory):
run_shell_command(['mkdir', '-p', device_directory])
|
Creates a directory on the device if it doesn't already exist.
|
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/platforms/android/adb.py#L101-L103
|
import collections
import os
import re
import signal
import socket
import subprocess
import tempfile
import threading
import time
from clusterfuzz._internal.base import persistent_cache
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import shell
ADB_TIMEOUT = 1200
BAD_STATE_WAIT = 900
BOOT_WAIT_INTERVAL = 30
CUTTLEFISH_USER = 'vsoc-01'
CUTTLEFISH_CVD_PORT = 6520
DEFAULT_DEVICE_MEMORY_MB = 2048
DEVICE = collections.namedtuple('Device', ['serial', 'path'])
DEVICE_HANG_STRING = None
DEVICE_NOT_FOUND_STRING = 'error: device \'{serial}\' not found'
DEVICE_OFFLINE_STRING = 'error: device offline'
FACTORY_RESET_WAIT = 60
KERNEL_LOG_FILES = [
'/proc/last_kmsg',
'/sys/fs/pstore/console-ramoops',
]
MONKEY_PROCESS_NAME = 'monkey'
WAIT_FOR_DEVICE_TIMEOUT = 600
REBOOT_TIMEOUT = 3600
RECOVERY_CMD_TIMEOUT = 60
STOP_CVD_WAIT = 20
LSUSB_BUS_RE = re.compile(r'Bus\s+(\d+)\s+Device\s+(\d+):.*')
LSUSB_SERIAL_RE = re.compile(r'\s+iSerial\s+\d\s+(.*)')
USBDEVFS_RESET = ord('U') << 8 | 20
def bad_state_reached():
persistent_cache.clear_values()
logs.log_fatal_and_exit(
'Device in bad state.', wait_before_exit=BAD_STATE_WAIT)
def connect_to_cuttlefish_device():
logs.log('Connect to cuttlefish device.')
device_serial = environment.get_value('ANDROID_SERIAL')
connect_cmd = f'{get_adb_path()} connect {device_serial}'
return execute_command(connect_cmd, timeout=RECOVERY_CMD_TIMEOUT)
def copy_local_directory_to_remote(local_directory, remote_directory):
create_directory_if_needed(remote_directory)
if os.listdir(local_directory):
run_command(['push', '%s/.' % local_directory, remote_directory])
def copy_local_file_to_remote(local_file_path, remote_file_path):
create_directory_if_needed(os.path.dirname(remote_file_path))
run_command(['push', local_file_path, remote_file_path])
def copy_remote_directory_to_local(remote_directory, local_directory):
run_command(['pull', '%s/.' % remote_directory, local_directory])
def copy_remote_file_to_local(remote_file_path, local_file_path):
shell.create_directory(
os.path.dirname(local_file_path), create_intermediates=True)
run_command(['pull', remote_file_path, local_file_path])
|
Apache License 2.0
|
gaasedelen/lighthouse
|
plugins/lighthouse/coverage.py
|
DatabaseCoverage._map_nodes
|
python
|
def _map_nodes(self):
dirty_nodes = {}
coverage_addresses = collections.deque(sorted(self._unmapped_data))
while coverage_addresses:
address = coverage_addresses.popleft()
node_metadata = self._metadata.get_node(address)
if not node_metadata:
continue
if node_metadata.address in self.nodes:
node_coverage = self.nodes[node_metadata.address]
else:
node_coverage = NodeCoverage(node_metadata.address, self._weak_self)
self.nodes[node_metadata.address] = node_coverage
while 1:
node_coverage.executed_instructions[address] = self._hitmap[address]
self._unmapped_data.discard(address)
try:
address = coverage_addresses.popleft()
except IndexError:
break
if address not in node_metadata.instructions:
coverage_addresses.appendleft(address)
break
dirty_nodes[node_metadata.address] = node_coverage
return dirty_nodes
|
Map loaded coverage data to database defined nodes (basic blocks).
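Illustrative-only sketch (not Lighthouse's API) of the bucketing idea behind this method: walk the covered addresses in sorted order and group each one under the basic block (node) that contains it; the node_start_of callback is a made-up stand-in for the metadata lookup.
import collections
def group_addresses_by_node(addresses, node_start_of):
    # addresses: iterable of covered instruction addresses (ints)
    # node_start_of: callable mapping an address to its node's start address, or None
    buckets = collections.defaultdict(list)
    for address in sorted(addresses):
        node = node_start_of(address)
        if node is None:          # address falls outside any known node
            continue
        buckets[node].append(address)
    return buckets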
|
https://github.com/gaasedelen/lighthouse/blob/7245a2d2c4e84351cd259ed81dafa4263167909a/plugins/lighthouse/coverage.py#L479-L584
|
import os
import time
import logging
import weakref
import datetime
import itertools
import collections
from lighthouse.util import *
from lighthouse.util.qt import compute_color_on_gradiant
from lighthouse.metadata import DatabaseMetadata
logger = logging.getLogger("Lighthouse.Coverage")
class DatabaseCoverage(object):
def __init__(self, palette, name="", filepath=None, data=None):
self.palette = palette
self.name = name
self.filepath = filepath
try:
self.timestamp = os.path.getmtime(filepath)
except (OSError, TypeError):
self.timestamp = time.time()
self._metadata = DatabaseMetadata()
self._hitmap = build_hitmap(data)
self._imagebase = BADADDR
self.coverage_hash = 0
self._update_coverage_hash()
self._unmapped_data = set(self._hitmap.keys())
self._unmapped_data.add(BADADDR)
self._misaligned_data = set()
self.nodes = {}
self.functions = {}
self.instruction_percent = 0.0
self.partial_nodes = set()
self.partial_instructions = set()
self._weak_self = weakref.proxy(self)
@property
def data(self):
return self._hitmap
@property
def coverage(self):
return viewkeys(self._hitmap)
@property
def suspicious(self):
bad = 0
total = len(self.nodes)
if not total:
return 0.0
for address, node_coverage in iteritems(self.nodes):
if address in node_coverage.executed_instructions:
continue
bad += 1
percent = (bad/float(total))*100
logger.debug("SUSPICIOUS: %5.2f%% (%u/%u)" % (percent, bad, total))
return percent > 2.0
def update_metadata(self, metadata, delta=None):
self._metadata = weakref.proxy(metadata)
rebase_offset = self._metadata.imagebase - self._imagebase
if self._imagebase == BADADDR:
self._imagebase = self._metadata.imagebase
elif rebase_offset:
self._hitmap = { (address + rebase_offset): hits for address, hits in iteritems(self._hitmap) }
self._imagebase = self._metadata.imagebase
self.unmap_all()
def refresh(self):
dirty_nodes, dirty_functions = self._map_coverage()
self._finalize(dirty_nodes, dirty_functions)
self._update_coverage_hash()
def refresh_theme(self):
for function in self.functions.values():
function.coverage_color = compute_color_on_gradiant(
function.instruction_percent,
self.palette.table_coverage_bad,
self.palette.table_coverage_good
)
def _finalize(self, dirty_nodes, dirty_functions):
self._finalize_nodes(dirty_nodes)
self._finalize_functions(dirty_functions)
self._finalize_instruction_percent()
def _finalize_nodes(self, dirty_nodes):
metadata = self._metadata
for address, node_coverage in iteritems(dirty_nodes):
node_coverage.finalize()
if node_coverage.instructions_executed != metadata.nodes[address].instruction_count:
self.partial_nodes.add(address)
else:
self.partial_nodes.discard(address)
instructions = []
for node_address in self.partial_nodes:
instructions.append(self.nodes[node_address].executed_instructions)
self.partial_instructions = set(itertools.chain.from_iterable(instructions))
def _finalize_functions(self, dirty_functions):
for function_coverage in itervalues(dirty_functions):
function_coverage.finalize()
def _finalize_instruction_percent(self):
total = sum(f.instruction_count for f in itervalues(self._metadata.functions))
if not total:
self.instruction_percent = 0.0
return
executed = sum(f.instructions_executed for f in itervalues(self.functions))
self.instruction_percent = float(executed) / total
def add_data(self, data, update=True):
for address, hit_count in iteritems(data):
self._hitmap[address] += hit_count
if not update:
return
self._update_coverage_hash()
self._unmapped_data |= viewkeys(data)
def add_addresses(self, addresses, update=True):
for address in addresses:
self._hitmap[address] += 1
if not update:
return
self._update_coverage_hash()
self._unmapped_data |= set(addresses)
def subtract_data(self, data):
for address, hit_count in iteritems(data):
self._hitmap[address] -= hit_count
if not self._hitmap[address]:
del self._hitmap[address]
self._update_coverage_hash()
self.unmap_all()
def mask_data(self, coverage_mask):
composite_data = collections.defaultdict(int)
for address in coverage_mask:
composite_data[address] = self._hitmap[address]
return DatabaseCoverage(self.palette, data=composite_data)
def _update_coverage_hash(self):
if self._hitmap:
self.coverage_hash = hash(frozenset(viewkeys(self._hitmap)))
else:
self.coverage_hash = 0
def _map_coverage(self):
dirty_nodes = self._map_nodes()
dirty_functions = self._map_functions(dirty_nodes)
return (dirty_nodes, dirty_functions)
|
MIT License
|
intel/tcf
|
tcfl/tc.py
|
reporter_c.report_pass
|
python
|
def report_pass(self, message, attachments = None,
level = None, dlevel = 0, alevel = 2, subcase = None):
if level == None:
level = msgid_c.depth()
self._argcheck(message, attachments, level, dlevel, alevel)
level += dlevel
self._report(level, level + alevel, "PASS", message,
attachments, subcase = subcase)
|
Report a check has passed (a positive condition we were
looking for was found).
>>> report_pass("this thing worked ok",
>>> dict(
>>> measurement1 = 34,
>>> log = commonl.generator_factory_c(
>>> commonl.file_iterator, "LOGILENAME")
>>> ),
>>> subcase = "subtest1")
A check, described by *message* has passed
:param str message: message describing the check or condition
that has passed
:param dict attachments: (optional) a dictionary of extra data
to append to the report, keyed by string. Stick to simple
values (bool, int, float, string, nested dict of the same )
for all report drivers to be able to handle it.
Additionally, use class:`commonl.generator_factory_c` for
generators (since the report drivers will have to spin the
generator once each).
:param str subcase: (optional, default *None*) report this
message as coming from a subcase
:param int level: (optional, default set by
:class:`tcfl.msgid_c` context depth) verbosity level of this
message. Must be a zero or positive integer. 0 is most
important. Usually you want to set *dlevel*.
:param int dlevel: (optional, default 0) verbosity level of
this message relative to level (normally to the default
level).
Eg: if given -2 and level resolves to 4, the verbosity level
would be 2.
:param int alevel: (optional, default 2) verbosity level of
the attachments to this message relative to level (normally
to the default level).
The attachments might contain a lot of extra data that in
some cases is not necessary unless more verbosity is
declared.
|
https://github.com/intel/tcf/blob/ca8b66d3809ecb441c9a1ae99ff13eb88baf9286/tcfl/tc.py#L661-L717
|
import ast
import atexit
import collections
import contextlib
import copy
import datetime
import errno
import functools
import imp
import inspect
import json
import logging
import numbers
import os
import platform
import pprint
import random
import re
import shutil
import signal
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import types
import typing
import tcfl
_multiprocessing_method_pool_c = None
_multiprocessing_sync_manager = None
_multiprocessing = None
def import_mp_pathos():
import pathos.pools
import pathos.multiprocessing
import multiprocess.managers
global _multiprocessing_method_pool_c
global _multiprocessing_tc_pool_c
global _multiprocessing_sync_manager
global _multiprocessing
_multiprocessing_method_pool_c = pathos.pools._ThreadPool
def _multiprocessing_tc_pool_c(**kwargs):
return pathos.pools._ProcessPool(maxtasksperchild = 2, **kwargs)
_multiprocessing_sync_manager = multiprocess.managers.SyncManager
_multiprocessing = pathos.multiprocessing
def import_mp_std():
import multiprocessing.pool
import multiprocessing
import multiprocessing.managers
global _multiprocessing_method_pool_c
global _multiprocessing_tc_pool_c
global _multiprocessing_sync_manager
global _multiprocessing
_multiprocessing_method_pool_c = multiprocessing.pool.ThreadPool
def _multiprocessing_tc_pool_c(**kwargs):
return multiprocessing.pool.ThreadPool(**kwargs)
_multiprocessing_sync_manager = multiprocessing.managers.SyncManager
_multiprocessing = multiprocessing
mp = os.environ.get('TCF_USE_MP', None)
if mp == None:
try:
import_mp_pathos()
except ImportError as e:
import_mp_std()
elif mp.lower() == 'std':
import_mp_std()
elif mp.lower() == 'pathos':
import_mp_pathos()
else:
raise RuntimeError('Invalid value to TCF_USE_MP (%s)' % mp)
import requests.exceptions
from . import app
import commonl
import commonl.expr_parser
from . import ttb_client
from . import msgid_c
version = None
logging.addLevelName(50, "C")
logging.addLevelName(40, "E")
logging.addLevelName(30, "W")
logging.addLevelName(20, "I")
logging.addLevelName(10, "D")
logging.addLevelName(9, "D2")
logging.addLevelName(8, "D3")
logging.addLevelName(7, "D4")
logging.addLevelName(6, "D5")
exception = tcfl.exception
pass_e = tcfl.pass_e
error_e = tcfl.error_e
blocked_e = tcfl.blocked_e
block_e = tcfl.blocked_e
timeout_error_e = tcfl.timeout_error_e
failed_e = tcfl.fail_e
fail_e = tcfl.fail_e
timeout_failed_e = tcfl.timeout_failed_e
skip_e = tcfl.skip_e
valid_results = tcfl.valid_results
logger = logging.getLogger("tcfl.tc")
_method_phase_prefixes = frozenset({
'configure',
'build',
'deploy',
'setup',
'start',
'eval',
'teardown',
'class_teardown',
'clean',
})
class _simple_namespace:
def __init__(self, kw):
self.__dict__.update(kw)
log_dir = None
report_console_impl = None
report_file_impl = None
ticket = None
def _globals_init():
global log_dir
global report_console_impl
global report_file_impl
log_dir = None
tc_c._dry_run = False
tc_c.runid = None
if report_console_impl:
report_driver_c.remove(report_console_impl)
report_console_impl = None
if report_file_impl:
report_driver_c.remove(report_file_impl)
report_file_impl = None
class target_extension_c(object):
class unneeded(Exception):
pass
def __init__(self, _target):
assert isinstance(_target, target_c)
self.target = _target
@classmethod
def __check_name(cls):
pass
report_runid_hashid_separator = ":"
report_runid_hashid_file_separator = ":"
class report_driver_c(object):
name = None
def report(self, reporter, tag, ts, delta,
level, message, alevel, attachments):
raise NotImplementedError
_drivers = []
@classmethod
def add(cls, obj, name = None, origin = None):
assert isinstance(obj, cls)
if origin == None:
o = inspect.stack()[1]
origin = "%s:%s" % (o[1], o[2])
setattr(obj, "origin", origin)
obj.name = name
cls._drivers.append(obj)
@classmethod
def get_by_name(cls, name):
for driver in cls._drivers:
if driver.name == name:
return driver
raise ValueError("%s: report driver does not exist" % name)
@classmethod
def remove(cls, obj):
assert isinstance(obj, cls)
cls._drivers.remove(obj)
class reporter_c(object):
def __init__(self, testcase = None):
self._report_prefix = "reporter_c._report_prefix/uninitialized"
if testcase:
self.ts_start = testcase.ts_start
assert isinstance(testcase, tc_c)
self.testcase = testcase
else:
self.ts_start = time.time()
assert isinstance(self, tc_c)
self.testcase = self
report_level_max = None
report_level_driver_max = {}
@staticmethod
def _argcheck(message, attachments, level, dlevel, alevel):
assert isinstance(message, str), f"message: expected str, got {type(message)}"
if attachments:
assert isinstance(attachments, dict)
assert level >= 0
assert dlevel >= 0 or dlevel < 0
assert alevel >= 0 or alevel < 0
def _report(self, level, alevel, tag, message,
attachments, subcase = None, subcase_base = None):
assert subcase == None or isinstance(subcase, str), f"subcase: expected short string; got {type(subcase)}"
if self.report_level_max != None and level >= self.report_level_max:
return
ts = time.time()
delta = ts - self.ts_start
testcase = self.testcase
subl = []
if subcase_base == None:
subcase_base = msgid_c.subcase()
if subcase_base:
subl.append(subcase_base)
if subcase:
subl.append(subcase)
subcase = "##".join(subl)
if subcase:
subtc = testcase._subcase_get(subcase)
if tag == "PASS":
subtc.result.passed += 1
elif tag == "FAIL":
subtc.result.failed += 1
elif tag == "ERRR":
subtc.result.errors += 1
elif tag == "BLCK":
subtc.result.blocked += 1
elif tag == "SKIP":
subtc.result.skipped += 1
report_on = subtc
else:
report_on = self
for driver in report_driver_c._drivers:
if driver.name:
level_driver_max = self.report_level_driver_max.get(driver.name, None)
if level_driver_max != None and level >= level_driver_max:
continue
driver.report(
report_on, tag, ts, delta, level, commonl.mkutf8(message),
alevel, attachments)
|
Apache License 2.0
|
jspenmar/defeat-net
|
utils/transforms.py
|
get_translation_matrix
|
python
|
def get_translation_matrix(translation_vector):
t_mat = torch.eye(4).repeat((translation_vector.shape[0], 1, 1)).to(device=translation_vector.device)
t = translation_vector.contiguous().view(-1, 3, 1)
t_mat[:, :3, 3, None] = t
return t_mat
|
Convert a translation vector into a 4x4 transformation matrix
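A quick usage sketch (values made up): a batch of two translation vectors becomes two 4x4 homogeneous transforms.
import torch
t = torch.tensor([[1.0, 2.0, 3.0],
                  [0.0, 0.5, 0.0]])   # [B, 3]
T = get_translation_matrix(t)         # [B, 4, 4]
# T[0] is [[1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 3], [0, 0, 0, 1]]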
|
https://github.com/jspenmar/defeat-net/blob/c3dc5e143c7f484b86cf1703a290c0f07256e3e7/utils/transforms.py#L22-L27
|
import torch
def disp2depth(disp, min_depth, max_depth):
min_disp, max_disp = 1/max_depth, 1/min_depth
scaled_disp = min_disp + (max_disp - min_disp) * disp
depth = 1/scaled_disp
return scaled_disp, depth
def params2tform(axisangle, translation, invert=False):
r, t = axisangle2rot(axisangle), translation.clone()
if invert:
r, t = r.transpose(1, 2), t*-1
t = get_translation_matrix(t)
return torch.matmul(r, t) if invert else torch.matmul(t, r)
|
MIT License
|
sebastianmh/random-forest-classifier
|
utilities.py
|
shuffle_in_unison
|
python
|
def shuffle_in_unison(a, b):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
|
Shuffles two lists of equal length and keeps corresponding elements in the same index.
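Usage sketch: both arrays are shuffled in place with the same permutation.
import numpy as np
X = np.array([1, 2, 3, 4])
y = np.array(['a', 'b', 'c', 'd'])
shuffle_in_unison(X, y)
# X and y are now permuted identically, e.g. X = [3, 1, 4, 2] and y = ['c', 'a', 'd', 'b']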
|
https://github.com/sebastianmh/random-forest-classifier/blob/0527d6e0e278b40d2560913f47f0ba2f6b1bf00f/utilities.py#L8-L13
|
from __future__ import division
from collections import Counter
import random
import numpy as np
|
MIT License
|
natduca/quickopen
|
src/db_proxy.py
|
DBProxy.search
|
python
|
def search(self, *args, **kwargs):
query = Query.from_kargs(args, kwargs)
d = self._req('POST', '/search', query.as_dict())
return QueryResult.from_dict(d)
|
Searches the database.
args should be either a Query object, or arguments to the Query-object constructor.
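A hypothetical usage sketch; the host/port values and the single positional query string are assumptions, not taken from quickopen's documentation.
proxy = DBProxy('localhost', 10248)
result = proxy.search('main.c')   # args are forwarded to Query.from_kargs(('main.c',), {})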
|
https://github.com/natduca/quickopen/blob/527cda56b867a0b2f47baa9ec4f39459fec746ca/src/db_proxy.py#L182-L190
|
import async_http_connection
import httplib
import os
import socket
import subprocess
import sys
import time
import json
import urllib
import urlparse
from db_status import DBStatus
from event import Event
from trace_event import *
from query import Query
from query_result import QueryResult
class DBDirProxy(object):
def __init__(self, id, path):
self.id = id
self.path = path
def __repr__(self):
return "DBDirProxy(%s, %s)" % (self.id, self.path)
class DBProxy(object):
def __init__(self, host, port, start_if_needed = False, port_for_autostart=-1):
if start_if_needed and port_for_autostart == -1:
raise Exception("Cannot start_if_needed without a port_for_autostart")
self.host = host
self.port = port
self._start_if_needed = start_if_needed
self._port_for_autostart = port_for_autostart
self.couldnt_start_daemon = Event()
self.conn = httplib.HTTPConnection(host, port, True)
self._dir_lut = {}
@property
def start_if_needed(self):
return self._start_if_needed
@property
def port_for_autostart(self):
return self._port_for_autostart
@staticmethod
def try_to_start_quickopend(port_for_autostart):
basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
quickopend_path = os.path.join(basepath, "quickopend")
assert os.path.exists(quickopend_path)
args = [quickopend_path, 'run']
sys.stderr.write('No quickopend running. Launching it...\n')
proc = subprocess.Popen(args)
sys.stderr.write('Waiting for it to come up on port %i\n' % port_for_autostart)
ok = False
per_iter_delay = 0.1
timeout = 10
num_tries = int(timeout / per_iter_delay)
for i in range(num_tries):
try:
conn = httplib.HTTPConnection('localhost', port_for_autostart, True)
conn.request('GET', '/ping')
except Exception, ex:
time.sleep(per_iter_delay)
continue
res = conn.getresponse()
if res.status != 200:
ok = False
break
if json.loads(res.read()) != 'pong':
ok = False
break
ok = True
break
if ok:
sys.stderr.write('Daemon is up\n')
return ok
def close(self):
pass
def _req(self, method, path, data = None):
if data != None:
data = json.dumps(data)
try:
self.conn.request(method, path, data)
except httplib.CannotSendRequest:
self.conn = None
except socket.error:
self.conn = None
if not self.conn:
if self._start_if_needed:
ok = DBProxy.try_to_start_quickopend(self._port_for_autostart)
if not ok:
self.couldnt_start_daemon.fire()
raise Exception("Daemon did not come up")
self._start_if_needed = False
self.conn = httplib.HTTPConnection(self.host, self.port, True)
self.conn.request(method, path, data)
else:
self._should_try_autostart = False
self._start_if_needed = False
res = self.conn.getresponse()
if res.status == 500:
info = json.loads(res.read())
try:
module = __import__(info["module"], {}, {}, True)
constructor = getattr(module, info["class"])
ex = constructor(*info["args"])
except:
raise Exception("Server side exception: %s" % info["exception"])
raise ex
elif res.status != 200:
raise Exception("On %s, got %s" % (path, res.status))
res = json.loads(res.read().encode('utf8'))
return res
def _get_dir(self, id, path):
if id not in self._dir_lut:
self._dir_lut[id] = DBDirProxy(id, path)
assert self._dir_lut[id].path == path
return self._dir_lut[id]
@property
def dirs(self):
ret = self._req('GET', '/dirs')
return map(lambda x: self._get_dir(x["id"], x["path"]), ret)
def add_dir(self, d):
ret = self._req('POST', '/dirs/add', {"path": os.path.abspath(d)})
assert ret["status"] == 'OK'
return self._get_dir(ret["id"], d)
def delete_dir(self, d):
if type(d) != DBDirProxy:
raise Exception("Expected DBDirProxy")
ret = self._req('DELETE', '/dirs/%s' % d.id)
assert ret["status"] == 'OK'
@property
def ignores(self):
return self._req('GET', '/ignores')
def ignore(self, i):
ret = self._req('POST', '/ignores/add', i)
def unignore(self, i):
try:
ret = self._req('POST', '/ignores/remove', i)
except:
raise Exception("Pattern not found")
def get_oauth(self):
ret = self._req('GET', '/get_oauth')
if not 'token' in ret:
return None
return ret['token']
def set_oauth(self, token):
ret = self._req('POST', '/set_oauth', {"token": token})
assert ret["status"] == 'OK'
@traced
|
Apache License 2.0
|
tibkiss/iqfeed
|
iqfeed/tools.py
|
write_bars_to_file
|
python
|
def write_bars_to_file(bars, filename, tz):
date_format_str = "%Y%m%d %H%M%S"
rows = [{'DateTime': bar.datetime.astimezone(tz).strftime(date_format_str),
'Open': bar.open,
'High': bar.high,
'Low': bar.low,
'Close': bar.close,
'Volume': bar.volume,
} for bar in bars]
if os.path.exists(filename):
raise Exception("File already exists!")
fd = os.popen("gzip > %s" % filename, 'w') if filename.endswith('.gz') else open(filename, 'w')
with fd:
csv_writer = csv.DictWriter(fd, ['DateTime', 'Open', 'High', 'Low', 'Close', 'Volume'])
csv_writer.writeheader()
csv_writer.writerows(rows)
|
Creates CSV file from list of Bar instances
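Illustrative sketch only: the namedtuple below is a stand-in for the package's real bar objects (anything exposing datetime/open/high/low/close/volume attributes works), and pytz.utc is an assumed target timezone.
from collections import namedtuple
from datetime import datetime
import pytz
Bar = namedtuple('Bar', 'datetime open high low close volume')
bars = [Bar(datetime(2020, 1, 2, 14, 30, tzinfo=pytz.utc), 10.0, 10.5, 9.8, 10.2, 1500)]
write_bars_to_file(bars, 'MSFT_1min.csv', pytz.utc)   # raises if the file already exists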
|
https://github.com/tibkiss/iqfeed/blob/c7fc937daf4e4896141f176e9f4a3b1c14a21325/iqfeed/tools.py#L54-L74
|
from functools import wraps
import logging
import time
import csv
import os
log = logging.getLogger(__name__)
def retry(tries, exceptions=None, delay=0):
exceptions_ = exceptions or (Exception, )
def _retry(fn):
@wraps(fn)
def __retry(*args, **kwargs):
for _ in range(tries+1):
try:
return fn(*args, **kwargs)
except exceptions_ as e:
log.warning("Exception, retrying...", exc_info=e)
time.sleep(delay)
raise
return __retry
return _retry
|
Apache License 2.0
|
tomplus/kubernetes_asyncio
|
kubernetes_asyncio/client/models/v1_json_schema_props.py
|
V1JSONSchemaProps.enum
|
python
|
def enum(self):
return self._enum
|
Gets the enum of this V1JSONSchemaProps. # noqa: E501
:return: The enum of this V1JSONSchemaProps. # noqa: E501
:rtype: list[object]
|
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_json_schema_props.py#L482-L489
|
import pprint
import re
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1JSONSchemaProps(object):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ref': 'str',
'schema': 'str',
'additional_items': 'object',
'additional_properties': 'object',
'all_of': 'list[V1JSONSchemaProps]',
'any_of': 'list[V1JSONSchemaProps]',
'default': 'object',
'definitions': 'dict(str, V1JSONSchemaProps)',
'dependencies': 'dict(str, object)',
'description': 'str',
'enum': 'list[object]',
'example': 'object',
'exclusive_maximum': 'bool',
'exclusive_minimum': 'bool',
'external_docs': 'V1ExternalDocumentation',
'format': 'str',
'id': 'str',
'items': 'object',
'max_items': 'int',
'max_length': 'int',
'max_properties': 'int',
'maximum': 'float',
'min_items': 'int',
'min_length': 'int',
'min_properties': 'int',
'minimum': 'float',
'multiple_of': 'float',
'_not': 'V1JSONSchemaProps',
'nullable': 'bool',
'one_of': 'list[V1JSONSchemaProps]',
'pattern': 'str',
'pattern_properties': 'dict(str, V1JSONSchemaProps)',
'properties': 'dict(str, V1JSONSchemaProps)',
'required': 'list[str]',
'title': 'str',
'type': 'str',
'unique_items': 'bool',
'x_kubernetes_embedded_resource': 'bool',
'x_kubernetes_int_or_string': 'bool',
'x_kubernetes_list_map_keys': 'list[str]',
'x_kubernetes_list_type': 'str',
'x_kubernetes_map_type': 'str',
'x_kubernetes_preserve_unknown_fields': 'bool'
}
attribute_map = {
'ref': '$ref',
'schema': '$schema',
'additional_items': 'additionalItems',
'additional_properties': 'additionalProperties',
'all_of': 'allOf',
'any_of': 'anyOf',
'default': 'default',
'definitions': 'definitions',
'dependencies': 'dependencies',
'description': 'description',
'enum': 'enum',
'example': 'example',
'exclusive_maximum': 'exclusiveMaximum',
'exclusive_minimum': 'exclusiveMinimum',
'external_docs': 'externalDocs',
'format': 'format',
'id': 'id',
'items': 'items',
'max_items': 'maxItems',
'max_length': 'maxLength',
'max_properties': 'maxProperties',
'maximum': 'maximum',
'min_items': 'minItems',
'min_length': 'minLength',
'min_properties': 'minProperties',
'minimum': 'minimum',
'multiple_of': 'multipleOf',
'_not': 'not',
'nullable': 'nullable',
'one_of': 'oneOf',
'pattern': 'pattern',
'pattern_properties': 'patternProperties',
'properties': 'properties',
'required': 'required',
'title': 'title',
'type': 'type',
'unique_items': 'uniqueItems',
'x_kubernetes_embedded_resource': 'x-kubernetes-embedded-resource',
'x_kubernetes_int_or_string': 'x-kubernetes-int-or-string',
'x_kubernetes_list_map_keys': 'x-kubernetes-list-map-keys',
'x_kubernetes_list_type': 'x-kubernetes-list-type',
'x_kubernetes_map_type': 'x-kubernetes-map-type',
'x_kubernetes_preserve_unknown_fields': 'x-kubernetes-preserve-unknown-fields'
}
def __init__(self, ref=None, schema=None, additional_items=None, additional_properties=None, all_of=None, any_of=None, default=None, definitions=None, dependencies=None, description=None, enum=None, example=None, exclusive_maximum=None, exclusive_minimum=None, external_docs=None, format=None, id=None, items=None, max_items=None, max_length=None, max_properties=None, maximum=None, min_items=None, min_length=None, min_properties=None, minimum=None, multiple_of=None, _not=None, nullable=None, one_of=None, pattern=None, pattern_properties=None, properties=None, required=None, title=None, type=None, unique_items=None, x_kubernetes_embedded_resource=None, x_kubernetes_int_or_string=None, x_kubernetes_list_map_keys=None, x_kubernetes_list_type=None, x_kubernetes_map_type=None, x_kubernetes_preserve_unknown_fields=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ref = None
self._schema = None
self._additional_items = None
self._additional_properties = None
self._all_of = None
self._any_of = None
self._default = None
self._definitions = None
self._dependencies = None
self._description = None
self._enum = None
self._example = None
self._exclusive_maximum = None
self._exclusive_minimum = None
self._external_docs = None
self._format = None
self._id = None
self._items = None
self._max_items = None
self._max_length = None
self._max_properties = None
self._maximum = None
self._min_items = None
self._min_length = None
self._min_properties = None
self._minimum = None
self._multiple_of = None
self.__not = None
self._nullable = None
self._one_of = None
self._pattern = None
self._pattern_properties = None
self._properties = None
self._required = None
self._title = None
self._type = None
self._unique_items = None
self._x_kubernetes_embedded_resource = None
self._x_kubernetes_int_or_string = None
self._x_kubernetes_list_map_keys = None
self._x_kubernetes_list_type = None
self._x_kubernetes_map_type = None
self._x_kubernetes_preserve_unknown_fields = None
self.discriminator = None
if ref is not None:
self.ref = ref
if schema is not None:
self.schema = schema
if additional_items is not None:
self.additional_items = additional_items
if additional_properties is not None:
self.additional_properties = additional_properties
if all_of is not None:
self.all_of = all_of
if any_of is not None:
self.any_of = any_of
if default is not None:
self.default = default
if definitions is not None:
self.definitions = definitions
if dependencies is not None:
self.dependencies = dependencies
if description is not None:
self.description = description
if enum is not None:
self.enum = enum
if example is not None:
self.example = example
if exclusive_maximum is not None:
self.exclusive_maximum = exclusive_maximum
if exclusive_minimum is not None:
self.exclusive_minimum = exclusive_minimum
if external_docs is not None:
self.external_docs = external_docs
if format is not None:
self.format = format
if id is not None:
self.id = id
if items is not None:
self.items = items
if max_items is not None:
self.max_items = max_items
if max_length is not None:
self.max_length = max_length
if max_properties is not None:
self.max_properties = max_properties
if maximum is not None:
self.maximum = maximum
if min_items is not None:
self.min_items = min_items
if min_length is not None:
self.min_length = min_length
if min_properties is not None:
self.min_properties = min_properties
if minimum is not None:
self.minimum = minimum
if multiple_of is not None:
self.multiple_of = multiple_of
if _not is not None:
self._not = _not
if nullable is not None:
self.nullable = nullable
if one_of is not None:
self.one_of = one_of
if pattern is not None:
self.pattern = pattern
if pattern_properties is not None:
self.pattern_properties = pattern_properties
if properties is not None:
self.properties = properties
if required is not None:
self.required = required
if title is not None:
self.title = title
if type is not None:
self.type = type
if unique_items is not None:
self.unique_items = unique_items
if x_kubernetes_embedded_resource is not None:
self.x_kubernetes_embedded_resource = x_kubernetes_embedded_resource
if x_kubernetes_int_or_string is not None:
self.x_kubernetes_int_or_string = x_kubernetes_int_or_string
if x_kubernetes_list_map_keys is not None:
self.x_kubernetes_list_map_keys = x_kubernetes_list_map_keys
if x_kubernetes_list_type is not None:
self.x_kubernetes_list_type = x_kubernetes_list_type
if x_kubernetes_map_type is not None:
self.x_kubernetes_map_type = x_kubernetes_map_type
if x_kubernetes_preserve_unknown_fields is not None:
self.x_kubernetes_preserve_unknown_fields = x_kubernetes_preserve_unknown_fields
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
self._ref = ref
@property
def schema(self):
return self._schema
@schema.setter
def schema(self, schema):
self._schema = schema
@property
def additional_items(self):
return self._additional_items
@additional_items.setter
def additional_items(self, additional_items):
self._additional_items = additional_items
@property
def additional_properties(self):
return self._additional_properties
@additional_properties.setter
def additional_properties(self, additional_properties):
self._additional_properties = additional_properties
@property
def all_of(self):
return self._all_of
@all_of.setter
def all_of(self, all_of):
self._all_of = all_of
@property
def any_of(self):
return self._any_of
@any_of.setter
def any_of(self, any_of):
self._any_of = any_of
@property
def default(self):
return self._default
@default.setter
def default(self, default):
self._default = default
@property
def definitions(self):
return self._definitions
@definitions.setter
def definitions(self, definitions):
self._definitions = definitions
@property
def dependencies(self):
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
self._dependencies = dependencies
@property
def description(self):
return self._description
@description.setter
def description(self, description):
self._description = description
@property
|
Apache License 2.0
|
hendrix/hendrix
|
hendrix/contrib/cache/__init__.py
|
CachedResource.getLastModified
|
python
|
def getLastModified(self):
last_modified = self.headers.get('last-modified')
if last_modified:
last_modified = self.convertTimeString(last_modified)
return last_modified
|
returns the GMT last-modified datetime or None
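Minimal usage sketch with a made-up header value.
resource = CachedResource(content=b'<html/>',
                          headers={'last-modified': 'Wed, 21 Oct 2015 07:28:00 GMT'})
print(resource.getLastModified())   # 2015-10-21 07:28:00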
|
https://github.com/hendrix/hendrix/blob/da601a6290d63c26049db9e5ad686447df73c6da/hendrix/contrib/cache/__init__.py#L68-L73
|
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import gzip
from datetime import datetime
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from time import strptime
from twisted.web.resource import Resource
def compressBuffer(buffer):
zbuf = BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(buffer)
zfile.close()
return zbuf.getvalue()
def decompressBuffer(buffer):
zbuf = BytesIO(buffer)
zfile = gzip.GzipFile(fileobj=zbuf)
deflated = zfile.read()
zfile.close()
return deflated
class CachedResource(Resource):
isLeaf = True
def __init__(self, content=None, headers=None):
Resource.__init__(self)
self.content = content
self.headers = headers
self.created = datetime.now()
def render(self, request):
return self.content
def getMaxAge(self):
max_age = 0
cache_control = self.headers.get('cache-control')
if cache_control:
params = dict(urlparse.parse_qsl(cache_control))
max_age = int(params.get('max-age', '0'))
return max_age
def convertTimeString(self, timestr):
time_struc = strptime(timestr, '%a, %d %b %Y %H:%M:%S GMT')
return datetime(*time_struc[:6])
|
MIT License
|
cloudant/bigcouch
|
couchjs/scons/scons-local-2.0.1/SCons/Action.py
|
_do_create_keywords
|
python
|
def _do_create_keywords(args, kw):
v = kw.get('varlist', ())
if is_String(v): v = (v,)
kw['varlist'] = tuple(v)
if args:
cmdstrfunc = args[0]
if cmdstrfunc is None or is_String(cmdstrfunc):
kw['cmdstr'] = cmdstrfunc
elif callable(cmdstrfunc):
kw['strfunction'] = cmdstrfunc
else:
raise SCons.Errors.UserError(
'Invalid command display variable type. '
'You must either pass a string or a callback which '
'accepts (target, source, env) as parameters.')
if len(args) > 1:
kw['varlist'] = args[1:] + kw['varlist']
if kw.get('strfunction', _null) is not _null and kw.get('cmdstr', _null) is not _null:
raise SCons.Errors.UserError(
'Cannot have both strfunction and cmdstr args to Action()')
|
This converts any arguments after the action argument into
their equivalent keywords and adds them to the kw argument.
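A hedged sketch of the conversion; the argument values are illustrative, not from a real SCons build.
kw = {}
_do_create_keywords(("building $TARGET", "EXTRA_VAR"), kw)
# kw is now {'varlist': ('EXTRA_VAR',), 'cmdstr': 'building $TARGET'}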
|
https://github.com/cloudant/bigcouch/blob/8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe/couchjs/scons/scons-local-2.0.1/SCons/Action.py#L316-L341
|
__revision__ = "src/engine/SCons/Action.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.compat
import dis
import os
import pickle
import re
import sys
import subprocess
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Executor
import SCons.Util
import SCons.Subst
is_String = SCons.Util.is_String
is_List = SCons.Util.is_List
class _null(object):
pass
print_actions = 1
execute_actions = 1
print_actions_presub = 0
def rfile(n):
try:
return n.rfile()
except AttributeError:
return n
def default_exitstatfunc(s):
return s
try:
SET_LINENO = dis.SET_LINENO
HAVE_ARGUMENT = dis.HAVE_ARGUMENT
except AttributeError:
remove_set_lineno_codes = lambda x: x
else:
def remove_set_lineno_codes(code):
result = []
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
if op >= HAVE_ARGUMENT:
if op != SET_LINENO:
result.append(code[i:i+3])
i = i+3
else:
result.append(c)
i = i+1
return ''.join(result)
strip_quotes = re.compile('^[\'"](.*)[\'"]$')
def _callable_contents(obj):
try:
return _function_contents(obj.im_func)
except AttributeError:
try:
return _function_contents(obj.__call__.im_func)
except AttributeError:
try:
return _code_contents(obj)
except AttributeError:
return _function_contents(obj)
def _object_contents(obj):
try:
return _function_contents(obj.im_func)
except AttributeError:
try:
return _function_contents(obj.__call__.im_func)
except AttributeError:
try:
return _code_contents(obj)
except AttributeError:
try:
return _function_contents(obj)
except AttributeError:
try:
return pickle.dumps(obj)
except (pickle.PicklingError, TypeError):
return str(obj)
def _code_contents(code):
contents = []
contents.append("%s,%s" % (code.co_argcount, len(code.co_varnames)))
try:
contents.append(",%s,%s" % (len(code.co_cellvars), len(code.co_freevars)))
except AttributeError:
contents.append(",0,0")
contents.append(',(' + ','.join(map(_object_contents,code.co_consts[1:])) + ')')
contents.append(',(' + ','.join(map(_object_contents,code.co_names)) + ')')
contents.append(',(' + str(remove_set_lineno_codes(code.co_code)) + ')')
return ''.join(contents)
def _function_contents(func):
contents = [_code_contents(func.func_code)]
if func.func_defaults:
contents.append(',(' + ','.join(map(_object_contents,func.func_defaults)) + ')')
else:
contents.append(',()')
try:
closure = func.func_closure or []
except AttributeError:
closure = []
try:
xxx = [_object_contents(x.cell_contents) for x in closure]
except AttributeError:
xxx = []
contents.append(',(' + ','.join(xxx) + ')')
return ''.join(contents)
def _actionAppend(act1, act2):
a1 = Action(act1)
a2 = Action(act2)
if a1 is None or a2 is None:
raise TypeError("Cannot append %s to %s" % (type(act1), type(act2)))
if isinstance(a1, ListAction):
if isinstance(a2, ListAction):
return ListAction(a1.list + a2.list)
else:
return ListAction(a1.list + [ a2 ])
else:
if isinstance(a2, ListAction):
return ListAction([ a1 ] + a2.list)
else:
return ListAction([ a1, a2 ])
|
Apache License 2.0
|
thomazthz/godot-wakatime
|
addons/wakatime/wakatime-cli-10.2.1/wakatime/stats.py
|
get_language_from_extension
|
python
|
def get_language_from_extension(file_name):
filepart, extension = os.path.splitext(file_name)
if re.match(r'\.h.*$', extension, re.IGNORECASE) or re.match(r'\.c.*$', extension, re.IGNORECASE):
if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))):
return 'C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.m'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.M'))):
return 'Objective-C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.mm'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.MM'))):
return 'Objective-C++'
available_extensions = extensions_in_same_folder(file_name)
if '.cpp' in available_extensions:
return 'C++'
if '.c' in available_extensions:
return 'C'
if re.match(r'\.m$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C'
if re.match(r'\.mm$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C++'
return None
|
Returns a matching language for the given file extension.
When guessed_language is 'C', does not restrict to known file extensions.
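Illustrative calls; the result for C-family headers depends on which sibling files exist on disk.
print(get_language_from_extension('script.py'))   # None (not a C-family extension)
print(get_language_from_extension('module.h'))    # 'C' if a module.c sits next to it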
|
https://github.com/thomazthz/godot-wakatime/blob/e6c8d7ad07a68b7d33799b588229af53886eb233/addons/wakatime/wakatime-cli-10.2.1/wakatime/stats.py#L167-L198
|
import logging
import os
import re
import sys
from .compat import u, open
from .constants import MAX_FILE_SIZE_SUPPORTED
from .dependencies import DependencyParser
from .exceptions import SkipHeartbeat
from .language_priorities import LANGUAGES
from .packages.pygments.lexers import (
_iter_lexerclasses,
_fn_matches,
basename,
ClassNotFound,
find_lexer_class,
get_lexer_by_name,
)
from .packages.pygments.modeline import get_filetype_from_buffer
try:
from .packages import simplejson as json
except (ImportError, SyntaxError):
import json
log = logging.getLogger('WakaTime')
def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
plugin=None, language=None):
if entity_type != 'file':
stats = {
'language': None,
'dependencies': [],
'lines': None,
'lineno': lineno,
'cursorpos': cursorpos,
}
else:
language, lexer = standardize_language(language, plugin)
if not language:
language, lexer = guess_language(file_name)
language = use_root_language(language, lexer)
parser = DependencyParser(file_name, lexer)
dependencies = parser.parse()
stats = {
'language': language,
'dependencies': dependencies,
'lines': number_lines_in_file(file_name),
'lineno': lineno,
'cursorpos': cursorpos,
}
return stats
def guess_language(file_name):
lexer = None
language = get_language_from_extension(file_name)
if language:
lexer = get_lexer(language)
else:
lexer = smart_guess_lexer(file_name)
if lexer:
language = u(lexer.name)
return language, lexer
def smart_guess_lexer(file_name):
lexer = None
text = get_file_head(file_name)
lexer1, accuracy1 = guess_lexer_using_filename(file_name, text)
lexer2, accuracy2 = guess_lexer_using_modeline(text)
if lexer1:
lexer = lexer1
if (lexer2 and accuracy2 and
(not accuracy1 or accuracy2 > accuracy1)):
lexer = lexer2
return lexer
def guess_lexer_using_filename(file_name, text):
lexer, accuracy = None, None
try:
lexer = custom_pygments_guess_lexer_for_filename(file_name, text)
except SkipHeartbeat as ex:
raise SkipHeartbeat(u(ex))
except:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except:
log.traceback(logging.DEBUG)
return lexer, accuracy
def guess_lexer_using_modeline(text):
lexer, accuracy = None, None
file_type = None
try:
file_type = get_filetype_from_buffer(text)
except:
log.traceback(logging.DEBUG)
if file_type is not None:
try:
lexer = get_lexer_by_name(file_type)
except ClassNotFound:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except:
log.traceback(logging.DEBUG)
return lexer, accuracy
|
MIT License
|
yoshida-lab/xenonpy
|
xenonpy/mdl/mdl.py
|
MDL.get_model_detail
|
python
|
def get_model_detail(self, model_id: int) -> GetModelDetail:
return GetModelDetail({'id': model_id}, api_key=self.api_key, endpoint=self.endpoint)
|
Get model detail by model id.
Parameters
----------
model_id
Model id.
Returns
-------
query
Querying object.
|
https://github.com/yoshida-lab/xenonpy/blob/244b241ea23dcf03af34d86b0ae68f911ae301a5/xenonpy/mdl/mdl.py#L273-L287
|
import os
import tarfile
from pathlib import Path
from typing import List, Union
import pandas as pd
import requests
from sklearn.base import BaseEstimator
from tqdm import tqdm
from xenonpy.utils import TimedMetaClass
from .base import BaseQuery
from .descriptor import QueryDescriptors, QueryDescriptorsWith, UpdateDescriptor, CreateDescriptor, ListDescriptors, GetDescriptorDetail
from .method import QueryMethods, QueryMethodsWith, UpdateMethod, CreateMethod, ListMethods, GetMethodDetail
from .model import QueryModelDetails, QueryModelDetailsWith, UploadModel, GetTrainingInfo, GetTrainingEnv, GetSupplementary, GetModelUrls, GetModelUrl, GetModelDetails, GetModelDetail, ListModelsWithProperty, ListModelsWithModelset, ListModelsWithMethod, ListModelsWithDescriptor
from .modelset import QueryModelsets, QueryModelsetsWith, UpdateModelset, CreateModelset, ListModelsets, GetModelsetDetail
from .property import QueryPropertiesWith, QueryProperties, UpdateProperty, CreateProperty, ListProperties, GetPropertyDetail
__all__ = ['MDL', 'GetVersion', 'QueryModelsetsWith', 'QueryModelsets', 'QueryModelDetailsWith', 'QueryModelDetails',
'UpdateModelset', 'UploadModel', 'GetModelsetDetail', 'GetModelDetail', 'GetModelDetails', 'GetModelUrls',
'GetModelUrl', 'GetTrainingInfo', 'GetSupplementary', 'GetTrainingEnv', 'ListModelsets',
'ListModelsWithDescriptor', 'ListModelsWithMethod', 'ListModelsWithModelset', 'ListModelsWithProperty',
'QueryPropertiesWith', 'QueryProperties', 'GetPropertyDetail', 'ListProperties', 'CreateProperty',
'CreateModelset', 'UpdateProperty', 'QueryDescriptorsWith', 'QueryDescriptors', 'QueryMethodsWith',
'QueryMethods', 'UpdateDescriptor', 'UpdateMethod', 'ListDescriptors', 'ListMethods', 'GetMethodDetail',
'GetDescriptorDetail', 'CreateDescriptor', 'CreateMethod']
class GetVersion(BaseQuery):
queryable = []
def __init__(self, *, api_key: str = 'anonymous.user.key',
endpoint: str = 'http://xenon.ism.ac.jp/api'):
super().__init__(variables={}, api_key=api_key, endpoint=endpoint)
self._return_json = True
def gql(self, *query_vars: str):
return '''
query {
getVersion
}
'''
class MDL(BaseEstimator, metaclass=TimedMetaClass):
def __init__(self, *, api_key: str = 'anonymous.user.key', endpoint: str = 'http://xenon.ism.ac.jp/api'):
self._endpoint = endpoint
self._api_key = api_key
@property
def endpoint(self):
return self._endpoint
@endpoint.setter
def endpoint(self, e):
self._endpoint = e
@property
def api_key(self):
return self._api_key
@api_key.setter
def api_key(self, k):
self._api_key = k
@property
def version(self):
return GetVersion()()
def __call__(self, *query: str,
modelset_has: Union[List[str]] = None,
property_has: Union[List[str]] = None,
descriptor_has: Union[List[str]] = None,
method_has: Union[List[str]] = None,
lang_has: Union[List[str]] = None,
regression: bool = None,
transferred: bool = None,
deprecated: bool = None,
succeed: bool = None,
) -> Union[QueryModelDetails, QueryModelDetailsWith]:
if len(query) > 0:
return QueryModelDetails(dict(query=query), api_key=self.api_key, endpoint=self.endpoint)
else:
variables = dict(
modelset_has=modelset_has,
property_has=property_has,
descriptor_has=descriptor_has,
method_has=method_has,
lang_has=lang_has,
regression=regression,
transferred=transferred,
deprecated=deprecated,
succeed=succeed
)
variables = {k: v for k, v in variables.items() if v is not None}
return QueryModelDetailsWith(variables, api_key=self.api_key, endpoint=self.endpoint)
def upload_model(self, *,
modelset_id: int,
describe: dict,
training_env: dict = None,
training_info: dict = None,
supplementary: dict = None) -> UploadModel:
variables = dict(
model=None,
id=modelset_id,
describe=describe,
training_env=training_env,
training_info=training_info,
supplementary=supplementary,
)
variables = {k: v for k, v in variables.items() if v is not None}
return UploadModel(variables, api_key=self.api_key, endpoint=self.endpoint)
def get_training_info(self, model_id: int) -> GetTrainingInfo:
return GetTrainingInfo({'id': model_id}, api_key=self.api_key, endpoint=self.endpoint)
def get_training_env(self, model_id: int) -> GetTrainingEnv:
return GetTrainingEnv({'id': model_id}, api_key=self.api_key, endpoint=self.endpoint)
def get_supplementary(self, *, model_id: int) -> GetSupplementary:
return GetSupplementary({'id': model_id}, api_key=self.api_key, endpoint=self.endpoint)
def get_model_urls(self, *model_ids: int) -> Union[GetModelUrl, GetModelUrls]:
if len(model_ids) == 0:
raise RuntimeError('model_ids must not be empty')
if len(model_ids) == 1:
return GetModelUrl({'id': model_ids[0]}, api_key=self.api_key, endpoint=self.endpoint)
else:
return GetModelUrls({'ids': model_ids}, api_key=self.api_key, endpoint=self.endpoint)
|
BSD 3-Clause New or Revised License
|
squares-sql/squares
|
tyrell/enumerator/smt.py
|
SmtEnumerator.createInputConstraints
|
python
|
def createInputConstraints(self, solver):
input_productions = self.spec.get_param_productions()
for x in range(0, len(input_productions)):
ctr = None
for y in range(0, len(self.nodes)):
if ctr is None:
ctr = self.variables[y] == input_productions[x].id
else:
ctr = Or(self.variables[y] == input_productions[x].id, ctr)
solver.add(ctr)
self.num_constraints += 1
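# --- Added illustration (not in the original file) ---
# The loop above emits, for every input production p, the disjunction
# Or(n_1 == p.id, ..., n_k == p.id) over the node variables, i.e. each input must
# label at least one program node. A minimal standalone z3 analogue of that shape:
def _sketch_input_constraint():
    from z3 import Ints, Or, Solver, sat
    n1, n2, n3 = Ints('n1 n2 n3')                 # three hypothetical program nodes
    solver = Solver()
    solver.add(Or(n1 == 7, n2 == 7, n3 == 7))     # made-up production id 7 must appear somewhere
    return solver.check() == sat                  # True: the constraint is satisfiable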
|
Each input will appear at least once in the program
|
https://github.com/squares-sql/squares/blob/68d1cdea5f4e42aae7eb665811c2d6c8224ec7b0/tyrell/enumerator/smt.py#L85-L96
|
from z3 import *
from collections import deque
from .enumerator import Enumerator
from .optimizer import Optimizer
from .. import dsl as D
from ..logger import get_logger
import time
logger = get_logger('tyrell.enumerator.smt')
class AST:
def __init__(self):
self.head = None
class ASTNode:
def __init__(self, nb=None, depth=None, children=None):
self.id = nb
self.depth = depth
self.children = children
self.production = None
class SmtEnumerator(Enumerator):
z3_solver = Solver()
leaf_productions = []
variables = []
variables_fun = []
program2tree = {}
def initLeafProductions(self):
for p in self.spec.productions():
if not p.is_function() or str(p).find('Empty') != -1:
self.leaf_productions.append(p)
def createVariables(self, solver):
for x in range(0, len(self.nodes)):
name = 'n' + str(x + 1)
v = Int(name)
self.variables.append(v)
solver.add(And(v >= 0, v < self.spec.num_productions()))
self.num_constraints += 1
hname = 'h' + str(x + 1)
h = Int(hname)
self.variables_fun.append(h)
solver.add(And(h >= 0, h <= 1))
self.num_constraints += 1
def createOutputConstraints(self, solver):
ctr = None
for p in self.spec.get_productions_with_lhs(self.spec.output):
if ctr is None:
ctr = self.variables[0] == p.id
else:
ctr = Or(ctr, self.variables[0] == p.id)
solver.add(ctr)
self.num_constraints += 1
def createLocConstraints(self, solver):
ctr = self.variables_fun[0]
for x in range(1, len(self.variables_fun)):
ctr += self.variables_fun[x]
ctr_fun = ctr == self.loc
solver.add(ctr_fun)
self.num_constraints += 1
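# --- Added illustration (not in the original file) ---
# createLocConstraints above fixes the number of "function" nodes: each h_i is a 0/1
# indicator and their sum must equal self.loc. A standalone z3 sketch of that idea:
def _sketch_loc_constraint(loc=2, n_nodes=4):
    from z3 import Int, And, Solver, sat
    hs = [Int('h%d' % i) for i in range(n_nodes)]
    solver = Solver()
    for h in hs:
        solver.add(And(h >= 0, h <= 1))           # indicator variables
    solver.add(sum(hs) == loc)                    # exactly `loc` function nodes
    return solver.check() == sat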
|
Apache License 2.0
|
multimodallearning/pdd_net
|
dense_pdd_net_v01.py
|
augmentAffine
|
python
|
def augmentAffine(img_in, seg_in, strength=0.05):
B,C,D,H,W = img_in.size()
affine_matrix = (torch.eye(3,4).unsqueeze(0) + torch.randn(B, 3, 4) * strength).to(img_in.device)
meshgrid = F.affine_grid(affine_matrix,torch.Size((B,1,D,H,W)))
img_out = F.grid_sample(img_in, meshgrid,padding_mode='border')
seg_out = F.grid_sample(seg_in.float().unsqueeze(1), meshgrid, mode='nearest').long().squeeze(1)
return img_out, seg_out
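# --- Added usage sketch (not in the original script) ---
# Toy call showing the expected tensor shapes; torch and F are imported at module
# level in the full script. This also runs on CPU, since the affine grid is placed
# on img_in.device inside augmentAffine.
def _sketch_augment_affine():
    imgs = torch.rand(2, 1, 16, 16, 16)                # B x C x D x H x W image batch
    segs = torch.randint(0, 4, (2, 16, 16, 16))        # B x D x H x W label volume
    img_aug, seg_aug = augmentAffine(imgs, segs, strength=0.05)
    return img_aug.shape, seg_aug.shape                # shapes are unchanged by the augmentation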
|
3D affine augmentation on an image and segmentation mini-batch on GPU.
(affine transf. is centered; trilinear interpolation with border padding is used for the image, nearest-neighbour sampling for the segmentation)
:input: img_in batch (torch.cuda.FloatTensor), seg_in batch (torch.cuda.LongTensor)
:return: augmented BxCxDxHxW image batch (torch.cuda.FloatTensor), augmented BxDxHxW seg batch (torch.cuda.LongTensor)
|
https://github.com/multimodallearning/pdd_net/blob/8573113d044ec6e3c20b7acd6977404613c10a8f/dense_pdd_net_v01.py#L128-L143
|
import torch
import torch.nn as nn
import torch.optim as optim
import time
import torch.nn.functional as F
import nibabel as nib
import numpy as np
import pydicom
from torch.utils.checkpoint import checkpoint
fold = 1
list_train = torch.Tensor([1,4,5,6,9,10]).long()
if(fold==2):
list_train = torch.Tensor([2,3,5,6,7,8,9]).long()
if(fold==3):
list_train = torch.Tensor([1,2,3,4,7,8,10]).long()
B = 10
H = 233; W = 168; D = 286;
imgs = torch.zeros(B,1,H,W,D)
segs = torch.zeros(B,H,W,D).long()
label_select = torch.Tensor([0,1,2,3,4,5,6,7,0,0,8,9]).long()
for i in range(B):
imgs[i,0,:,:,:] = torch.from_numpy(nib.load('/share/data_zoe2/heinrich/DatenPMBV/img'+str(i+1)+'v2.nii.gz').get_data())/500.0
segs[i,:,:,:] = label_select[torch.from_numpy(nib.load('/share/data_zoe2/heinrich/DatenPMBV/seg'+str(i+1)+'v2.nii.gz').get_data()).long()]
def dice_coeff(outputs, labels, max_label):
dice = torch.FloatTensor(max_label-1).fill_(0)
for label_num in range(1, max_label):
iflat = (outputs==label_num).view(-1).float()
tflat = (labels==label_num).view(-1).float()
intersection = torch.mean(iflat * tflat)
dice[label_num-1] = (2. * intersection) / (1e-8 + torch.mean(iflat) + torch.mean(tflat))
return dice
print(np.unique(segs.view(-1).numpy()))
d0 = dice_coeff(segs[9,:,:,:], segs[4,:,:,:], 10)
print(d0.mean(),d0)
o_m = H//3
o_n = W//3
o_o = D//3
print('numel_o',o_m*o_n*o_o)
ogrid_xyz = F.affine_grid(torch.eye(3,4).unsqueeze(0),(1,1,o_m,o_n,o_o)).view(1,1,-1,1,3).cuda()
def init_weights(m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
nn.init.xavier_normal(m.weight)
if m.bias is not None:
nn.init.constant(m.bias, 0.0)
def countParameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
class OBELISK(nn.Module):
def __init__(self):
super(OBELISK, self).__init__()
channels = 16
self.offsets = nn.Parameter(torch.randn(2,channels*2,3)*0.05)
self.layer0 = nn.Conv3d(1, 4, 5, stride=2, bias=False, padding=2)
self.batch0 = nn.BatchNorm3d(4)
self.layer1 = nn.Conv3d(channels*8, channels*4, 1, bias=False, groups=1)
self.batch1 = nn.BatchNorm3d(channels*4)
self.layer2 = nn.Conv3d(channels*4, channels*4, 3, bias=False, padding=1)
self.batch2 = nn.BatchNorm3d(channels*4)
self.layer3 = nn.Conv3d(channels*4, channels*1, 1)
def forward(self, input_img):
img_in = F.avg_pool3d(input_img,3,padding=1,stride=2)
img_in = F.relu(self.batch0(self.layer0(img_in)))
sampled = F.grid_sample(img_in,ogrid_xyz + self.offsets[0,:,:].view(1,-1,1,1,3)).view(1,-1,o_m,o_n,o_o)
sampled -= F.grid_sample(img_in,ogrid_xyz + self.offsets[1,:,:].view(1,-1,1,1,3)).view(1,-1,o_m,o_n,o_o)
x = F.relu(self.batch1(self.layer1(sampled)))
x = F.relu(self.batch2(self.layer2(x)))
features = self.layer3(x)
return features
disp_range = 0.4
displacement_width = 15
shift_xyz = F.affine_grid(disp_range*torch.eye(3,4).unsqueeze(0),(1,1,displacement_width,displacement_width,displacement_width)).view(1,1,-1,1,3).cuda()
grid_size = 32
grid_xyz = F.affine_grid(torch.eye(3,4).unsqueeze(0),(1,1,grid_size,grid_size,grid_size)).view(1,-1,1,1,3).cuda()
|
Apache License 2.0
|
ergo/ziggurat_foundations
|
ziggurat_foundations/models/services/user.py
|
UserService.by_user_name
|
python
|
def by_user_name(cls, user_name, db_session=None):
db_session = get_db_session(db_session)
query = db_session.query(cls.model)
query = query.filter(
sa.func.lower(cls.model.user_name) == (user_name or "").lower()
)
query = query.options(sa.orm.eagerload("groups"))
return query.first()
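# --- Added usage sketch (not in the original module) ---
# `session` is a placeholder SQLAlchemy session; UserService.model must already be
# wired to the application's User class (ziggurat_foundations does this at setup).
# The lookup is case-insensitive, so 'Admin' also matches a stored 'admin'.
def _sketch_by_user_name(session):
    user = UserService.by_user_name('Admin', db_session=session)
    return user.user_name if user is not None else None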
|
fetch user by user name
:param user_name:
:param db_session:
:return:
|
https://github.com/ergo/ziggurat_foundations/blob/613adf1b6022e9b5401ef7de9f5a066c88cfb6e8/ziggurat_foundations/models/services/user.py#L316-L330
|
from __future__ import unicode_literals
import hashlib
import six
import sqlalchemy as sa
from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services import BaseService
from ziggurat_foundations.permissions import (
ALL_PERMISSIONS,
ANY_PERMISSION,
PermissionTuple,
resource_permissions_for_users,
)
from ziggurat_foundations.utils import generate_random_string
__all__ = ["UserService"]
class UserService(BaseService):
@classmethod
def get(cls, user_id, db_session=None):
db_session = get_db_session(db_session)
return db_session.query(cls.model).get(user_id)
@classmethod
def permissions(cls, instance, db_session=None):
db_session = get_db_session(db_session, instance)
query = db_session.query(
cls.models_proxy.GroupPermission.group_id.label("owner_id"),
cls.models_proxy.GroupPermission.perm_name.label("perm_name"),
sa.literal("group").label("type"),
)
query = query.filter(
cls.models_proxy.GroupPermission.group_id
== cls.models_proxy.UserGroup.group_id
)
query = query.filter(
cls.models_proxy.User.id == cls.models_proxy.UserGroup.user_id
)
query = query.filter(cls.models_proxy.User.id == instance.id)
query2 = db_session.query(
cls.models_proxy.UserPermission.user_id.label("owner_id"),
cls.models_proxy.UserPermission.perm_name.label("perm_name"),
sa.literal("user").label("type"),
)
query2 = query2.filter(cls.models_proxy.UserPermission.user_id == instance.id)
query = query.union(query2)
groups_dict = dict([(g.id, g) for g in instance.groups])
return [
PermissionTuple(
instance,
row.perm_name,
row.type,
groups_dict.get(row.owner_id) if row.type == "group" else None,
None,
False,
True,
)
for row in query
]
@classmethod
def resources_with_perms(
cls, instance, perms, resource_ids=None, resource_types=None, db_session=None
):
db_session = get_db_session(db_session, instance)
query = db_session.query(cls.models_proxy.Resource).distinct()
group_ids = [gr.id for gr in instance.groups]
if group_ids:
join_conditions = (
cls.models_proxy.GroupResourcePermission.group_id.in_(group_ids),
cls.models_proxy.Resource.resource_id
== cls.models_proxy.GroupResourcePermission.resource_id,
cls.models_proxy.GroupResourcePermission.perm_name.in_(perms),
)
query = query.outerjoin(
(cls.models_proxy.GroupResourcePermission, sa.and_(*join_conditions))
)
query = query.filter(
sa.or_(
cls.models_proxy.Resource.owner_user_id == instance.id,
cls.models_proxy.Resource.owner_group_id.in_(group_ids),
cls.models_proxy.GroupResourcePermission.perm_name != None,
)
)
else:
query = query.filter(cls.models_proxy.Resource.owner_user_id == instance.id)
query2 = db_session.query(cls.models_proxy.Resource).distinct()
query2 = query2.filter(
cls.models_proxy.UserResourcePermission.user_id == instance.id
)
query2 = query2.filter(
cls.models_proxy.Resource.resource_id
== cls.models_proxy.UserResourcePermission.resource_id
)
query2 = query2.filter(
cls.models_proxy.UserResourcePermission.perm_name.in_(perms)
)
if resource_ids:
query = query.filter(
cls.models_proxy.Resource.resource_id.in_(resource_ids)
)
query2 = query2.filter(
cls.models_proxy.Resource.resource_id.in_(resource_ids)
)
if resource_types:
query = query.filter(
cls.models_proxy.Resource.resource_type.in_(resource_types)
)
query2 = query2.filter(
cls.models_proxy.Resource.resource_type.in_(resource_types)
)
query = query.union(query2)
query = query.order_by(cls.models_proxy.Resource.resource_name)
return query
@classmethod
def groups_with_resources(cls, instance):
return instance.groups_dynamic.options(
sa.orm.eagerload(cls.models_proxy.Group.resources)
)
@classmethod
def resources_with_possible_perms(
cls, instance, resource_ids=None, resource_types=None, db_session=None
):
perms = resource_permissions_for_users(
cls.models_proxy,
ANY_PERMISSION,
resource_ids=resource_ids,
resource_types=resource_types,
user_ids=[instance.id],
db_session=db_session,
)
for resource in instance.resources:
perms.append(
PermissionTuple(
instance, ALL_PERMISSIONS, "user", None, resource, True, True
)
)
for group in cls.groups_with_resources(instance):
for resource in group.resources:
perms.append(
PermissionTuple(
instance, ALL_PERMISSIONS, "group", group, resource, True, True
)
)
return perms
@classmethod
def gravatar_url(cls, instance, default="mm", **kwargs):
hash = hashlib.md5(instance.email.encode("utf8").lower()).hexdigest()
if "d" not in kwargs:
kwargs["d"] = default
params = "&".join(
[
six.moves.urllib.parse.urlencode({key: value})
for key, value in kwargs.items()
]
)
return "https://secure.gravatar.com/avatar/{}?{}".format(hash, params)
@classmethod
def set_password(cls, instance, raw_password):
hash_callable = getattr(
instance.passwordmanager, "hash", instance.passwordmanager.encrypt
)
password = hash_callable(raw_password)
if six.PY2:
instance.user_password = password.decode("utf8")
else:
instance.user_password = password
cls.regenerate_security_code(instance)
@classmethod
def check_password(cls, instance, raw_password, enable_hash_migration=True):
verified, replacement_hash = instance.passwordmanager.verify_and_update(
raw_password, instance.user_password
)
if enable_hash_migration and replacement_hash:
if six.PY2:
instance.user_password = replacement_hash.decode("utf8")
else:
instance.user_password = replacement_hash
return verified
@classmethod
def generate_random_pass(cls, chars=7):
return cls.generate_random_string(chars)
@classmethod
def regenerate_security_code(cls, instance):
instance.security_code = cls.generate_random_string(64)
@staticmethod
def generate_random_string(chars=7):
return generate_random_string(chars)
@classmethod
def by_id(cls, user_id, db_session=None):
db_session = get_db_session(db_session)
query = db_session.query(cls.model)
query = query.filter(cls.model.id == user_id)
query = query.options(sa.orm.eagerload("groups"))
return query.first()
@classmethod
|
BSD 3-Clause New or Revised License
|
wolph/python-progressbar
|
progressbar/utils.py
|
len_color
|
python
|
def len_color(value):
return len(no_color(value))
|
Return the length of `value` without ANSI escape codes
>>> len_color(b'\x1b[1234]abc')
3
>>> len_color(u'\x1b[1234]abc')
3
>>> len_color('\x1b[1234]abc')
3
|
https://github.com/wolph/python-progressbar/blob/8eb963c6cc97949bc7ac3fc57e645506a2c9ae0c/progressbar/utils.py#L156-L167
|
from __future__ import absolute_import
import atexit
import io
import os
import re
import sys
import logging
import datetime
from python_utils.time import timedelta_to_seconds, epoch, format_time
from python_utils.converters import scale_1024
from python_utils.terminal import get_terminal_size
import six
assert timedelta_to_seconds
assert get_terminal_size
assert format_time
assert scale_1024
assert epoch
ANSI_TERMS = (
'([xe]|bv)term',
'(sco)?ansi',
'cygwin',
'konsole',
'linux',
'rxvt',
'screen',
'tmux',
'vt(10[02]|220|320)',
)
ANSI_TERM_RE = re.compile('^({})'.format('|'.join(ANSI_TERMS)), re.IGNORECASE)
def is_ansi_terminal(fd, is_terminal=None):
if is_terminal is None:
if 'JPY_PARENT_PID' in os.environ:
is_terminal = True
elif os.environ.get('PYCHARM_HOSTED') == '1':
is_terminal = True
if is_terminal is None:
try:
is_tty = fd.isatty()
if is_tty and ANSI_TERM_RE.match(os.environ.get('TERM', '')):
is_terminal = True
elif 'ANSICON' in os.environ:
is_terminal = True
else:
is_terminal = None
except Exception:
is_terminal = False
return is_terminal
def is_terminal(fd, is_terminal=None):
if is_terminal is None:
is_terminal = is_ansi_terminal(True) or None
if is_terminal is None:
is_terminal = env_flag('PROGRESSBAR_IS_TERMINAL', None)
if is_terminal is None:
try:
is_terminal = fd.isatty()
except Exception:
is_terminal = False
return is_terminal
def deltas_to_seconds(*deltas, **kwargs):
default = kwargs.pop('default', ValueError)
assert not kwargs, 'Only the `default` keyword argument is supported'
for delta in deltas:
if delta is None:
continue
if isinstance(delta, datetime.timedelta):
return timedelta_to_seconds(delta)
elif not isinstance(delta, float):
return float(delta)
else:
return delta
if default is ValueError:
raise ValueError('No valid deltas passed to `deltas_to_seconds`')
else:
return default
def no_color(value):
if isinstance(value, bytes):
pattern = '\x1b\\[.*?[@-~]'
pattern = pattern.encode()
replace = b''
assert isinstance(pattern, bytes)
else:
pattern = u'\x1b\\[.*?[@-~]'
replace = ''
return re.sub(pattern, replace, value)
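# --- Added illustration (not in the original module) ---
# len_color (shown above) measures printable width by stripping ANSI sequences via
# no_color first, so a coloured string and its plain text compare equal in length.
def _sketch_len_color():
    coloured = '\x1b[32mOK\x1b[0m'         # "OK" wrapped in green colour codes
    return len_color(coloured), len('OK')  # both are 2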
|
BSD 3-Clause New or Revised License
|
011235813/cm3
|
alg/alg_qmix_checkers.py
|
Alg.process_actions
|
python
|
def process_actions(self, n_steps, actions):
actions_1hot = np.zeros([n_steps, self.n_agents, self.l_action], dtype=int)
grid = np.indices((n_steps, self.n_agents))
actions_1hot[grid[0], grid[1], actions] = 1
list_to_interleave = []
for n in range(self.n_agents):
list_to_interleave.append( actions_1hot[:, np.arange(self.n_agents)!=n, :] )
actions_others_1hot = np.zeros([self.n_agents*n_steps, self.n_agents-1, self.l_action])
for n in range(self.n_agents):
actions_others_1hot[n::self.n_agents, :, :] = list_to_interleave[n]
actions_1hot.shape = (n_steps*self.n_agents, self.l_action)
return actions_1hot, actions_others_1hot
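# --- Added illustration (not in the original file) ---
# The fancy indexing with np.indices is the core one-hot trick used above; shown here
# on a tiny, self-contained batch (4 steps, 2 agents, 3 discrete actions).
def _sketch_one_hot_actions():
    import numpy as np
    n_steps, n_agents, l_action = 4, 2, 3
    actions = np.random.randint(0, l_action, size=(n_steps, n_agents))
    one_hot = np.zeros([n_steps, n_agents, l_action], dtype=int)
    grid = np.indices((n_steps, n_agents))
    one_hot[grid[0], grid[1], actions] = 1
    return one_hot.reshape(n_steps * n_agents, l_action)   # matches the actions_1hot layout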
|
actions must have shape [time, agents],
and values are action indices
|
https://github.com/011235813/cm3/blob/79d091453d73768ee4fdca0b15448b9078adb573/alg/alg_qmix_checkers.py#L204-L232
|
import numpy as np
import tensorflow as tf
import sys
import networks
class Alg(object):
def __init__(self, experiment, dimensions, stage=1, n_agents=1,
tau=0.01, lr_Q=0.001, gamma=0.99, nn={}):
self.experiment = experiment
if self.experiment == "checkers":
self.rows_state = dimensions['rows_state']
self.columns_state = dimensions['columns_state']
self.channels_state = dimensions['channels_state']
self.l_state = n_agents * dimensions['l_state_one']
self.l_state_one_agent = dimensions['l_state_one']
self.l_state_other_agents = (n_agents-1) * dimensions['l_state_one']
self.l_obs_others = dimensions['l_obs_others']
self.l_obs_self = dimensions['l_obs_self']
self.rows_obs = dimensions['rows_obs']
self.columns_obs = dimensions['columns_obs']
self.channels_obs = dimensions['channels_obs']
self.l_action = dimensions['l_action']
self.l_goal = dimensions['l_goal']
self.n_agents = n_agents
self.tau = tau
self.lr_Q = lr_Q
self.gamma = gamma
self.nn = nn
self.agent_labels = np.eye(self.n_agents)
self.actions = np.eye(self.l_action)
self.create_networks(stage)
self.list_initialize_target_ops, self.list_update_target_ops = self.get_assign_target_ops(tf.trainable_variables())
self.create_train_op()
def create_networks(self, stage):
self.state_env = tf.placeholder(tf.float32, [None, self.rows_state, self.columns_state, self.channels_state], 'state_env')
self.v_state = tf.placeholder(tf.float32, [None, self.l_state], 'v_state')
self.v_goal_all = tf.placeholder(tf.float32, [None, self.n_agents*self.l_goal], 'v_goal_all')
self.v_state_one_agent = tf.placeholder(tf.float32, [None, self.l_state_one_agent], 'v_state_one_agent')
self.v_state_other_agents = tf.placeholder(tf.float32, [None, self.l_state_other_agents], 'v_state_other_agents')
self.v_goal = tf.placeholder(tf.float32, [None, self.l_goal], 'v_goal')
self.v_goal_others = tf.placeholder(tf.float32, [None, (self.n_agents-1)*self.l_goal], 'v_goal_others')
self.v_labels = tf.placeholder(tf.float32, [None, self.n_agents])
self.action_others = tf.placeholder(tf.float32, [None, self.n_agents-1, self.l_action], 'action_others')
if self.experiment == "checkers":
self.obs_self_t = tf.placeholder(tf.float32, [None, self.rows_obs, self.columns_obs, self.channels_obs], 'obs_self_t')
self.obs_self_v = tf.placeholder(tf.float32, [None, self.l_obs_self], 'obs_self_v')
self.obs_others = tf.placeholder(tf.float32, [None, self.l_obs_others], 'obs_others')
self.actions_prev = tf.placeholder(tf.float32, [None, self.l_action], 'action_prev')
with tf.variable_scope("Agent_main"):
if self.experiment == 'checkers':
self.agent_qs = networks.Qmix_single_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action)
with tf.variable_scope("Agent_target"):
if self.experiment == 'checkers':
self.agent_qs_target = networks.Qmix_single_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action)
self.argmax_Q = tf.argmax(self.agent_qs, axis=1)
self.argmax_Q_target = tf.argmax(self.agent_qs_target, axis=1)
self.actions_1hot = tf.placeholder(tf.float32, [None, self.l_action], 'actions_1hot')
self.q_selected = tf.reduce_sum(tf.multiply(self.agent_qs, self.actions_1hot), axis=1)
self.mixer_q_input = tf.reshape( self.q_selected, [-1, self.n_agents] )
self.q_target_selected = tf.reduce_sum(tf.multiply(self.agent_qs_target, self.actions_1hot), axis=1)
self.mixer_target_q_input = tf.reshape( self.q_target_selected, [-1, self.n_agents] )
with tf.variable_scope("Mixer_main"):
self.mixer = networks.Qmix_mixer_checkers(self.mixer_q_input, self.state_env, self.v_state, self.v_goal_all, self.l_state, self.l_goal, self.n_agents, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'])
with tf.variable_scope("Mixer_target"):
self.mixer_target = networks.Qmix_mixer_checkers(self.mixer_q_input, self.state_env, self.v_state, self.v_goal_all, self.l_state, self.l_goal, self.n_agents, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'])
def get_assign_target_ops(self, list_vars):
list_initial_ops = []
list_update_ops = []
list_Agent_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Agent_main')
map_name_Agent_main = {v.name.split('main')[1] : v for v in list_Agent_main}
list_Agent_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Agent_target')
map_name_Agent_target = {v.name.split('target')[1] : v for v in list_Agent_target}
if len(list_Agent_main) != len(list_Agent_target):
raise ValueError("get_initialize_target_ops : lengths of Agent_main and Agent_target do not match")
for name, var in map_name_Agent_main.items():
list_initial_ops.append( map_name_Agent_target[name].assign(var) )
for name, var in map_name_Agent_main.items():
list_update_ops.append( map_name_Agent_target[name].assign( self.tau*var + (1-self.tau)*map_name_Agent_target[name] ) )
list_Mixer_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_main')
map_name_Mixer_main = {v.name.split('main')[1] : v for v in list_Mixer_main}
list_Mixer_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_target')
map_name_Mixer_target = {v.name.split('target')[1] : v for v in list_Mixer_target}
if len(list_Mixer_main) != len(list_Mixer_target):
raise ValueError("get_initialize_target_ops : lengths of Mixer_main and Mixer_target do not match")
for name, var in map_name_Mixer_main.items():
list_initial_ops.append( map_name_Mixer_target[name].assign(var) )
for name, var in map_name_Mixer_main.items():
list_update_ops.append( map_name_Mixer_target[name].assign( self.tau*var + (1-self.tau)*map_name_Mixer_target[name] ) )
return list_initial_ops, list_update_ops
def run_actor(self, actions_prev, obs_others, obs_self_t, obs_self_v, goals, epsilon, sess):
obs_others = np.array(obs_others)
obs_self_t = np.array(obs_self_t)
obs_self_v = np.array(obs_self_v)
actions_prev_1hot = np.zeros([self.n_agents, self.l_action])
actions_prev_1hot[np.arange(self.n_agents), actions_prev] = 1
feed = {self.obs_others:obs_others, self.obs_self_t:obs_self_t,
self.obs_self_v:obs_self_v, self.v_goal:goals,
self.actions_prev: actions_prev_1hot}
actions_argmax = sess.run(self.argmax_Q, feed_dict=feed)
actions = np.zeros(self.n_agents, dtype=int)
for idx in range(self.n_agents):
if np.random.rand(1) < epsilon:
actions[idx] = np.random.randint(0, self.l_action)
else:
actions[idx] = actions_argmax[idx]
return actions
def create_train_op(self):
self.td_target = tf.placeholder(tf.float32, [None], 'td_target')
self.loss_mixer = tf.reduce_mean(tf.square(self.td_target - tf.squeeze(self.mixer)))
self.mixer_opt = tf.train.AdamOptimizer(self.lr_Q)
self.mixer_op = self.mixer_opt.minimize(self.loss_mixer)
def create_summary(self):
summaries_mixer = [tf.summary.scalar('loss_mixer', self.loss_mixer)]
mixer_main_variables = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Mixer_main')]
for v in mixer_main_variables:
summaries_mixer.append(tf.summary.histogram(v.op.name, v))
grads = self.mixer_opt.compute_gradients(self.loss_mixer, mixer_main_variables)
for grad, var in grads:
if grad is not None:
summaries_mixer.append(tf.summary.histogram(var.op.name + '/gradient', grad))
self.summary_op_Q = tf.summary.merge(summaries_mixer)
|
MIT License
|
pysmt/pysmt
|
pysmt/solvers/interpolation.py
|
Interpolator.sequence_interpolant
|
python
|
def sequence_interpolant(self, formulas):
raise NotImplementedError
|
Returns a sequence interpolant for the conjunction of formulas, or
None if the problem is satisfiable.
|
https://github.com/pysmt/pysmt/blob/ade4dc2a825727615033a96d31c71e9f53ce4764/pysmt/solvers/interpolation.py#L31-L36
|
class Interpolator(object):
def __init__(self):
self._destroyed = False
def binary_interpolant(self, a, b):
raise NotImplementedError
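# --- Added usage sketch (not in the original module) ---
# Concrete backends implement these abstract methods; end users typically go through
# pysmt's shortcut helpers. This sketch assumes a sequence_interpolant helper is
# exposed in pysmt.shortcuts and that an interpolation-capable solver such as MathSAT
# is installed (otherwise pysmt raises a NoSolverAvailableError).
def _sketch_sequence_interpolant():
    from pysmt.shortcuts import Symbol, And, Not, sequence_interpolant
    a, b, c = Symbol('a'), Symbol('b'), Symbol('c')    # Boolean symbols by default
    formulas = [And(a, Not(b)), And(b, Not(c)), c]     # jointly unsatisfiable conjunction
    return sequence_interpolant(formulas)              # len(formulas) - 1 interpolants, or None if SAT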
|
Apache License 2.0
|