column                stats           values
repository_name       stringlengths   7 – 107
function_path         stringlengths   4 – 190
function_identifier   stringlengths   1 – 236
language              stringclasses   1 value
function              stringlengths   9 – 647k
docstring             stringlengths   5 – 488k
function_url          stringlengths   71 – 285
context               stringlengths   0 – 2.51M
license               stringclasses   5 values
andresperezlopez/pysofaconventions
pysofaconventions/SOFANcFile.py
SOFANetCDFFile.getVariableAttributeFromName
python
def getVariableAttributeFromName(self, varName, attrName):
    varInstance = self.getVariableInstance(varName)
    return self.getVariableAttributeFromInstance(varInstance, attrName)
Get the value of a variable attribute.

:param varName: the name of the variable
:param attrName: the name of the queried attribute
:return: the value of the attribute, or None if not found
https://github.com/andresperezlopez/pysofaconventions/blob/5461d574293a8521f99aca99302fbfe41950197e/pysofaconventions/SOFANcFile.py#L150-L159
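A minimal usage sketch for this wrapper, not taken from the repository: the file path and the 'ListenerPosition'/'Units' names are only illustrative, and the import path follows the module location given in function_path above.

# Hypothetical SOFA file and attribute names.
from pysofaconventions.SOFANcFile import SOFANetCDFFile

sofa = SOFANetCDFFile('listener.sofa', 'r')   # open an existing file read-only
units = sofa.getVariableAttributeFromName('ListenerPosition', 'Units')
print(units)                                  # e.g. 'metre', or None if the attribute is absent
sofa.close()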
import netCDF4

from .SOFAError import SOFAError


class SOFANetCDFFile(object):

    def __init__(self, path, mode):
        self.file = netCDF4.Dataset(path, mode)
        self.filename = path

    def close(self):
        self.file.close()

    def getGlobalAttributesAsDict(self):
        return self.file.__dict__

    def getGlobalAttributeValue(self, attr):
        try:
            return getattr(self.file, attr)
        except AttributeError:
            raise SOFAError('Attribute not found: ' + attr)

    def getDimensionsAsDict(self):
        return self.file.dimensions

    def getDimension(self, dimName):
        try:
            return self.getDimensionsAsDict()[dimName]
        except KeyError:
            raise SOFAError("Dimension not found: " + dimName)

    def getDimensionSize(self, dimName):
        try:
            return self.getDimensionsAsDict()[dimName].size
        except KeyError:
            raise SOFAError("Dimension not found: " + dimName)

    def getVariablesAsDict(self):
        return self.file.variables

    def getVariableInstance(self, varName):
        try:
            return self.file.variables[varName]
        except KeyError:
            raise SOFAError("Variable not found: " + varName)

    def getVariableShape(self, varName):
        try:
            return self.file.variables[varName].shape
        except KeyError:
            raise SOFAError("Variable not found: " + varName)

    def getVariableValues(self, varName):
        try:
            var = self.getVariableInstance(varName)
        except SOFAError:
            raise SOFAError('Variable not found: ' + varName)
        return var[:]
BSD 3-Clause New or Revised License
perslev/u-time
utime/dataset/sleep_study/sleep_study.py
SleepStudy._load
python
def _load(self):
    self._psg, header = self._load_with_any_in(self._try_channels)
    self._set_loaded_channels(header['channel_names'])
    self._set_header_fields(header)
    if self.hyp_file_path is not None and not self.no_hypnogram:
        self._hypnogram, self.annotation_dict = load_hypnogram(
            self.hyp_file_path,
            period_length_sec=self.period_length_sec,
            annotation_dict=self.annotation_dict,
            sample_rate=header["sample_rate"]
        )
    else:
        self._hypnogram = False
    if self.strip_func:
        self._psg, self._hypnogram = apply_strip_func(self, self.org_sample_rate)
    elif self.hypnogram and not assert_equal_length(self.psg, self.hypnogram,
                                                    self.org_sample_rate):
        self.raise_err(RuntimeError, "PSG and hypnogram are not equally "
                                     "long in seconds. Consider setting a "
                                     "strip_function. "
                                     "See utime.preprocessing.strip_funcs.")
    if self.quality_control_func:
        self._psg = apply_quality_control_func(self, self.org_sample_rate,
                                               not bool(self.times_loaded))
    if self.org_sample_rate != self.sample_rate:
        self._psg = set_psg_sample_rate(self._psg,
                                        new_sample_rate=self.sample_rate,
                                        old_sample_rate=self.org_sample_rate)
    if self.scaler:
        self._psg, self._scaler_obj = apply_scaling(self.psg, self.scaler)
    if self.hypnogram:
        self._class_to_period_dict = create_class_int_to_period_idx_dict(
            self.hypnogram
        )
    self._psg = self._psg.astype(np.float32)
    self.times_loaded += 1
Loads data from the PSG and HYP files
-- If self.select_channels is set (aka non empty list), only the column names
   matching this list will be kept.
-- PSG data is kept as a numpy array. Use self.select_channels to map between
   names and the numpy array
-- If self.scaler is set, the PSG array will be scaled according to the
   specified sklearn.preprocessing scaler
-- If self.hyp_strip_func is set, this function will be applied to the
   hypnogram object.
https://github.com/perslev/u-time/blob/f7c8e3f1368f43226872a69b0fbb8c29990e4bd9/utime/dataset/sleep_study/sleep_study.py#L327-L386
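A hedged sketch of how _load is normally reached: the subject directory, file regexes, and scaler name below are placeholders, and load() (invoked from the constructor's `if load:` branch) is assumed to delegate to _load.

# Placeholder paths/patterns; real PSG and hypnogram files are required.
from utime.dataset.sleep_study.sleep_study import SleepStudy

study = SleepStudy(subject_dir='/data/subject_01',   # hypothetical subject folder
                   psg_regex=r'.*\.edf',             # hypothetical PSG pattern
                   hyp_regex=r'.*\.ids',             # hypothetical hypnogram pattern
                   period_length_sec=30,
                   load=False)
study.scaler = 'RobustScaler'   # any name accepted by utime's assert_scaler (assumed here)
study.load()                    # runs _load(); study.psg, study.hypnogram etc. are populated
print(study.psg.shape, study.sample_rate)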
import numpy as np from utime import errors from utime.io.high_level_file_loaders import load_psg, load_hypnogram from utime.preprocessing import (apply_scaling, strip_funcs, apply_strip_func, assert_scaler, set_psg_sample_rate, quality_control_funcs, assert_equal_length, apply_quality_control_func) from utime.hypnogram.utils import create_class_int_to_period_idx_dict from utime.dataset.sleep_study.subject_dir_sleep_study_base import SubjectDirSleepStudyBase def assert_header_fields(header): check = (('sample_rate', False), ('channel_names', False), ('date', True)) for c, replace_with_none in check: if c not in header: if replace_with_none: header[c] = None else: raise ValueError("Invalid header file loaded, did not find " "attribute {} in header {}".format(c, header)) class SleepStudy(SubjectDirSleepStudyBase): def __init__(self, subject_dir, psg_regex=None, hyp_regex=None, header_regex=None, period_length_sec=None, no_hypnogram=None, annotation_dict=None, load=False, logger=None): super(SleepStudy, self).__init__( subject_dir=subject_dir, psg_regex=psg_regex, hyp_regex=hyp_regex, header_regex=header_regex, period_length_sec=period_length_sec, no_hypnogram=no_hypnogram, annotation_dict=annotation_dict, logger=logger ) self._scaler = None self._scaler_obj = None self._load_time_random_channel_selector = None self._strip_func = None self._quality_control_func = None self._class_to_period_dict = None self._sample_rate = None self._date = None self._org_sample_rate = None self._none_on_unload = ( '_psg', '_date', '_org_sample_rate', '_hypnogram', '_scaler_obj', '_class_to_period_dict' ) self.times_loaded = 0 if load: self.load() def __str__(self): if self.loaded: t = (self.identifier, len(self.select_channels), self.date, self.sample_rate, self.hypnogram is not False) return "SleepStudy(loaded=True, identifier={:s}, N channels: " "{}, date: {}, sample_rate={:.1f}, hypnogram={})".format(*t) else: return repr(self) def __repr__(self): return "SleepStudy(loaded={}, identifier={})".format(self.loaded, self.identifier) @property def class_to_period_dict(self): return self._class_to_period_dict @property def load_time_random_channel_selector(self): return self._load_time_random_channel_selector @load_time_random_channel_selector.setter def load_time_random_channel_selector(self, channel_selector): if channel_selector and self.select_channels: raise RuntimeError("Setting the 'load_time_random_channel_selector' " "attribute is not possible with set values in " "'select_channels'") from utime.io.channels import RandomChannelSelector if channel_selector is not None and not isinstance(channel_selector, RandomChannelSelector): raise TypeError("Expected 'channel_selector' argument to be of " "type {}, got {}".format(type(RandomChannelSelector), type(channel_selector))) self._load_time_random_channel_selector = channel_selector @property def sample_rate(self): return self._sample_rate @sample_rate.setter def sample_rate(self, sample_rate): sample_rate = int(sample_rate) if sample_rate <= 0: raise ValueError("Sample rate must be a positive integer, " "got {}".format(sample_rate)) self._sample_rate = sample_rate if self.loaded: self.reload(warning=True) @property def org_sample_rate(self): return self._org_sample_rate @property def date(self): return self._date @property def scaler(self): return self._scaler @scaler.setter def scaler(self, scaler): if not assert_scaler(scaler): raise ValueError("Invalid scaler, does not exist {}".format(scaler)) self._scaler = scaler if self.loaded: self.reload(warning=True) 
@property def scaler_obj(self): return self._scaler_obj @property def strip_func(self): return self._strip_func def set_strip_func(self, strip_func_str, **kwargs): if strip_func_str not in strip_funcs.__dict__: self.raise_err(ValueError, "Invalid strip function " "{}".format(strip_func_str)) self._strip_func = (strip_func_str, kwargs) if self.loaded: self.reload(warning=True) @property def quality_control_func(self): return self._quality_control_func def set_quality_control_func(self, quality_control_func, **kwargs): if quality_control_func not in quality_control_funcs.__dict__: self.raise_err(ValueError, "Invalid quality control function " "{}".format(quality_control_func)) self._quality_control_func = (quality_control_func, kwargs) if self.loaded: self.reload(warning=True) @property def loaded(self): return not any((self.psg is None, self.hypnogram is None)) def _load_with_any_in(self, channel_sets): for i, channel_set in enumerate(channel_sets): try: if self.load_time_random_channel_selector: channel_set = None temp = self.load_time_random_channel_selector psg, header = load_psg(psg_file_path=self.psg_file_path, load_channels=channel_set or None, load_time_channel_selector=temp, header_file_path=self.header_file_path) return psg, header except errors.ChannelNotFoundError as e: if i < len(channel_sets) - 1: continue else: s, sa = self.select_channels, self.alternative_select_channels err = errors.ChannelNotFoundError("Could not load " "select_channels {} or " "alternative_select_" "channels " "{}".format(s, sa)) raise err from e
MIT License
azure/autorest.az
test/scenarios/mixed-reality/output/ext_Incremental/src/mixed-reality/azext_mixed_reality/vendored_sdks/mixedreality/operations/spatial_anchors_accounts_operations.py
SpatialAnchorsAccountsOperations.delete
python
def delete(
        self, resource_group_name, spatial_anchors_account_name, custom_headers=None, raw=False, **operation_config):
    url = self.delete.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'spatialAnchorsAccountName': self._serialize.url("spatial_anchors_account_name", spatial_anchors_account_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$')
    }
    url = self._client.format_url(url, **path_format_arguments)

    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200, 204]:
        raise models.ErrorResponseException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
Delete a Spatial Anchors Account.

:param resource_group_name: Name of an Azure resource group.
:type resource_group_name: str
:param spatial_anchors_account_name: Name of a Mixed Reality Spatial Anchors Account.
:type spatial_anchors_account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the deserialized response
:param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorResponseException<azure.mgmt.mixedreality.models.ErrorResponseException>`
https://github.com/azure/autorest.az/blob/b000db70f608c64918d04a0e0f5b50bb5468baa0/test/scenarios/mixed-reality/output/ext_Incremental/src/mixed-reality/azext_mixed_reality/vendored_sdks/mixedreality/operations/spatial_anchors_accounts_operations.py#L168-L218
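A brief, hedged usage sketch. Construction and authentication of the management client are omitted because the vendored client class is not shown here, so `client` below is an assumed, pre-configured client exposing this operations group; the resource names are placeholders.

# 'client' is assumed to be an authenticated Mixed Reality management client.
client.spatial_anchors_accounts.delete(
    resource_group_name='my-resource-group',            # placeholder
    spatial_anchors_account_name='my-anchors-account')  # placeholder
# Returns None on HTTP 200/204; pass raw=True to receive a ClientRawResponse instead.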
import uuid from msrest.pipeline import ClientRawResponse from .. import models class SpatialAnchorsAccountsOperations(object): models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2019-02-28-preview" self.config = config def list_by_subscription( self, custom_headers=None, raw=False, **operation_config): def internal_paging(next_link=None, raw=False): if not next_link: url = self.list_by_subscription.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response deserialized = models.SpatialAnchorsAccountPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.SpatialAnchorsAccountPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.MixedReality/spatialAnchorsAccounts'} def list_by_resource_group( self, resource_group_name, custom_headers=None, raw=False, **operation_config): def internal_paging(next_link=None, raw=False): if not next_link: url = self.list_by_resource_group.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$') } url = self._client.format_url(url, **path_format_arguments) query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response deserialized = models.SpatialAnchorsAccountPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = 
models.SpatialAnchorsAccountPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MixedReality/spatialAnchorsAccounts'}
MIT License
masoniteframework/orm
src/masoniteorm/query/grammars/BaseGrammar.py
BaseGrammar.process_group_by
python
def process_group_by(self):
    sql = ""
    columns = []
    for group_by in self._group_by:
        if group_by.raw:
            if group_by.bindings:
                self.add_binding(*group_by.bindings)
            sql += "GROUP BY " + group_by.column
            return sql
        else:
            columns.append(self._table_column_string(group_by.column))

    if columns:
        sql += " GROUP BY {column}".format(column=", ".join(columns))

    return sql
Compiles the GROUP BY clause for a query expression. Returns: the compiled GROUP BY string (empty if no group-by expressions were set).
https://github.com/masoniteframework/orm/blob/29b331068ddad9f17584c3dd48917043eba915ab/src/masoniteorm/query/grammars/BaseGrammar.py#L399-L421
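An illustrative sketch of the two branches, not the ORM's real expression class: GroupClause below models only the attributes the method reads (column, raw, bindings), and the quoting of the columnized output depends on the concrete grammar subclass.

# Stand-in for the ORM's group-by expression objects (illustration only).
class GroupClause:
    def __init__(self, column, raw=False, bindings=()):
        self.column, self.raw, self.bindings = column, raw, bindings

# Non-raw clauses are columnized and joined, e.g. with MySQL-style quoting:
#   [GroupClause("office"), GroupClause("active")]    ->  ' GROUP BY `office`, `active`'
# A raw clause registers its bindings and short-circuits, returning verbatim SQL:
#   [GroupClause("COALESCE(team, office)", raw=True)] ->  'GROUP BY COALESCE(team, office)'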
import re from ...expressions.expressions import ( SubGroupExpression, SubSelectExpression, SelectExpression, BetweenExpression, JoinClause, OnClause, ) class BaseGrammar: table = "users" def __init__( self, columns=(), table="users", database=None, wheres=(), limit=False, offset=False, updates=None, aggregates=(), order_by=(), group_by=(), joins=(), lock=False, having=(), connection_details=None, ): self._columns = columns self.table = table self.database = database self._wheres = wheres self._limit = limit self._offset = offset self._updates = updates or {} self._aggregates = aggregates self._order_by = order_by self._group_by = group_by self._joins = joins self._having = having self.lock = lock self._connection_details = connection_details or {} self._column = None self._bindings = [] self._sql = "" self._sql_qmark = "" self._action = "select" self.queries = [] def compile(self, action, qmark=False): self._action = action return getattr(self, "_compile_" + action)(qmark=qmark) def _compile_select(self, qmark=False): if not self.table: self._sql = ( self.select_no_table() .format( columns=self.process_columns(separator=", ", qmark=qmark), table=self.process_table(self.table), joins=self.process_joins(qmark=qmark), wheres=self.process_wheres(qmark=qmark), limit=self.process_limit(), offset=self.process_offset(), aggregates=self.process_aggregates(), order_by=self.process_order_by(), group_by=self.process_group_by(), having=self.process_having(), lock=self.process_locks(), ) .strip() ) else: self._sql = ( self.select_format() .format( columns=self.process_columns(separator=", ", qmark=qmark), table=self.process_table(self.table), joins=self.process_joins(qmark=qmark), wheres=self.process_wheres(qmark=qmark), limit=self.process_limit(), offset=self.process_offset(), aggregates=self.process_aggregates(), order_by=self.process_order_by(), group_by=self.process_group_by(), having=self.process_having(), lock=self.process_locks(), ) .strip() ) return self def _compile_update(self, qmark=False): self._sql = self.update_format().format( key_equals=self._compile_key_value_equals(qmark=qmark), table=self.process_table(self.table), wheres=self.process_wheres(qmark=qmark), ) return self def _compile_insert(self, qmark=False): self._sql = self.insert_format().format( key_equals=self._compile_key_value_equals(qmark=qmark), table=self.process_table(self.table), columns=self.process_columns(separator=", ", action="insert", qmark=qmark), values=self.process_values(separator=", ", qmark=qmark), ) return self def _compile_bulk_create(self, qmark=False): all_values = [list(x.values()) for x in self._columns] self._sql = self.bulk_insert_format().format( key_equals=self._compile_key_value_equals(qmark=qmark), table=self.process_table(self.table), columns=self.columnize_bulk_columns(list(self._columns[0].keys())), values=self.columnize_bulk_values(all_values, qmark=qmark), ) return self def columnize_bulk_columns(self, columns=[]): return ", ".join( self.column_string().format(column=x, separator="") for x in columns ).rstrip(",") def columnize_bulk_values(self, columns=[], qmark=False): sql = "" for x in columns: inner = "" if isinstance(x, list): for y in x: if qmark: self.add_binding(y) inner += ( "'?', " if qmark else self.value_string().format(value=y, separator=", ") ) inner = inner.rstrip(", ") sql += self.process_value_string().format(value=inner, separator=", ") else: if qmark: self.add_binding(x) sql += ( "'?', " if qmark else self.process_value_string().format( value="?" 
if qmark else x, separator=", " ) ) return sql.rstrip(", ") def process_value_string(self): return "({value}){separator}" def _compile_delete(self, qmark=False): self._sql = self.delete_format().format( key_equals=self._compile_key_value_equals(qmark=qmark), table=self.process_table(self.table), wheres=self.process_wheres(qmark=qmark), ) return self def _get_multiple_columns(self, columns): if isinstance(columns, list): column_string = "" for col in columns: column_string += self.process_column(col) + ", " return column_string.rstrip(", ") return self.process_column(columns) def process_joins(self, qmark=False): sql = "" for join in self._joins: if isinstance(join, JoinClause): on_string = "" for clause_idx, clause in enumerate(join.get_on_clauses()): keyword = clause.operator.upper() if clause_idx else "ON" if isinstance(clause, OnClause): on_string += f"{keyword} {self._table_column_string(clause.column1)} {clause.equality} {self._table_column_string(clause.column2)} " else: if clause.value_type == "NULL": sql_string = self.where_null_string() on_string += sql_string.format( keyword=keyword, column=self.process_column(clause.column), ) elif clause.value_type == "NOT NULL": sql_string = self.where_not_null_string() on_string += sql_string.format( keyword=keyword, column=self.process_column(clause.column), ) else: if qmark: value = "'?'" self.add_binding(clause.value) else: value = self._compile_value(clause.value) on_string += f"{keyword} {self._table_column_string(clause.column)} {clause.equality} {value} " sql += self.join_string().format( foreign_table=self.process_table(join.table), alias=f" AS {self.process_table(join.alias)}" if join.alias else "", on=on_string, keyword=self.join_keywords[join.clause], ) sql += " " return sql def _compile_key_value_equals(self, qmark=False): sql = "" for update in self._updates: if update.update_type == "increment": sql_string = self.increment_string() elif update.update_type == "decrement": sql_string = self.decrement_string() else: sql_string = self.key_value_string() column = update.column value = update.value if isinstance(column, dict): for key, value in column.items(): if hasattr(value, "expression"): sql += self.column_value_string().format( column=self._table_column_string(key), value=value.expression, separator=", ", ) else: sql += sql_string.format( column=self._table_column_string(key), value=value if not qmark else "?", separator=", ", ) if qmark: self._bindings += (value,) else: sql += sql_string.format( column=self._table_column_string(column), value=value if not qmark else "?", separator=", ", ) if qmark: self._bindings += (value,) sql = sql.rstrip(", ") return sql def process_aggregates(self): sql = "" for aggregates in self._aggregates: aggregate = aggregates.aggregate column = aggregates.column aggregate_function = self.aggregate_options.get(aggregate, "") if not aggregates.alias and column == "*": aggregate_string = self.aggregate_string_without_alias() else: aggregate_string = self.aggregate_string_with_alias() sql += ( aggregate_string.format( aggregate_function=aggregate_function, column="*" if column == "*" else self._table_column_string(column), alias=self.process_alias(aggregates.alias or column), ) + ", " ) return sql def process_order_by(self): sql = "" if self._order_by: order_crit = "" for order_bys in self._order_by: if order_bys.raw: order_crit += order_bys.column if not isinstance(order_bys.bindings, (list, tuple)): raise ValueError( f"Bindings must be tuple or list. 
Received {type(order_bys.bindings)}" ) if order_bys.bindings: self.add_binding(*order_bys.bindings) continue if len(order_crit): order_crit += ", " column = order_bys.column direction = order_bys.direction if "." in column: column_string = self._table_column_string(column) else: column_string = self.column_string().format( column=column, separator="" ) order_crit += self.order_by_format().format( column=column_string, direction=direction.upper() ) sql += self.order_by_string().format(order_columns=order_crit) return sql
MIT License
aakashkumarnain/augmix_tf2
augmentation.py
int_parameter
python
def int_parameter(level, maxval):
    return int(level * maxval / 10)
Helper function to scale `level` between 0 and `maxval`.

Args:
    level: Level of the operation that will be in [0, `PARAMETER_MAX`].
    maxval: Maximum value that the operation can have. This will be scaled to
        level/PARAMETER_MAX.

Returns:
    An int that results from scaling `maxval` according to `level`.
https://github.com/aakashkumarnain/augmix_tf2/blob/dbc3c1ec56798363133afd3b591637f378814586/augmentation.py#L9-L18
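A quick numeric check of the scaling, assuming int_parameter as defined above is in scope and PARAMETER_MAX is 10, as hard-coded in the divisor:

assert int_parameter(3, 30) == 9     # int(3 * 30 / 10)
assert int_parameter(10, 30) == 30   # the top level maps to the full value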
import numpy as np
from PIL import Image
from PIL import ImageOps

from config import IMAGE_SIZE
MIT License
jkibele/opticalrs
OpticalRS/Lyzenga1981.py
plot_band_combos
python
def plot_band_combos(sandarr, n_bands, apply_log=True, figsize=(15, 15)):
    if apply_log:
        logarr = np.log(sandarr[:, :, :n_bands])
    else:
        logarr = sandarr[:, :, :n_bands]
    logmax = logarr.max()
    logmin = logarr.min()
    fig, axarr = plt.subplots(n_bands - 1, n_bands - 1, figsize=figsize,
                              sharex=True, sharey=True, frameon=False)
    for i, j in combinations(range(n_bands), 2):
        # np.ma.is_masked must be called on the array; the original bare
        # reference to the function was always truthy.
        if np.ma.is_masked(logarr):
            x, y = logarr[:, :, i].compressed(), logarr[:, :, j].compressed()
        else:
            x, y = logarr[:, :, i].ravel(), logarr[:, :, j].ravel()
        axarr[i, j - 1].set_axis_off()
        ax = axarr[j - 1, i]
        ax.set_axis_on()
        ax.scatter(x, y, alpha=0.01, c='steelblue', marker='o')
        ax.set_xlim(logmin, logmax)
        ax.set_ylim(logmin, logmax)
        xl = r"band %i" % (i + 1)
        yl = r"band %i" % (j + 1)
        ax.set_xlabel(xl)
        ax.set_ylabel(yl)
        ax.set_frame_on(False)
        odrslope, odrinter, odr_res_val = lin_odr(x, y)
        odrline = lambda x: odrslope * x + odrinter
        xmm = np.array([x.min(), x.max()])
        ax.plot(xmm, odrline(xmm), c='r')
    plt.tight_layout()
Draw plots sort of similar to Figure 3 of Lyzenga 1981.

Parameters
----------
sandarr : numpy.array
    An image array that contains only pixels from a uniform bottom type
    (usually sand) and varying depth. Values from this array will be passed
    to the attenuation_coef_ratio method.
n_bands : int
    The number of bands you want to generate plots for.
apply_log : boolean
    Whether or not to log the `sandarr` values. If you've already log
    transformed them, this should be ``False``. (Default value = True)
figsize : tuple
    The size of the figure. (Default value = (15,15))

Returns
-------
Nothing
    It just draws a plot. Exactly how the plot is drawn depends on matplotlib
    settings.
https://github.com/jkibele/opticalrs/blob/20d73aec1cbabfa54e62214ae3179e3ba375dff9/OpticalRS/Lyzenga1981.py#L212-L264
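A hedged usage sketch with synthetic data standing in for real sand pixels; the import path mirrors the file layout above, and the random values only demonstrate the mechanics of the plot, not a meaningful attenuation ratio.

import numpy as np
from OpticalRS import Lyzenga1981   # import path assumed from OpticalRS/Lyzenga1981.py

# Synthetic stand-in for pixels over a uniform sandy bottom: 4 bands of
# positive values (the function takes the log itself by default).
sand_pixels = np.random.uniform(0.01, 1.0, size=(60, 60, 4))
Lyzenga1981.plot_band_combos(sand_pixels, n_bands=4, figsize=(8, 8))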
import numpy as np from math import sqrt from fractions import Fraction from decimal import Decimal from itertools import combinations from matplotlib import pyplot as plt from scipy import odr def attenuation_coef_ratio(band_i,band_j,apply_log=True): if np.ma.is_masked(band_i): band_i = band_i.compressed() else: band_i = band_i.ravel() if np.ma.is_masked(band_j): band_j = band_j.compressed() else: band_j = band_j.ravel() if apply_log: band_i = np.log(band_i) band_j = np.log(band_j) cov_mat = np.cov(band_i,band_j) i_var = cov_mat[0,0] j_var = cov_mat[1,1] ij_cov = cov_mat[0,1] a = (i_var - j_var) / ( 2.0 * ij_cov ) att_coef = a + sqrt( a**2 + 1 ) return att_coef def di_index(imarr,sandarr,i,j,apply_log=True): atr = attenuation_coef_ratio(sandarr[:,:,i],sandarr[:,:,j],apply_log=apply_log) fr = Fraction(Decimal(atr)) Ki,Kj = fr.numerator,fr.denominator Bi = imarr[:,:,i] Bj = imarr[:,:,j] DI = ( Kj * np.log(Bi) - Ki * np.log(Bj) ) / sqrt( Ki**2 + Kj**2 ) return DI def di_indexes_bandarr(imarr,sandarr,n_bands,subset_slice=None,pix_band_shaped=False): if subset_slice: imarr = imarr[subset_slice] combos = [cb for cb in combinations( range(n_bands), 2 ) ] n_combos = len( combos ) arrdtype = imarr.dtype if np.ma.is_masked( imarr ): out_arr = np.ma.empty((imarr.shape[0],imarr.shape[1],n_combos),dtype=arrdtype) else: out_arr = np.empty((imarr.shape[0],imarr.shape[1],n_combos),dtype=arrdtype) for ind,bc in enumerate( combos ): i,j = bc di_ind = di_index(imarr,sandarr,i,j) out_arr[:,:,ind] = di_ind if np.ma.is_masked( imarr ): out_arr[...,ind].mask = imarr[...,i].mask | imarr[...,j].mask if pix_band_shaped: out_arr = out_arr.reshape( out_arr.shape[0]*out_arr.shape[1], -1 ) return out_arr, combos def lin_odr(x,y): def lf(B,x): return B[0]*x + B[1] linmod = odr.Model(lf) mydata = odr.RealData(x,y) myodr = odr.ODR(mydata,linmod,beta0=[1.,2.]) myout = myodr.run() slope,intercept = myout.beta return slope, intercept, myout.res_var
BSD 3-Clause New or Revised License
themson/murdock
dockCLIENT.py
egressBust
python
def egressBust(docBuffer): targetIP = '' portList = '' topPortsPath = "../top-ports" minPort = 0 maxPort = 65535 threads = 0 global EGRESSPORT print "\n *** Choose an egress method (TCP only) ***\n" method = 99 while method not in range(1, 6): print "1. Check a X ports by % prevalence in nmap-service file. (X)" print "2. Check a given range of ports. (X-Y)" print "3. Enter comma delimited list of ports. (X,Y,Z,A,F,B,J...)" print "4. Print stored egress port for this session" print "5. No thanks... return to shell" try: method = int(raw_input('Method (1-5): ')) except: method = 99 if method == 1: topPortsCount = 0 try: with open(topPortsPath) as f: topPortsCount = sum(1 for line in f) except: print "ERROR: Top Ports File missing" return print "\nTry top X ports in the NMAP services file." portCount = 0 while portCount not in range (1, topPortsCount + 1): if portCount > topPortsCount: print "\n*** Only %s ports are available. ***" % topPortsCount print "How many ports would you like to check?" try: portCount = int(raw_input('Check: ')) except: portCount = 0 with open(topPortsPath, 'r') as myFile: portList = [myFile.next() for line in xrange(portCount)] portList = ','.join([nl.rstrip() for nl in portList]) elif method == 2: minChoice = -1; maxChoice = 99999 while minChoice < minPort or maxChoice > maxPort or minChoice > maxChoice: if minChoice < minPort or maxChoice > maxPort or minChoice > maxChoice: print "\n*** Out of Bounds: Min=0 Max=65535 ***" print "Scan port range Min - Max?" try: minChoice = int(raw_input('Min Port: ')) maxChoice = int(raw_input('Max Port: ')) except: minChoice = -1 maxChoice = 99999 portList = "%s-%s" % (minChoice, maxChoice) elif method == 3: isValid = False while not isValid: print "\nEnter comma separated port list. (X,Y,Z,A,F,B,J...)" try: portList = raw_input('List: ').strip().split(",") for port in portList: port = int(port) if port < minPort or port > maxPort: print "\n *** Error - Invalid port in range: %s" % port isValid = False break else: isValid = True except Exception, e: print e isValid = False portList = ','.join(list(set(portList))) elif method == 4: if EGRESSPORT: return """ *** Stored Egress Port for Session ***" Port: """ + EGRESSPORT + """ We suggest confirming with egress method #3. """ else: return """ *** No known Egress Ports *** Egress Port clears on session init and !sync """ else: print "\n" return "" while targetIP == '': try: targetIP = raw_input('\nExternal Egress Target IPv4 Address: ') socket.inet_aton(targetIP) except socket.error: print "\n*** Invalid IP Address ***" targetIP = '' while threads < 1: try: threads = int(raw_input('\nWorker Threads (default 10): ')) threads = str(threads) except: print "Default 10 threads being used." 
threads = '10' isCorrect = '' while isCorrect == '': print "\n*** Confirm Egress Data ***" print "Target IP : %s" % targetIP tmpList = [] if "-" in portList: minPort, maxPort = portList.split("-") tmpList = list(( str(port) for port in range(int(minPort), int(maxPort) + 1) )) else: tmpList = portList.split(",") print "Ports Count : %s" % len(tmpList) print "Thread Count: %s" % threads try: isCorrect = str(raw_input('\nLaunch Check (y/n): ')).lower() if isCorrect.startswith('y'): isCorrect = 'yes' elif isCorrect.startswith('n'): isCorrect = 'no' return "\n*** Egress check cancelled ***\n" else: isCorrect = '' except: isCorrect = '' egressCmd = "!egress|" + portList + "|" + targetIP + "|" + threads logger.debug(egressCmd) print "\n*** Delivering Request to remote host ***" docBuffer.sendData(egressCmd) try: while 1: try: srvResponse = docBuffer.readData() except Exception as e: if str(e) == "READ ERROR: Connection timed out.": logger.debug("executing continue on : " + str(e) ) continue else: logger.debug("returning with error of: " + str(e) ) return str(e) if srvResponse.startswith("<egress>"): egressList = srvResponse.split(".") if egressList[1] == "<started>": print "\n*** Range accepted ***" print "Searching %s ports with %s worker threads." % (egressList[2].strip("<>"), threads) print "This may take a while..." elif egressList[1] == "<failed>": return "\n*** ERROR: Egress Check Failed ***" elif egressList[1] == "<open>": EGRESSPORT = egressList[2].strip("<>") return "\n*** OPEN - Egress port: %s ***\n" % EGRESSPORT elif egressList[1] == "<closed>": return "\n*** CLOSED - All checked ports closed ***\n" except KeyboardInterrupt: print "Interrupt caught.\n" return
Update topPorts file with the following command:

    sort -r -n -k 3 /<pathto>/nmap-services | grep -i tcp | awk '{print $2}' | cut -d/ -f1 > /<docDoorPath>/top-ports
https://github.com/themson/murdock/blob/ffa44e3a15367d79518f0ee707a7143cccbea27a/dockCLIENT.py#L234-L394
import logging import os import sys import subprocess import shlex import socket import random from time import sleep from hashlib import md5 from docBuffer import docClientBuffer from ast import literal_eval from string import ascii_uppercase, digits DEBUG = False logger = logging.getLogger('__docCLIENT__') if not DEBUG: logger.setLevel(logging.ERROR) else: logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() if not DEBUG: ch.setLevel(logging.ERROR) else: ch.setLevel(logging.NOTSET) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) OS = '' EGRESSPORT = '' BLOCKSIZE=2**10 sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) def upload(docBuffer, cmdStr): BLOCKSIZE cmdList = shlex.split(cmdStr) localPath = os.path.expandvars(cmdList[1].replace('~','$HOME')) if OS == 'Windows': remotePath = shlex.split(cmdStr.replace('\\','\\\\'))[2] else: remotePath = cmdList[2] uploadStr = '!upload.' + remotePath.encode('base64','strict').strip('\n') localFileExist = os.path.exists(localPath) if localFileExist: try: open(localPath,"rb") docBuffer.sendData(uploadStr) except IOError: return "ERROR - local: Can not read to local file: " + localPath try: canSend = docBuffer.readData() except Exception as e: return str(e) if canSend == "<OKAYSND>": try: fd = open(localPath,"rb") md5Obj = md5() fileSize = os.path.getsize(localPath) sentSize = 0 data = fd.read(BLOCKSIZE) md5Obj.update(data) docBuffer.sendData("<BOF>." + data.encode('base64','strict') ) sentSize += BLOCKSIZE if sentSize >= fileSize: print "0% .. 100%" else: print "0%% .. %3.2F%% .. " % ( 100 * (sentSize / float(fileSize)) ), while True: data = fd.read(BLOCKSIZE) if not data: fileHash = md5Obj.digest().encode('base64','strict') try: docBuffer.sendData("<EOF>." + fileHash ) except Exception as e: return str(e) print "Send Complete, waiting for remote integrity check." break toWrite = docBuffer.CLIENT_WRITE_COL + str(docBuffer.getToWrite()) currentData = docBuffer.getCellData(toWrite) while(currentData != "<NULL>" and currentData != "<READ>"): print " ... ", sleep(1) currentData = docBuffer.getCellData(toWrite) md5Obj.update(data) docBuffer.sendData(data.encode('base64','strict') ) sentSize += BLOCKSIZE if sentSize >= fileSize: print " 100%" else: print "%3.2F%% .. " % ( 100 * (sentSize / float(fileSize)) ), try: integrityCheck = docBuffer.readData() except Exception as e: return str(e) if integrityCheck == "<OKAYRCV>": return "\nFile transfered successfully, integrity verified." elif integrityCheck == "<OKAYFAIL>": return "ERROR -remote: Remote integrity check failed, deleting remote file." except IOError: return "ERROR - local: can not read file : " + localPath else: return "ERROR - remote: Remote path: " + remotePath + " does not exist or insufficient permissions." elif not localFileExist: return "ERROR - local: Local File: " + localPath + " does not exist." def download(docBuffer, cmdStr): BLOCKSIZE cmdList = shlex.split(cmdStr) if OS == 'Windows': remotePath = shlex.split(cmdStr.replace('\\','\\\\'))[1] else: remotePath = cmdList[1] localPath = os.path.expandvars(cmdList[2].replace('~','$HOME')) tmpFile = localPath + ".tmp" dloadStr = '!download.' 
+ remotePath.encode('base64','strict').strip('\n') canRcv = '' existBefore = os.path.exists(localPath) try: open(localPath,"wb") docBuffer.sendData(dloadStr) except IOError: return "ERROR: Can not write to local file: " + localPath try: canRcv = docBuffer.readData() except Exception as e: return str(e) if canRcv.startswith("<OKAYRCV>"): try: fd = open(tmpFile, "wb") md5Obj = md5() fileSize = int( canRcv.split(".")[1] ) rcvSize = 0 try: rawData = docBuffer.readData() except Exception as e: return str(e) dataList = rawData.split('.') if dataList[0] != "<BOF>": docBuffer.lastReadUpdate( docBuffer.getToRead() ) return "ERROR: download() - Data BOF format error." binData = dataList[1].decode('base64','strict') fd.write(binData) md5Obj.update(binData) rcvSize += BLOCKSIZE if rcvSize >= fileSize: print " 100%" else: print "%3.2F%% .. " % ( 100 * (rcvSize / float(fileSize)) ), while binData != "<EOF>" : try: binData = docBuffer.readData() except Exception as e: return str(e) if binData == "<READ>" or binData == "<NULL>": pass elif binData.startswith("<EOF>"): fd.close() dataList = binData.split(".") binData = dataList[0] fileHash = dataList[1] docBuffer.lastReadUpdate( docBuffer.getToRead() ) if fileHash == md5Obj.digest().encode('base64','strict'): if os.path.exists(localPath): os.remove(localPath) os.rename(tmpFile, localPath) return "\nFile transfered successfully, integrity verified." else: if os.path.exists(tmpFile): os.remove(tmpFile) return "ERROR: Integrity check failed, deleting temp file." else: binData = binData.decode('base64','strict') fd.write(binData) md5Obj.update(binData) rcvSize += BLOCKSIZE if rcvSize >= fileSize: print " 100%" else: print "%3.2F%% .. " % ( 100 * (rcvSize / float(fileSize)) ), except IOError: return "ERROR: Cannot write to file: " + tmpFile else: if not existBefore: os.remove(localPath) return "ERROR: remote path does not exist or insufficient permissions."
MIT License
schemaorg/sdopythonapp
lib/rdflib/plugins/sparql/sparql.py
FrozenBindings.remember
python
def remember(self, these):
    return FrozenBindings(
        self.ctx, (x for x in iteritems(self) if x[0] in these))
Return a frozen dict of only the bindings in `these`.
https://github.com/schemaorg/sdopythonapp/blob/128be97d359178b26e5211a3e758933ff3a7b3df/lib/rdflib/plugins/sparql/sparql.py#L220-L225
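A small, self-contained illustration of the filtering; the query context is irrelevant to the selection itself, so None stands in for the usual QueryContext, and the import targets the upstream rdflib module of the same path.

from rdflib import Literal, Variable
from rdflib.plugins.sparql.sparql import FrozenBindings

bindings = FrozenBindings(None, {Variable('s'): Literal('alice'),
                                 Variable('o'): Literal('bob')})
kept = bindings.remember([Variable('s')])
print(dict(kept))   # only the 's' binding survives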
from __future__ import absolute_import import collections import itertools import datetime import isodate from six import text_type, iteritems from rdflib.compat import Mapping, MutableMapping from rdflib.namespace import NamespaceManager from rdflib import Variable, BNode, Graph, ConjunctiveGraph, URIRef, Literal from rdflib.term import Node from rdflib.plugins.sparql.parserutils import CompValue import rdflib.plugins.sparql class SPARQLError(Exception): def __init__(self, msg=None): Exception.__init__(self, msg) class NotBoundError(SPARQLError): def __init__(self, msg=None): SPARQLError.__init__(self, msg) class AlreadyBound(SPARQLError): def __init__(self): SPARQLError.__init__(self) class SPARQLTypeError(SPARQLError): def __init__(self, msg): SPARQLError.__init__(self, msg) class Bindings(MutableMapping): def __init__(self, outer=None, d=[]): self._d = dict(d) self.outer = outer def __getitem__(self, key): try: return self._d[key] except KeyError: if not self.outer: raise return self.outer[key] def __contains__(self, key): try: self[key] return True except KeyError: return False def __setitem__(self, key, value): self._d[key] = value def __delitem__(self, key): raise Exception("DelItem is not implemented!") def __len__(self): i = 0 for x in self: i += 1 return i def __iter__(self): d = self while d is not None: for i in dict.__iter__(d._d): yield i d = d.outer def __str__(self): return "Bindings({" + ", ".join((k, self[k]) for k in self) + "})" def __repr__(self): return text_type(self) class FrozenDict(Mapping): def __init__(self, *args, **kwargs): self._d = dict(*args, **kwargs) self._hash = None def __iter__(self): return iter(self._d) def __len__(self): return len(self._d) def __getitem__(self, key): return self._d[key] def __hash__(self): if self._hash is None: self._hash = 0 for key, value in iteritems(self): self._hash ^= hash(key) self._hash ^= hash(value) return self._hash def project(self, vars): return FrozenDict( (x for x in iteritems(self) if x[0] in vars)) def disjointDomain(self, other): return not bool(set(self).intersection(other)) def compatible(self, other): for k in self: try: if self[k] != other[k]: return False except KeyError: pass return True def merge(self, other): res = FrozenDict( itertools.chain(iteritems(self), iteritems(other))) return res def __str__(self): return str(self._d) def __repr__(self): return repr(self._d) class FrozenBindings(FrozenDict): def __init__(self, ctx, *args, **kwargs): FrozenDict.__init__(self, *args, **kwargs) self.ctx = ctx def __getitem__(self, key): if not isinstance(key, Node): key = Variable(key) if not type(key) in (BNode, Variable): return key if key not in self._d: return self.ctx.initBindings[key] else: return self._d[key] def project(self, vars): return FrozenBindings( self.ctx, (x for x in iteritems(self) if x[0] in vars)) def merge(self, other): res = FrozenBindings( self.ctx, itertools.chain(iteritems(self), iteritems(other))) return res def _now(self): return self.ctx.now def _bnodes(self): return self.ctx.bnodes def _prologue(self): return self.ctx.prologue prologue = property(_prologue) bnodes = property(_bnodes) now = property(_now) def forget(self, before, _except=None): if not _except: _except = [] return FrozenBindings( self.ctx, ( x for x in iteritems(self) if ( x[0] in _except or x[0] in self.ctx.initBindings or before[x[0]] is None)))
Apache License 2.0
microsoft/oscar
oscar/utils/cider/pyciderevalcap/cider/cider_scorer.py
CiderScorer.cook_append
python
def cook_append(self, test, refs):
    if refs is not None:
        self.crefs.append(cook_refs(refs))
        if test is not None:
            self.ctest.append(cook_test(test))
        else:
            self.ctest.append(None)
called by constructor and __iadd__ to avoid creating new instances.
https://github.com/microsoft/oscar/blob/423997b099650914a37203110a244e2de0426c69/oscar/utils/cider/pyciderevalcap/cider/cider_scorer.py#L81-L89
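A minimal sketch of the path through cook_append via the constructor (corpus mode, so no precomputed document-frequency pickle is loaded); the import path follows function_path above, and the sentences are placeholders.

from oscar.utils.cider.pyciderevalcap.cider.cider_scorer import CiderScorer

scorer = CiderScorer(df_mode="corpus",
                     test="a cat sits on the mat",
                     refs=["a cat is sitting on the mat",
                           "there is a cat on the mat"])
# cook_append has tokenized both sides into n-gram count dictionaries:
print(len(scorer.ctest), len(scorer.crefs))   # 1 1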
from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import six from six.moves import cPickle from collections import defaultdict import numpy as np import math import os def precook(s, n=4, out=False): words = s.split() counts = defaultdict(int) for k in range(1,n+1): for i in range(len(words)-k+1): ngram = tuple(words[i:i+k]) counts[ngram] += 1 return counts def cook_refs(refs, n=4): return [precook(ref, n) for ref in refs] def cook_test(test, n=4): return precook(test, n, True) class CiderScorer(object): def copy(self): new = CiderScorer(n=self.n) new.ctest = copy.copy(self.ctest) new.crefs = copy.copy(self.crefs) return new def __init__(self, df_mode="corpus", test=None, refs=None, n=4, sigma=6.0): self.n = n self.sigma = sigma self.crefs = [] self.ctest = [] self.df_mode = df_mode self.ref_len = None if self.df_mode != "corpus": pkl_file = cPickle.load(open(os.path.join('data', df_mode + '.p'),'rb'), **(dict(encoding='latin1') if six.PY3 else {})) self.ref_len = np.log(float(pkl_file['ref_len'])) self.document_frequency = pkl_file['document_frequency'] self.cook_append(test, refs) def clear(self): self.crefs = [] self.ctest = []
MIT License
pennlinc/aslprep
aslprep/sdcflows/interfaces/fmap.py
get_trt
python
def get_trt(in_meta, in_file=None):
    trt = in_meta.get('TotalReadoutTime', None)
    if trt is not None:
        return trt

    acc = float(in_meta.get('ParallelReductionFactorInPlane', 1.0))
    npe = nb.load(in_file).shape[_get_pe_index(in_meta)]
    etl = npe // acc

    ees = in_meta.get('EffectiveEchoSpacing', None)
    if ees is not None:
        return ees * (etl - 1)

    wfs = in_meta.get('WaterFatShift', None)
    if wfs is not None:
        fstrength = in_meta['MagneticFieldStrength']
        wfd_ppm = 3.4
        g_ratio_mhz_t = 42.57
        wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t
        return wfs / wfs_hz

    raise ValueError('Unknown total-readout time specification')
Extract the *total readout time* :math:`t_\text{RO}` from BIDS.

Calculate the *total readout time* for an input
:abbr:`EPI (echo-planar imaging)` scan.

There are several procedures to calculate the total readout time. The basic
one is that a ``TotalReadoutTime`` field is set in the JSON sidecar. The
following examples use an ``'epi.nii.gz'`` file-stub which has 90 pixels in
the j-axis encoding direction.

>>> meta = {'TotalReadoutTime': 0.02596}
>>> get_trt(meta)
0.02596

If the *effective echo spacing* :math:`t_\text{ees}` (``EffectiveEchoSpacing``
BIDS field) is provided, then the total readout time can be calculated reading
the number of voxels along the readout direction :math:`T_\text{ro}` and the
parallel acceleration factor of the EPI :math:`f_\text{acc}`.

.. math ::

    T_\text{ro} = t_\text{ees} \, (N_\text{PE} / f_\text{acc} - 1)

>>> meta = {'EffectiveEchoSpacing': 0.00059,
...         'PhaseEncodingDirection': 'j-',
...         'ParallelReductionFactorInPlane': 2}
>>> get_trt(meta, in_file='epi.nii.gz')
0.02596

Some vendors, like Philips, store different parameter names:

>>> meta = {'WaterFatShift': 8.129,
...         'MagneticFieldStrength': 3,
...         'PhaseEncodingDirection': 'j-',
...         'ParallelReductionFactorInPlane': 2}
>>> get_trt(meta, in_file='epi.nii.gz')
0.018721183563864822
https://github.com/pennlinc/aslprep/blob/cc7198cfc65b881f10d02d3fe455695de80af46c/aslprep/sdcflows/interfaces/fmap.py#L465-L532
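Working through the EffectiveEchoSpacing branch with the doctest's numbers (90 phase-encoding samples, in-plane acceleration factor 2, 0.59 ms effective echo spacing):

etl = 90 // 2.0              # 45.0 echoes actually acquired
trt = 0.00059 * (etl - 1)    # 0.00059 * 44 = 0.02596 s, matching the doctest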
import numpy as np import nibabel as nb from nipype import logging from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.base import ( BaseInterfaceInputSpec, TraitedSpec, File, isdefined, traits, SimpleInterface) LOGGER = logging.getLogger('nipype.interface') class _SubtractPhasesInputSpec(BaseInterfaceInputSpec): in_phases = traits.List(File(exists=True), min=1, max=2, desc='input phase maps') in_meta = traits.List(traits.Dict(), min=1, max=2, desc='metadata corresponding to the inputs') class _SubtractPhasesOutputSpec(TraitedSpec): phase_diff = File(exists=True, desc='phase difference map') metadata = traits.Dict(desc='output metadata') class SubtractPhases(SimpleInterface): input_spec = _SubtractPhasesInputSpec output_spec = _SubtractPhasesOutputSpec def _run_interface(self, runtime): if len(self.inputs.in_phases) != len(self.inputs.in_meta): raise ValueError( 'Length of input phase-difference maps and metadata files ' 'should match.') if len(self.inputs.in_phases) == 1: self._results['phase_diff'] = self.inputs.in_phases[0] self._results['metadata'] = self.inputs.in_meta[0] return runtime self._results['phase_diff'], self._results['metadata'] = _subtract_phases(self.inputs.in_phases, self.inputs.in_meta, newpath=runtime.cwd) return runtime class _FieldEnhanceInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') in_mask = File(exists=True, desc='brain mask') in_magnitude = File(exists=True, desc='input magnitude') unwrap = traits.Bool(False, usedefault=True, desc='run phase unwrap') despike = traits.Bool(True, usedefault=True, desc='run despike filter') bspline_smooth = traits.Bool(True, usedefault=True, desc='run 3D bspline smoother') mask_erode = traits.Int(1, usedefault=True, desc='mask erosion iterations') despike_threshold = traits.Float(0.2, usedefault=True, desc='mask erosion iterations') num_threads = traits.Int(1, usedefault=True, nohash=True, desc='number of jobs') class _FieldEnhanceOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') out_unwrapped = File(desc='unwrapped fieldmap') class FieldEnhance(SimpleInterface): input_spec = _FieldEnhanceInputSpec output_spec = _FieldEnhanceOutputSpec def _run_interface(self, runtime): from scipy import ndimage as sim fmap_nii = nb.load(self.inputs.in_file) data = np.squeeze(fmap_nii.get_fdata(dtype='float32')) if self.inputs.despike: data = _despike2d(data, self.inputs.despike_threshold) mask = None if isdefined(self.inputs.in_mask): masknii = nb.load(self.inputs.in_mask) mask = np.asanyarray(masknii.dataobj).astype('uint8') if self.inputs.mask_erode > 0: struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 1) mask = sim.binary_erosion( mask, struc, iterations=self.inputs.mask_erode ).astype(np.uint8) self._results['out_file'] = fname_presuffix( self.inputs.in_file, suffix='_enh', newpath=runtime.cwd) datanii = nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header) if self.inputs.unwrap: data = _unwrap(data, self.inputs.in_magnitude, mask) self._results['out_unwrapped'] = fname_presuffix( self.inputs.in_file, suffix='_unwrap', newpath=runtime.cwd) nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header).to_filename( self._results['out_unwrapped']) if not self.inputs.bspline_smooth: datanii.to_filename(self._results['out_file']) return runtime else: from ..utils import bspline as fbsp from statsmodels.robust.scale import mad bspobj = fbsp.BSplineFieldmap(datanii, weights=mask, njobs=self.inputs.num_threads) bspobj.fit() smoothed1 = 
bspobj.get_smoothed() diffmap = data - smoothed1.get_fdata(dtype='float32') sderror = mad(diffmap[mask > 0]) LOGGER.info('SD of error after B-Spline fitting is %f', sderror) errormask = np.zeros_like(diffmap) errormask[np.abs(diffmap) > (10 * sderror)] = 1 errormask *= mask nslices = 0 try: errorslice = np.squeeze(np.argwhere(errormask.sum(0).sum(0) > 0)) nslices = errorslice[-1] - errorslice[0] except IndexError: pass if nslices > 1: diffmapmsk = mask[..., errorslice[0]:errorslice[-1]] diffmapnii = nb.Nifti1Image( diffmap[..., errorslice[0]:errorslice[-1]] * diffmapmsk, datanii.affine, datanii.header) bspobj2 = fbsp.BSplineFieldmap(diffmapnii, knots_zooms=[24., 24., 4.], njobs=self.inputs.num_threads) bspobj2.fit() smoothed2 = bspobj2.get_smoothed().get_fdata(dtype='float32') final = smoothed1.get_fdata(dtype='float32').copy() final[..., errorslice[0]:errorslice[-1]] += smoothed2 else: final = smoothed1.get_fdata(dtype='float32') nb.Nifti1Image(final, datanii.affine, datanii.header).to_filename( self._results['out_file']) return runtime class _FieldToRadSInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') fmap_range = traits.Float(desc='range of input field map') class _FieldToRadSOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') fmap_range = traits.Float(desc='range of input field map') class FieldToRadS(SimpleInterface): input_spec = _FieldToRadSInputSpec output_spec = _FieldToRadSOutputSpec def _run_interface(self, runtime): fmap_range = None if isdefined(self.inputs.fmap_range): fmap_range = self.inputs.fmap_range self._results['out_file'], self._results['fmap_range'] = _torads( self.inputs.in_file, fmap_range, newpath=runtime.cwd) return runtime class _FieldToHzInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') range_hz = traits.Float(mandatory=True, desc='range of input field map') class _FieldToHzOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') class FieldToHz(SimpleInterface): input_spec = _FieldToHzInputSpec output_spec = _FieldToHzOutputSpec def _run_interface(self, runtime): self._results['out_file'] = _tohz( self.inputs.in_file, self.inputs.range_hz, newpath=runtime.cwd) return runtime class _Phasediff2FieldmapInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') metadata = traits.Dict(mandatory=True, desc='BIDS metadata dictionary') class _Phasediff2FieldmapOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') class Phasediff2Fieldmap(SimpleInterface): input_spec = _Phasediff2FieldmapInputSpec output_spec = _Phasediff2FieldmapOutputSpec def _run_interface(self, runtime): self._results['out_file'] = phdiff2fmap( self.inputs.in_file, _delta_te(self.inputs.metadata), newpath=runtime.cwd) return runtime class _PhaseMap2radsInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input (wrapped) phase map') class _PhaseMap2radsOutputSpec(TraitedSpec): out_file = File(desc='the phase map in the range 0 - 6.28') class PhaseMap2rads(SimpleInterface): input_spec = _PhaseMap2radsInputSpec output_spec = _PhaseMap2radsOutputSpec def _run_interface(self, runtime): self._results['out_file'] = au2rads( self.inputs.in_file, newpath=runtime.cwd) return runtime class _FUGUEvsm2ANTSwarpInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input displacements field map') pe_dir = traits.Enum('i', 'i-', 'j', 'j-', 'k', 'k-', 
desc='phase-encoding axis') class _FUGUEvsm2ANTSwarpOutputSpec(TraitedSpec): out_file = File(desc='the output warp field') fieldmap = File(desc='field map in mm') class FUGUEvsm2ANTSwarp(SimpleInterface): _dtype = '<f4' input_spec = _FUGUEvsm2ANTSwarpInputSpec output_spec = _FUGUEvsm2ANTSwarpOutputSpec def _run_interface(self, runtime): phaseEncDim = {'i': 0, 'j': 1, 'k': 2}[self.inputs.pe_dir[0]] phaseEncSign = [1.0, -1.0][len(self.inputs.pe_dir) != 2] nii = nb.load(self.inputs.in_file) hdr = nii.header.copy() hdr.set_data_dtype(self._dtype) data = nii.get_fdata(dtype=self._dtype) aff = np.diag([1.0, 1.0, -1.0]) if np.linalg.det(aff) < 0 and phaseEncDim != 0: aff *= -1.0 aff = aff.dot(nii.affine[:3, :3]) data *= phaseEncSign * nii.header.get_zooms()[phaseEncDim] self._results['fieldmap'] = fname_presuffix( self.inputs.in_file, suffix='_units-mm_fieldmap', newpath=runtime.cwd) nb.Nifti1Image(data, nii.affine, hdr).to_filename(self._results['fieldmap']) zeros = np.zeros_like(data, dtype=self._dtype) field = [zeros, zeros] field.insert(phaseEncDim, data) field = np.stack(field, -1) hdr.set_intent('vector', (), '') self._results['out_file'] = fname_presuffix( self.inputs.in_file, suffix='_desc-field_sdcwarp', newpath=runtime.cwd) nb.Nifti1Image(field[:, :, :, np.newaxis, :], nii.affine, hdr).to_filename( self._results['out_file']) return runtime def _despike2d(data, thres, neigh=None): if neigh is None: neigh = [-1, 0, 1] nslices = data.shape[-1] for k in range(nslices): data2d = data[..., k] for i in range(data2d.shape[0]): for j in range(data2d.shape[1]): vals = [] thisval = data2d[i, j] for ii in neigh: for jj in neigh: try: vals.append(data2d[i + ii, j + jj]) except IndexError: pass vals = np.array(vals) patch_range = vals.max() - vals.min() patch_med = np.median(vals) if (patch_range > 1e-6 and (abs(thisval - patch_med) / patch_range) > thres): data[i, j, k] = patch_med return data def _unwrap(fmap_data, mag_file, mask=None): from math import pi from nipype.interfaces.fsl import PRELUDE magnii = nb.load(mag_file) if mask is None: mask = np.ones_like(fmap_data, dtype=np.uint8) fmapmax = max(abs(fmap_data[mask > 0].min()), fmap_data[mask > 0].max()) fmap_data *= pi / fmapmax nb.Nifti1Image(fmap_data, magnii.affine).to_filename('fmap_rad.nii.gz') nb.Nifti1Image(mask, magnii.affine).to_filename('fmap_mask.nii.gz') nb.Nifti1Image(magnii.get_fdata(dtype='float32'), magnii.affine).to_filename('fmap_mag.nii.gz') res = PRELUDE(phase_file='fmap_rad.nii.gz', magnitude_file='fmap_mag.nii.gz', mask_file='fmap_mask.nii.gz').run() unwrapped = nb.load( res.outputs.unwrapped_phase_file).get_fdata(dtype='float32') * (fmapmax / pi) return unwrapped def get_ees(in_meta, in_file=None): import nibabel as nb from sdcflows.interfaces.fmap import _get_pe_index ees = in_meta.get('EffectiveEchoSpacing', None) if ees is not None: return ees acc = float(in_meta.get('ParallelReductionFactorInPlane', 1.0)) npe = nb.load(in_file).shape[_get_pe_index(in_meta)] etl = npe // acc trt = in_meta.get('TotalReadoutTime', None) if trt is not None: return trt / (etl - 1) wfs = in_meta.get('WaterFatShift', None) if wfs is not None: fstrength = in_meta['MagneticFieldStrength'] wfd_ppm = 3.4 g_ratio_mhz_t = 42.57 wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t return wfs / (wfs_hz * etl) raise ValueError('Unknown effective echo-spacing specification')
BSD 3-Clause New or Revised License
yaoyao-liu/class-incremental-learning
adaptive-aggregation-networks/trainer/base_trainer.py
BaseTrainer.init_fusion_vars
python
def init_fusion_vars(self):
    self.fusion_vars = nn.ParameterList()
    if self.args.dataset == 'cifar100':
        if self.args.branch_mode == 'dual':
            for idx in range(3):
                self.fusion_vars.append(nn.Parameter(torch.FloatTensor([0.5])))
        elif self.args.branch_mode == 'single':
            for idx in range(3):
                self.fusion_vars.append(nn.Parameter(torch.FloatTensor([1.0])))
        else:
            raise ValueError('Please set correct mode.')
        self.fusion_vars.to(self.device)
    elif self.args.dataset == 'imagenet_sub' or self.args.dataset == 'imagenet':
        if self.args.branch_mode == 'dual':
            for idx in range(4):
                self.fusion_vars.append(nn.Parameter(torch.FloatTensor([0.5])))
        elif self.args.branch_mode == 'single':
            for idx in range(4):
                self.fusion_vars.append(nn.Parameter(torch.FloatTensor([1.0])))
        else:
            raise ValueError('Please set correct mode.')
        self.fusion_vars.to(self.device)
    else:
        raise ValueError('Please set correct dataset.')
The function to initialize the aggregation weights.
https://github.com/yaoyao-liu/class-incremental-learning/blob/fe9a7bee0bf46d69f70373934feb87a8f7134d4b/adaptive-aggregation-networks/trainer/base_trainer.py#L182-L215
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable from tensorboardX import SummaryWriter import numpy as np import time import os import os.path as osp import sys import copy import argparse from PIL import Image try: import cPickle as pickle except: import pickle import math import utils.misc import models.modified_resnet_cifar as modified_resnet_cifar import models.modified_resnetmtl_cifar as modified_resnetmtl_cifar import models.modified_resnet as modified_resnet import models.modified_resnetmtl as modified_resnetmtl import models.modified_linear as modified_linear from utils.imagenet.utils_dataset import split_images_labels from utils.imagenet.utils_dataset import merge_images_labels from utils.incremental.compute_features import compute_features from utils.incremental.compute_accuracy import compute_accuracy from utils.misc import process_mnemonics import warnings warnings.filterwarnings('ignore') class BaseTrainer(object): def __init__(self, the_args): self.args = the_args self.set_save_path() self.set_cuda_device() self.set_dataset_variables() def set_save_path(self): self.log_dir = './logs/' if not osp.exists(self.log_dir): os.mkdir(self.log_dir) self.save_path = self.log_dir + self.args.dataset + '_nfg' + str(self.args.nb_cl_fg) + '_ncls' + str(self.args.nb_cl) + '_nproto' + str(self.args.nb_protos) + '_' + self.args.baseline + '_' + self.args.branch_mode + '_b1' + self.args.branch_1 if self.args.branch_mode == 'dual': self.save_path += '_b2' + self.args.branch_2 if self.args.dynamic_budget: self.save_path += '_dynamic' else: self.save_path += '_fixed' self.save_path += '_' + str(self.args.ckpt_label) if not osp.exists(self.save_path): os.mkdir(self.save_path) def set_cuda_device(self): self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def set_dataset_variables(self): if self.args.dataset == 'cifar100': self.transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),]) self.transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),]) self.trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=self.transform_train) self.testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=self.transform_test) self.evalset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=self.transform_test) self.balancedset = torchvision.datasets.CIFAR100(root='./data', train=False, download=False, transform=self.transform_train) self.network = modified_resnet_cifar.resnet32 self.network_mtl = modified_resnetmtl_cifar.resnetmtl32 self.lr_strat = [int(self.args.epochs*0.5), int(self.args.epochs*0.75)] self.dictionary_size = 500 elif self.args.dataset == 'imagenet_sub' or self.args.dataset == 'imagenet': traindir = os.path.join(self.args.data_dir, 'train') valdir = os.path.join(self.args.data_dir, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) self.trainset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize,])) 
self.testset = datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize,])) self.evalset = datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize,])) self.balancedset = datasets.ImageFolder(traindir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize,])) if self.args.imgnet_backbone == 'resnet18': self.network = modified_resnet.resnet18 self.network_mtl = modified_resnetmtl.resnetmtl18 elif self.args.imgnet_backbone == 'resnet34': self.network = modified_resnet.resnet34 self.network_mtl = modified_resnetmtl.resnetmtl34 else: raise ValueError('Please set the correct backbone.') self.lr_strat = [int(self.args.epochs*0.333), int(self.args.epochs*0.667)] self.dictionary_size = 1500 else: raise ValueError('Please set the correct dataset.') def map_labels(self, order_list, Y_set): map_Y = [] for idx in Y_set: map_Y.append(order_list.index(idx)) map_Y = np.array(map_Y) return map_Y def set_dataset(self): if self.args.dataset == 'cifar100': X_train_total = np.array(self.trainset.data) Y_train_total = np.array(self.trainset.targets) X_valid_total = np.array(self.testset.data) Y_valid_total = np.array(self.testset.targets) elif self.args.dataset == 'imagenet_sub' or self.args.dataset == 'imagenet': X_train_total, Y_train_total = split_images_labels(self.trainset.imgs) X_valid_total, Y_valid_total = split_images_labels(self.testset.imgs) else: raise ValueError('Please set the correct dataset.') return X_train_total, Y_train_total, X_valid_total, Y_valid_total
MIT License
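A minimal PyTorch sketch (not from the repository) of how learnable aggregation weights like the ones above could be applied; the two branch tensors and the weighting formula are assumptions for illustration only.

import torch
import torch.nn as nn

# Hypothetical: one learnable weight per residual level, initialized to 0.5
# as in the dual-branch case above.
fusion_vars = nn.ParameterList(
    [nn.Parameter(torch.FloatTensor([0.5])) for _ in range(3)]
)

# Assumed outputs of two branches at one residual level.
out_branch1 = torch.randn(8, 16, 32, 32)
out_branch2 = torch.randn(8, 16, 32, 32)

# Weighted aggregation of the two branches at level 0 (broadcast multiply).
fused = fusion_vars[0] * out_branch1 + (1 - fusion_vars[0]) * out_branch2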
openstack/swift
swift/common/middleware/symlink.py
SymlinkContainerContext.handle_container
python
def handle_container(self, req, start_response):
    app_resp = self._app_call(req.environ)

    if req.method == 'GET' and is_success(self._get_status_int()):
        app_resp = self._process_json_resp(app_resp, req)

    start_response(self._response_status,
                   self._response_headers,
                   self._response_exc_info)
    return app_resp
Handle container requests.

:param req: a :class:`~swift.common.swob.Request`
:param start_response: start_response function
:return: Response Iterator after start_response called.
https://github.com/openstack/swift/blob/dbd0960aeebedc0487699d3ca2a4d6f21e7ed524/swift/common/middleware/symlink.py#L338-L355
import json import os from cgi import parse_header from swift.common.utils import get_logger, register_swift_info, split_path, MD5_OF_EMPTY_STRING, close_if_possible, closing_if_possible, config_true_value, drain_and_close from swift.common.constraints import check_account_format from swift.common.wsgi import WSGIContext, make_subrequest, make_pre_authed_request from swift.common.request_helpers import get_sys_meta_prefix, check_path_header, get_container_update_override_key, update_ignore_range_header from swift.common.swob import Request, HTTPBadRequest, HTTPTemporaryRedirect, HTTPException, HTTPConflict, HTTPPreconditionFailed, wsgi_quote, wsgi_unquote, status_map, normalize_etag from swift.common.http import is_success, HTTP_NOT_FOUND from swift.common.exceptions import LinkIterError from swift.common.header_key_dict import HeaderKeyDict DEFAULT_SYMLOOP_MAX = 2 TGT_OBJ_SYMLINK_HDR = 'x-symlink-target' TGT_ACCT_SYMLINK_HDR = 'x-symlink-target-account' TGT_ETAG_SYMLINK_HDR = 'x-symlink-target-etag' TGT_BYTES_SYMLINK_HDR = 'x-symlink-target-bytes' TGT_OBJ_SYSMETA_SYMLINK_HDR = get_sys_meta_prefix('object') + 'symlink-target' TGT_ACCT_SYSMETA_SYMLINK_HDR = get_sys_meta_prefix('object') + 'symlink-target-account' TGT_ETAG_SYSMETA_SYMLINK_HDR = get_sys_meta_prefix('object') + 'symlink-target-etag' TGT_BYTES_SYSMETA_SYMLINK_HDR = get_sys_meta_prefix('object') + 'symlink-target-bytes' SYMLOOP_EXTEND = get_sys_meta_prefix('object') + 'symloop-extend' ALLOW_RESERVED_NAMES = get_sys_meta_prefix('object') + 'allow-reserved-names' def _validate_and_prep_request_headers(req): error_body = 'X-Symlink-Target header must be of the form ' '<container name>/<object name>' if wsgi_unquote(req.headers[TGT_OBJ_SYMLINK_HDR]).startswith('/'): raise HTTPPreconditionFailed( body=error_body, request=req, content_type='text/plain') container, obj = check_path_header( req, TGT_OBJ_SYMLINK_HDR, 2, error_body) req.headers[TGT_OBJ_SYMLINK_HDR] = wsgi_quote('%s/%s' % (container, obj)) account = check_account_format( req, wsgi_unquote(req.headers[TGT_ACCT_SYMLINK_HDR])) if TGT_ACCT_SYMLINK_HDR in req.headers else None _junk, req_acc, req_cont, req_obj = req.split_path(4, 4, True) if account: req.headers[TGT_ACCT_SYMLINK_HDR] = wsgi_quote(account) else: account = req_acc if (account, container, obj) == (req_acc, req_cont, req_obj): raise HTTPBadRequest( body='Symlink cannot target itself', request=req, content_type='text/plain') etag = normalize_etag(req.headers.get(TGT_ETAG_SYMLINK_HDR, None)) if etag and any(c in etag for c in ';"\\'): raise HTTPBadRequest( body='Bad %s format' % TGT_ETAG_SYMLINK_HDR.title(), request=req, content_type='text/plain') if not (etag or req.headers.get('Content-Type')): req.headers['Content-Type'] = 'application/symlink' return '/v1/%s/%s/%s' % (account, container, obj), etag def symlink_usermeta_to_sysmeta(headers): for user_hdr, sysmeta_hdr in ( (TGT_OBJ_SYMLINK_HDR, TGT_OBJ_SYSMETA_SYMLINK_HDR), (TGT_ACCT_SYMLINK_HDR, TGT_ACCT_SYSMETA_SYMLINK_HDR)): if user_hdr in headers: headers[sysmeta_hdr] = headers.pop(user_hdr) def symlink_sysmeta_to_usermeta(headers): for user_hdr, sysmeta_hdr in ( (TGT_OBJ_SYMLINK_HDR, TGT_OBJ_SYSMETA_SYMLINK_HDR), (TGT_ACCT_SYMLINK_HDR, TGT_ACCT_SYSMETA_SYMLINK_HDR), (TGT_ETAG_SYMLINK_HDR, TGT_ETAG_SYSMETA_SYMLINK_HDR), (TGT_BYTES_SYMLINK_HDR, TGT_BYTES_SYSMETA_SYMLINK_HDR)): if sysmeta_hdr in headers: headers[user_hdr] = headers.pop(sysmeta_hdr) class SymlinkContainerContext(WSGIContext): def __init__(self, wsgi_app, logger): super(SymlinkContainerContext, 
self).__init__(wsgi_app) self.logger = logger
Apache License 2.0
craylabs/smartsim
smartsim/entity/ensemble.py
Ensemble._read_model_parameters
python
def _read_model_parameters(self):
    if not isinstance(self.params, dict):
        raise TypeError(
            "Ensemble initialization argument 'params' must be of type dict"
        )

    param_names = []
    parameters = []
    for name, val in self.params.items():
        param_names.append(name)
        if isinstance(val, list):
            parameters.append(val)
        elif isinstance(val, (int, str)):
            parameters.append([val])
        else:
            raise TypeError(
                "Incorrect type for ensemble parameters\n" +
                "Must be list, int, or string."
            )
    return param_names, parameters
Take in the parameters given to the ensemble and prepare to
create models for the ensemble

:raises TypeError: if params are of the wrong type
:return: param names and values for permutation strategy
:rtype: tuple[list, list]
https://github.com/craylabs/smartsim/blob/0c4b198650a026d7bd960f38b1866fb3b8c59a96/smartsim/entity/ensemble.py#L255-L281
from copy import deepcopy from os import getcwd from smartsim.error.errors import SmartSimError from ..error import EntityExistsError, SSUnsupportedError, UserStrategyError from ..settings.settings import BatchSettings, RunSettings from ..utils import get_logger from ..utils.helpers import init_default from .entityList import EntityList from .model import Model from .strategies import create_all_permutations, random_permutations, step_values logger = get_logger(__name__) class Ensemble(EntityList): def __init__( self, name, params, batch_settings=None, run_settings=None, perm_strat="all_perm", **kwargs, ): self.params = init_default({}, params, dict) self._key_prefixing_enabled = True self.batch_settings = init_default({}, batch_settings, BatchSettings) self.run_settings = init_default({}, run_settings, RunSettings) super().__init__(name, getcwd(), perm_strat=perm_strat, **kwargs) @property def models(self): return self.entities def _initialize_entities(self, **kwargs): strategy = self._set_strategy(kwargs.pop("perm_strat")) replicas = kwargs.pop("replicas", None) if self.params: if self.run_settings: names, params = self._read_model_parameters() all_model_params = strategy(names, params, **kwargs) if not isinstance(all_model_params, list): raise UserStrategyError(strategy) for i, param_set in enumerate(all_model_params): if not isinstance(param_set, dict): raise UserStrategyError(strategy) model_name = "_".join((self.name, str(i))) model = Model( model_name, param_set, self.path, run_settings=deepcopy(self.run_settings), ) model.enable_key_prefixing() self.add_model(model) else: raise SmartSimError( "Ensembles supplied with 'params' argument must be provided run settings" ) else: if self.run_settings: if replicas: for i in range(replicas): model_name = "_".join((self.name, str(i))) model = Model( model_name, {}, self.path, run_settings=deepcopy(self.run_settings), ) model.enable_key_prefixing() logger.debug( f"Created ensemble member: {model_name} in {self.name}" ) self.add_model(model) else: raise SmartSimError( "Ensembles without 'params' or 'replicas' argument to expand into members cannot be given run settings" ) elif not self.batch_settings: raise SmartSimError( "Ensemble must be provided batch settings or run settings" ) else: logger.info("Empty ensemble created for batch launch") def add_model(self, model): if not isinstance(model, Model): raise TypeError( f"Argument to add_model was of type {type(model)}, not Model" ) if model in self.entities: raise EntityExistsError( f"Model {model.name} already exists in ensemble {self.name}" ) self.entities.append(model) def register_incoming_entity(self, incoming_entity): for model in self.entities: model.register_incoming_entity(incoming_entity) def enable_key_prefixing(self): for model in self.entities: model.enable_key_prefixing() def query_key_prefixing(self): return all([model.query_key_prefixing() for model in self.entities]) def attach_generator_files(self, to_copy=None, to_symlink=None, to_configure=None): for model in self.entities: model.attach_generator_files( to_copy=to_copy, to_symlink=to_symlink, to_configure=to_configure ) def _set_strategy(self, strategy): if strategy == "all_perm": return create_all_permutations if strategy == "step": return step_values if strategy == "random": return random_permutations if callable(strategy): return strategy raise SSUnsupportedError( f"Permutation strategy given is not supported: {strategy}" )
BSD 2-Clause Simplified License
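A small standalone sketch mirroring the parameter-expansion logic above (no SmartSim import; the parameter names and values are made up):

# Hypothetical ensemble parameters; ints and strings are wrapped in
# single-element lists, existing lists are kept as-is.
params = {"STEPS": [10, 20], "THERMO": 70, "MODE": "fast"}

param_names = []
parameters = []
for name, val in params.items():
    param_names.append(name)
    parameters.append(val if isinstance(val, list) else [val])

print(param_names)  # ['STEPS', 'THERMO', 'MODE']
print(parameters)   # [[10, 20], [70], ['fast']]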
thenewboston-developers/validator
v1/tasks/bank_confirmation_services.py
create_confirmation_service
python
def create_confirmation_service(*, bank, confirmation_service_amount):
    current_confirmation_expiration = bank.confirmation_expiration
    now = timezone.now()

    if not current_confirmation_expiration:
        start = now
    else:
        start = max([current_confirmation_expiration, now])

    self_configuration = get_self_configuration(exception_class=RuntimeError)
    daily_confirmation_rate = self_configuration.daily_confirmation_rate

    confirmation_service_amount = int(confirmation_service_amount)
    days_purchased = confirmation_service_amount / daily_confirmation_rate
    seconds_purchased = days_purchased * 86400
    seconds_purchased = int(seconds_purchased)

    end = start + relativedelta(seconds=seconds_purchased)
    BankConfirmationService.objects.create(bank=bank, end=end, start=start)

    bank.confirmation_expiration = end
    bank.save()

    send_signed_post_request.delay(
        data={
            'end': str(end),
            'start': str(start)
        },
        ip_address=bank.ip_address,
        port=bank.port,
        protocol=bank.protocol,
        url_path='/validator_confirmation_services'
    )

    return seconds_purchased
Create a confirmation service for the given bank
https://github.com/thenewboston-developers/validator/blob/7994f5ca6cef02e4e548a857e62162ac69ce5332/v1/tasks/bank_confirmation_services.py#L11-L45
from celery import shared_task from dateutil.relativedelta import relativedelta from django.utils import timezone from v1.bank_confirmation_services.models.bank_confirmation_service import BankConfirmationService from v1.banks.models.bank import Bank from v1.self_configurations.helpers.self_configuration import get_self_configuration from .signed_requests import send_signed_post_request
MIT License
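The purchased-time arithmetic above reduces to a short calculation; a worked sketch with invented numbers:

# Hypothetical rates: the daily rate is how many coins buy one day of service.
daily_confirmation_rate = 100        # coins per day of service (assumed)
confirmation_service_amount = 250    # coins sent by the bank (assumed)

days_purchased = confirmation_service_amount / daily_confirmation_rate  # 2.5
seconds_purchased = int(days_purchased * 86400)                         # 216000

print(seconds_purchased)  # 216000 seconds, i.e. 2.5 days of service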
andriyko/sublime-robot-framework-assistant
command_helper/utils/get_text.py
get_object_from_line
python
def get_object_from_line(line, prefix, column):
    re_str = r'(?:\s)([^\s]+)(?:\.{0})$'.format(prefix)
    match = re.search(re_str, line[:column])
    if match:
        return match.group(1)
    else:
        return None
Returns the object name after the prefix

``line`` -- Text in the line where cursor is.
``prefix`` -- Prefix determined by sublime.
``column`` -- Index of the cursor in the line.
https://github.com/andriyko/sublime-robot-framework-assistant/blob/fcd3323b5631d4abe61e8e8c728d0f4329640a64/command_helper/utils/get_text.py#L23-L35
import re def get_line(view): sel = view.sel()[0] line = view.substr(view.line(sel)) row, column = view.rowcol(sel.begin()) return line, column def get_prefix(line, column): m = re.search('\S*$', line[:column]) rside = line[column:] match = m.group(0) return {'match': match, 'rside': rside}
MIT License
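A self-contained demo of the regex above (the example line, prefix, and cursor positions are made up):

import re

def get_object_from_line(line, prefix, column):
    # Matches "<whitespace><object>.<prefix>" ending at the cursor position.
    re_str = r'(?:\s)([^\s]+)(?:\.{0})$'.format(prefix)
    match = re.search(re_str, line[:column])
    return match.group(1) if match else None

line = '    Log    BuiltIn.Should'
print(get_object_from_line(line, 'Should', len(line)))  # BuiltIn
print(get_object_from_line(line, 'Should', 6))          # None (no "<object>." before the cursor)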
jwcook/naturtag
naturtag/widgets/autocomplete.py
DropdownContainer.dismiss
python
def dismiss(self, *args):
    if self.view.data:
        self._data = self.view.data
    self.view.data = []
    self.is_open = False
Close the dropdown; temporarily store its data and remove it from the RecycleView so the view can be resized
https://github.com/jwcook/naturtag/blob/34701c5be880baf5a1c4b1a6e43e28168de8c437/naturtag/widgets/autocomplete.py#L207-L212
import asyncio from collections.abc import Mapping from logging import getLogger from kivy.clock import Clock from kivy.core.window import Window from kivy.metrics import dp from kivy.properties import ( BooleanProperty, DictProperty, NumericProperty, ObjectProperty, StringProperty, ) from kivy.uix.behaviors import FocusBehavior from kivy.uix.recycleboxlayout import RecycleBoxLayout from kivy.uix.recycleview.layout import LayoutSelectionBehavior from kivy.uix.recycleview.views import RecycleDataViewBehavior from kivymd.uix.boxlayout import MDBoxLayout from kivymd.uix.card import MDCard from kivymd.uix.label import MDLabel from kivymd.uix.textfield import MDTextField from naturtag.app.screens import load_kv from naturtag.constants import AUTOCOMPLETE_DELAY, AUTOCOMPLETE_MIN_CHARS from naturtag.widgets import TextFieldWrapper PADDING = dp(50) ROW_SIZE = dp(22) logger = getLogger().getChild(__name__) load_kv('autocomplete') class AutocompleteSearch(MDBoxLayout, TextFieldWrapper): def __init__(self, text_input_kwargs=None, **kwargs): super().__init__(**kwargs) self.register_event_type('on_selection') self.trigger = Clock.create_trigger(self.callback, AUTOCOMPLETE_DELAY) Clock.schedule_once(lambda *x: self.post_init(text_input_kwargs or {})) def post_init(self, text_input_kwargs): self.text_input = self.ids.text_input self.text_input.bind( text=lambda *x: self.trigger(), focus=self.on_text_focus, ) self.ids.clear_input_button.bind(on_release=self.reset) if text_input_kwargs: logger.debug(f'Overriding text input settings: {text_input_kwargs}') for k, v in text_input_kwargs.items(): setattr(self.text_input, k, v) self.dropdown_container = self.ids.dropdown_container self.dropdown_view = self.ids.dropdown_view self.ids.dropdown_layout.bind(on_selection=lambda *x: self.update_selection(*x)) def on_text_focus(self, instance, *args): if instance.focus: logger.debug('Opening dropdown on text focus') self.dropdown_container.open() def callback(self, *args): logger.debug('Populating autocomplete results') search_str = self.text_input.text if len(search_str) < AUTOCOMPLETE_MIN_CHARS: return def get_row(item): if isinstance(item, Mapping): return item return {'text': item, 'suggestion_text': item, 'metadata': {}} matches = asyncio.run(self.get_autocomplete(search_str)) logger.info(f'Found {len(matches)} matches for search string "{search_str}"') self.dropdown_view.data = [get_row(i) for i in matches] self.dropdown_container.open() def update_selection(self, instance, suggestion_text, metadata): self.text_input.suggestion_text = ' ' + suggestion_text self.dispatch('on_selection', metadata) Clock.schedule_once(self.dropdown_container.dismiss, 0.2) def on_selection(self, metadata): async def get_autocomplete(self, search_str): return [{'text': f'Text: {search_str}'}] + [{'text': f'Text: {i}'} for i in range(9)] def reset(self, *args): self.text_input.text = '' self.text_input.suggestion_text = '' self.dropdown_view.data = [] class DropdownContainer(MDCard): caller = ObjectProperty() layout = ObjectProperty() view = ObjectProperty() max_height = NumericProperty(500) def __init__(self, **kwargs): super().__init__(**kwargs) Window.bind( on_resize=self.on_window_resize, on_restore=self.on_window_resize, on_maximize=self.on_window_resize, ) self._resize_complete = False self.start_coords = [0, 0] self._data = [] self.is_open = False def on_window_resize(self, *args): self._resize_complete = False if self.is_open: self.resize_layout() def resize_layout(self): self.start_coords = 
self.caller.to_window(*self.caller.pos) self.layout.size_hint_min_y = ROW_SIZE * len(self.view.data) if self.view.data: self._resize_complete = True def open(self): logger.debug(f'Opening dropdown at {self.layout.center_x}, {self.layout.center_y}') if not self._resize_complete: self.resize_layout() if self._data and not self.view.data: self.view.data = self._data self.is_open = True def on_touch_down(self, touch): if not self.is_open or self.view.collide_point(*touch.pos): super().on_touch_down(touch) else: self.dismiss()
MIT License
sage-org/sage-engine
sage/database/core/graph.py
Graph.delete
python
def delete(self, subject: str, predicate: str, obj: str):
    self._connector.delete(subject, predicate, obj)
Delete an RDF triple from the RDF graph.

Args:
  * subject: Subject of the RDF triple.
  * predicate: Predicate of the RDF triple.
  * obj: Object of the RDF triple.
https://github.com/sage-org/sage-engine/blob/33b3c775f6932d0e61bcce2c763f2d63846dba40/sage/database/core/graph.py#L97-L105
from datetime import datetime from math import inf from typing import List, Optional, Tuple from sage.database.db_connector import DatabaseConnector from sage.database.db_iterator import DBIterator class Graph(object): def __init__(self, uri: str, name: str, description: str, connector: DatabaseConnector, quantum=75, max_results=inf, default_queries: List[dict] = list()): super(Graph, self).__init__() self._uri = uri self._name = name self._description = description self._connector = connector self._quantum = quantum self._max_results = max_results self._example_queries = default_queries @property def uri(self) -> str: return self._uri @property def name(self) -> str: return self._name @property def description(self) -> str: return self._description @property def quota(self) -> float: return self._quantum @property def max_results(self) -> float: return self._max_results @property def nb_triples(self) -> int: return self._connector.nb_triples @property def example_queries(self) -> List[dict]: return self._example_queries def connector(self) -> DatabaseConnector: return self._connector def search(self, subject: str, predicate: str, obj: str, last_read: Optional[str] = None, as_of: Optional[datetime] = None) -> Tuple[DBIterator, int]: return self._connector.search(subject, predicate, obj, last_read=last_read, as_of=as_of) def insert(self, subject: str, predicate: str, obj: str): self._connector.insert(subject, predicate, obj)
MIT License
pysat/pysat
pysat/tests/test_methods_general.py
TestGenMethods.setup
python
def setup(self):
    fname = 'fake_data_{year:04d}{month:02d}{day:02d}_v05.cdf'
    self.kwargs = {'tag': '', 'inst_id': '', 'data_path': '/fake/path/',
                   'format_str': None,
                   'supported_tags': {'': {'': fname}}}
Runs before every method to create a clean testing setup.
https://github.com/pysat/pysat/blob/4d12a09ea585b88d54560413e03cae9289113718/pysat/tests/test_methods_general.py#L12-L17
import datetime as dt from os import path import pandas as pds import pytest import pysat from pysat.instruments.methods import general as gen class TestGenMethods():
BSD 3-Clause New or Revised License
numba/numba
numba/core/postproc.py
PostProcessor._compute_generator_info
python
def _compute_generator_info(self):
    self._insert_var_dels()
    self._populate_generator_info()
    gi = self.func_ir.generator_info
    for yp in gi.get_yield_points():
        live_vars = set(self.func_ir.get_block_entry_vars(yp.block))
        weak_live_vars = set()
        stmts = iter(yp.block.body)
        for stmt in stmts:
            if isinstance(stmt, ir.Assign):
                if stmt.value is yp.inst:
                    break
                live_vars.add(stmt.target.name)
            elif isinstance(stmt, ir.Del):
                live_vars.remove(stmt.value)
        else:
            assert 0, "couldn't find yield point"
        for stmt in stmts:
            if isinstance(stmt, ir.Del):
                name = stmt.value
                if name in live_vars:
                    live_vars.remove(name)
                    weak_live_vars.add(name)
            else:
                break
        yp.live_vars = live_vars
        yp.weak_live_vars = weak_live_vars

    st = set()
    for yp in gi.get_yield_points():
        st |= yp.live_vars
        st |= yp.weak_live_vars
    gi.state_vars = sorted(st)
    self.remove_dels()
Compute the generator's state variables as the union of live variables at all yield points.
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/core/postproc.py#L115-L155
from numba.core import utils, ir, analysis, transforms, ir_utils class YieldPoint(object): def __init__(self, block, inst): assert isinstance(block, ir.Block) assert isinstance(inst, ir.Yield) self.block = block self.inst = inst self.live_vars = None self.weak_live_vars = None class GeneratorInfo(object): def __init__(self): self.yield_points = {} self.state_vars = [] def get_yield_points(self): return self.yield_points.values() class VariableLifetime(object): def __init__(self, blocks): self._blocks = blocks @utils.cached_property def cfg(self): return analysis.compute_cfg_from_blocks(self._blocks) @utils.cached_property def usedefs(self): return analysis.compute_use_defs(self._blocks) @utils.cached_property def livemap(self): return analysis.compute_live_map(self.cfg, self._blocks, self.usedefs.usemap, self.usedefs.defmap) @utils.cached_property def deadmaps(self): return analysis.compute_dead_maps(self.cfg, self._blocks, self.livemap, self.usedefs.defmap) ir_extension_insert_dels = {} class PostProcessor(object): def __init__(self, func_ir): self.func_ir = func_ir def run(self, emit_dels=False): self.func_ir.blocks = transforms.canonicalize_cfg(self.func_ir.blocks) vlt = VariableLifetime(self.func_ir.blocks) self.func_ir.variable_lifetime = vlt bev = analysis.compute_live_variables(vlt.cfg, self.func_ir.blocks, vlt.usedefs.defmap, vlt.deadmaps.combined) for offset, ir_block in self.func_ir.blocks.items(): self.func_ir.block_entry_vars[ir_block] = bev[offset] if self.func_ir.is_generator: self.func_ir.generator_info = GeneratorInfo() self._compute_generator_info() else: self.func_ir.generator_info = None if emit_dels: self._insert_var_dels() def _populate_generator_info(self): dct = self.func_ir.generator_info.yield_points assert not dct, 'rerunning _populate_generator_info' for block in self.func_ir.blocks.values(): for inst in block.body: if isinstance(inst, ir.Assign): yieldinst = inst.value if isinstance(yieldinst, ir.Yield): index = len(dct) + 1 yieldinst.index = index yp = YieldPoint(block, yieldinst) dct[yieldinst.index] = yp
BSD 2-Clause Simplified License
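One subtle construct in the function above is Python's for/else: the else branch with the assert only fires when the loop finishes without hitting break, i.e. when no yield point was found in the block. A tiny standalone illustration of that behaviour (the data and function name are made up):

def find_first_even(values):
    for v in values:
        if v % 2 == 0:
            break  # found one; the else clause below is skipped
    else:
        raise AssertionError("no even value found")
    return v

print(find_first_even([3, 5, 8, 9]))  # 8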
ros-industrial/robodk_postprocessors
Toshiba.py
RobotPost.setSpeed
python
def setSpeed(self, speed_mms):
    speed_percent = max(100*speed_mms/5000, 100)
    self.addline('SPEED=%.0f' % (speed_percent))
Changes the robot speed (in mm/s)
https://github.com/ros-industrial/robodk_postprocessors/blob/d7e6c1c07758d67d2906cfd638049bdff88cca72/Toshiba.py#L166-L169
from robodk import * def pose_2_str(pose): [x,y,z,w,p,r] = Pose_2_Adept(pose) c = angle3(pose.VX(),[1,0,0])*180/pi return ('TRANS(%.3f,%.3f,%.3f,%.3f)' % (x,y,z,c)) def target_2_str(pose,joints): [x,y,z,w,p,r] = Pose_2_Adept(pose) c = angle3(pose.VX(),[1,0,0])*180/pi t = joints[3] if joints[1] >= 0: config = 1 else: config = 2 return ('POINT(%.3f,%.3f,%.3f,%.3f, %.1f, %i)' % (x,y,z,c,t,config)) def angles_2_str(angles): return '{%s}' % (','.join(format(ji, ".5f") for ji in angles)) class RobotPost(object): PROG_EXT = 'txt' ROBOT_POST = '' ROBOT_NAME = '' PROG_FILES = [] PROG = '' LOG = '' nAxes = 6 TAB = '' REF_FRAME = None def __init__(self, robotpost=None, robotname=None, robot_axes = 6, **kwargs): self.ROBOT_POST = robotpost self.ROBOT_NAME = robotname self.PROG = '' self.LOG = '' self.nAxes = robot_axes def ProgStart(self, progname): self.addline('PROGRAM %s' % progname) self.TAB = ' ' def ProgFinish(self, progname): self.TAB = '' self.addline('END') def ProgSave(self, folder, progname, ask_user = False, show_result = False): progname = progname + '.' + self.PROG_EXT if ask_user or not DirExists(folder): filesave = getSaveFile(folder, progname, 'Save program as...') if filesave is not None: filesave = filesave.name else: return else: filesave = folder + '/' + progname fid = open(filesave, "w") fid.write(self.PROG) fid.close() print('SAVED: %s\n' % filesave) self.PROG_FILES = filesave if show_result: if type(show_result) is str: import subprocess p = subprocess.Popen([show_result, filesave]) elif type(show_result) is list: import subprocess p = subprocess.Popen(show_result + [filesave]) else: import os os.startfile(filesave) if len(self.LOG) > 0: mbox('Program generation LOG:\n\n' + self.LOG) def ProgSendRobot(self, robot_ip, remote_path, ftp_user, ftp_pass): UploadFTP(self.PROG_FILES, robot_ip, remote_path, ftp_user, ftp_pass) def MoveJ(self, pose, joints, conf_RLF=None): self.addline('MOVE ' + target_2_str(pose, joints)) def MoveL(self, pose, joints, conf_RLF=None): self.addline('MOVES ' + target_2_str(pose, joints)) def MoveC(self, pose1, joints1, pose2, joints2, conf_RLF_1=None, conf_RLF_2=None): self.addline('MOVEC %s %s' % (target_2_str(pose1, joints1), target_2_str(pose2, joints2))) def setFrame(self, pose, frame_id=None, frame_name=None): self.addline('BASE = %s' % pose_2_str(pose)) def setTool(self, pose, tool_id=None, tool_name=None): self.addline('TOOL = %s' % pose_2_str(pose)) def Pause(self, time_ms): if time_ms <= 0: self.addline('PAUSE') else: self.addline('DELAY %.3f' % (time_ms*0.001))
Apache License 2.0
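The percentage conversion above can be checked in isolation; note that max() clamps the result to at least 100, so every requested speed maps to 100% or more of the assumed 5000 mm/s maximum (the sample speeds below are made up):

def speed_percent(speed_mms, max_mms=5000.0):
    # Same arithmetic as setSpeed above.
    return max(100 * speed_mms / max_mms, 100)

print(speed_percent(1000))  # 100.0 (20% clamped up to 100)
print(speed_percent(7500))  # 150.0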
nastools/homeassistant
homeassistant/components/media_player/demo.py
DemoYoutubePlayer.play_media
python
def play_media(self, media_type, media_id, **kwargs):
    self.youtube_id = media_id
    self.update_ha_state()
Play a piece of media.
https://github.com/nastools/homeassistant/blob/7ca1180bd42713f2d77bbc3f0b27b231ba8784aa/homeassistant/components/media_player/demo.py#L155-L158
from homeassistant.components.media_player import ( MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_SELECT_SOURCE, SUPPORT_CLEAR_PLAYLIST, MediaPlayerDevice) from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING def setup_platform(hass, config, add_devices, discovery_info=None): add_devices([ DemoYoutubePlayer( 'Living Room', 'eyU3bRy2x44', '♥♥ The Best Fireplace Video (3 hours)'), DemoYoutubePlayer('Bedroom', 'kxopViU98Xo', 'Epic sax guy 10 hours'), DemoMusicPlayer(), DemoTVShowPlayer(), ]) YOUTUBE_COVER_URL_FORMAT = 'https://img.youtube.com/vi/{}/hqdefault.jpg' YOUTUBE_PLAYER_SUPPORT = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA MUSIC_PLAYER_SUPPORT = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_CLEAR_PLAYLIST NETFLIX_PLAYER_SUPPORT = SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE class AbstractDemoPlayer(MediaPlayerDevice): def __init__(self, name): self._name = name self._player_state = STATE_PLAYING self._volume_level = 1.0 self._volume_muted = False @property def should_poll(self): return False @property def name(self): return self._name @property def state(self): return self._player_state @property def volume_level(self): return self._volume_level @property def is_volume_muted(self): return self._volume_muted def turn_on(self): self._player_state = STATE_PLAYING self.update_ha_state() def turn_off(self): self._player_state = STATE_OFF self.update_ha_state() def mute_volume(self, mute): self._volume_muted = mute self.update_ha_state() def set_volume_level(self, volume): self._volume_level = volume self.update_ha_state() def media_play(self): self._player_state = STATE_PLAYING self.update_ha_state() def media_pause(self): self._player_state = STATE_PAUSED self.update_ha_state() class DemoYoutubePlayer(AbstractDemoPlayer): def __init__(self, name, youtube_id=None, media_title=None): super().__init__(name) self.youtube_id = youtube_id self._media_title = media_title @property def media_content_id(self): return self.youtube_id @property def media_content_type(self): return MEDIA_TYPE_VIDEO @property def media_duration(self): return 360 @property def media_image_url(self): return YOUTUBE_COVER_URL_FORMAT.format(self.youtube_id) @property def media_title(self): return self._media_title @property def app_name(self): return "YouTube" @property def supported_media_commands(self): return YOUTUBE_PLAYER_SUPPORT
MIT License
unity-technologies/datasetinsights
datasetinsights/stats/visualization/app.py
_init_app
python
def _init_app():
    this_dir = os.path.dirname(os.path.abspath(__file__))
    css_file = os.path.join(this_dir, "stylesheet.css")
    app = dash.Dash(
        __name__,
        external_stylesheets=[css_file],
        suppress_callback_exceptions=True,
    )
    return app
Initializes the dash app.
https://github.com/unity-technologies/datasetinsights/blob/0c6e2407f3b6ceb7a38cb82e3bbcf41a6c2d4672/datasetinsights/stats/visualization/app.py#L6-L16
import os import dash
Apache License 2.0
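A hedged usage sketch for the factory above; the layout and port are invented, only standard Dash API calls are assumed, and _init_app is assumed to be importable or already in scope:

from dash import html  # dash >= 2.0; older releases ship dash_html_components instead

app = _init_app()                        # returns a configured dash.Dash instance
app.layout = html.Div([html.H1("Dataset Insights")])
# app.run_server(port=8050)             # uncomment to serve the dashboard locally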
missionpinball/mpf
mpf/platforms/fast/fast_switch.py
FASTSwitch.get_board_name
python
def get_board_name(self):
    if self.platform.machine_type == 'wpc':
        return "FAST WPC"
    switch_index = 0
    number = Util.hex_string_to_int(self.number)
    for board_obj in self.platform.io_boards.values():
        if switch_index <= number < switch_index + board_obj.switch_count:
            return "FAST Board {}".format(str(board_obj.node_id))
        switch_index += board_obj.switch_count

    return "FAST Unknown Board"
Return the name of the board this switch is connected to.
https://github.com/missionpinball/mpf/blob/1eda6ba6892b8f7cc6dedf6cb6472ff92293b8ef/mpf/platforms/fast/fast_switch.py#L29-L42
import logging from mpf.core.platform import SwitchConfig from mpf.core.utility_functions import Util from mpf.platforms.interfaces.switch_platform_interface import SwitchPlatformInterface MYPY = False if MYPY: from mpf.platforms.fast.fast import FastHardwarePlatform class FASTSwitch(SwitchPlatformInterface): __slots__ = ["log", "connection", "send", "platform_settings", "_configured_debounce"] def __init__(self, config: SwitchConfig, number_tuple, platform: "FastHardwarePlatform", platform_settings) -> None: super().__init__(config, number_tuple, platform) self.log = logging.getLogger('FASTSwitch') self.connection = number_tuple[1] self.send = platform.net_connection.send self.platform_settings = platform_settings self._configured_debounce = False self.configure_debounce(config.debounce in ("normal", "auto"))
MIT License
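The board lookup above is a running-offset scan over board switch counts; a standalone mimic with invented board sizes:

# Hypothetical I/O boards as (node_id, switch_count) pairs.
boards = [(0, 8), (1, 16), (2, 8)]

def board_for_switch(number):
    switch_index = 0
    for node_id, switch_count in boards:
        if switch_index <= number < switch_index + switch_count:
            return "FAST Board {}".format(node_id)
        switch_index += switch_count
    return "FAST Unknown Board"

print(board_for_switch(10))  # FAST Board 1 (switch 10 falls in the 8..23 range)
print(board_for_switch(40))  # FAST Unknown Board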
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/config/client.py
Client.describe_config_rule_evaluation_status
python
def describe_config_rule_evaluation_status(self, ConfigRuleNames: List = None, NextToken: str = None, Limit: int = None) -> Dict:
    pass
Returns status information for each of your AWS managed Config rules. The status includes information such as the last time AWS Config invoked the rule, the last time AWS Config failed to invoke the rule, and the related error for the last failure. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeConfigRuleEvaluationStatus>`_ **Request Syntax** :: response = client.describe_config_rule_evaluation_status( ConfigRuleNames=[ 'string', ], NextToken='string', Limit=123 ) **Response Syntax** :: { 'ConfigRulesEvaluationStatus': [ { 'ConfigRuleName': 'string', 'ConfigRuleArn': 'string', 'ConfigRuleId': 'string', 'LastSuccessfulInvocationTime': datetime(2015, 1, 1), 'LastFailedInvocationTime': datetime(2015, 1, 1), 'LastSuccessfulEvaluationTime': datetime(2015, 1, 1), 'LastFailedEvaluationTime': datetime(2015, 1, 1), 'FirstActivatedTime': datetime(2015, 1, 1), 'LastErrorCode': 'string', 'LastErrorMessage': 'string', 'FirstEvaluationStarted': True|False }, ], 'NextToken': 'string' } **Response Structure** - *(dict) --* - **ConfigRulesEvaluationStatus** *(list) --* Status information about your AWS managed Config rules. - *(dict) --* Status information for your AWS managed Config rules. The status includes information such as the last time the rule ran, the last time it failed, and the related error for the last failure. This action does not return status information about custom AWS Config rules. - **ConfigRuleName** *(string) --* The name of the AWS Config rule. - **ConfigRuleArn** *(string) --* The Amazon Resource Name (ARN) of the AWS Config rule. - **ConfigRuleId** *(string) --* The ID of the AWS Config rule. - **LastSuccessfulInvocationTime** *(datetime) --* The time that AWS Config last successfully invoked the AWS Config rule to evaluate your AWS resources. - **LastFailedInvocationTime** *(datetime) --* The time that AWS Config last failed to invoke the AWS Config rule to evaluate your AWS resources. - **LastSuccessfulEvaluationTime** *(datetime) --* The time that AWS Config last successfully evaluated your AWS resources against the rule. - **LastFailedEvaluationTime** *(datetime) --* The time that AWS Config last failed to evaluate your AWS resources against the rule. - **FirstActivatedTime** *(datetime) --* The time that you first activated the AWS Config rule. - **LastErrorCode** *(string) --* The error code that AWS Config returned when the rule last failed. - **LastErrorMessage** *(string) --* The error message that AWS Config returned when the rule last failed. - **FirstEvaluationStarted** *(boolean) --* Indicates whether AWS Config has evaluated your resources against the rule at least once. * ``true`` - AWS Config has evaluated your AWS resources against the rule at least once. * ``false`` - AWS Config has not once finished evaluating your AWS resources against the rule. - **NextToken** *(string) --* The string that you use in a subsequent request to get the next page of results in a paginated response. :type ConfigRuleNames: list :param ConfigRuleNames: The name of the AWS managed Config rules for which you want status information. If you do not specify any names, AWS Config returns status information for all AWS managed Config rules that you use. - *(string) --* :type NextToken: string :param NextToken: The ``nextToken`` string returned on a previous page that you use to get the next page of results in a paginated response. :type Limit: integer :param Limit: The number of rule evaluation results that you want returned. 
This parameter is required if the rule limit for your account is more than the default of 50 rules. For information about requesting a rule limit increase, see `AWS Config Limits <http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_config>`__ in the *AWS General Reference Guide* . :rtype: dict :returns:
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/config/client.py#L804-L888
from typing import Optional from botocore.client import BaseClient from typing import Dict from botocore.paginate import Paginator from datetime import datetime from botocore.waiter import Waiter from typing import Union from typing import List class Client(BaseClient): def batch_get_aggregate_resource_config(self, ConfigurationAggregatorName: str, ResourceIdentifiers: List) -> Dict: pass def batch_get_resource_config(self, resourceKeys: List) -> Dict: pass def can_paginate(self, operation_name: str = None): pass def delete_aggregation_authorization(self, AuthorizedAccountId: str, AuthorizedAwsRegion: str): pass def delete_config_rule(self, ConfigRuleName: str): pass def delete_configuration_aggregator(self, ConfigurationAggregatorName: str): pass def delete_configuration_recorder(self, ConfigurationRecorderName: str): pass def delete_delivery_channel(self, DeliveryChannelName: str): pass def delete_evaluation_results(self, ConfigRuleName: str) -> Dict: pass def delete_pending_aggregation_request(self, RequesterAccountId: str, RequesterAwsRegion: str): pass def delete_remediation_configuration(self, ConfigRuleName: str, ResourceType: str = None) -> Dict: pass def delete_retention_configuration(self, RetentionConfigurationName: str): pass def deliver_config_snapshot(self, deliveryChannelName: str) -> Dict: pass def describe_aggregate_compliance_by_config_rules(self, ConfigurationAggregatorName: str, Filters: Dict = None, Limit: int = None, NextToken: str = None) -> Dict: pass def describe_aggregation_authorizations(self, Limit: int = None, NextToken: str = None) -> Dict: pass def describe_compliance_by_config_rule(self, ConfigRuleNames: List = None, ComplianceTypes: List = None, NextToken: str = None) -> Dict: pass def describe_compliance_by_resource(self, ResourceType: str = None, ResourceId: str = None, ComplianceTypes: List = None, Limit: int = None, NextToken: str = None) -> Dict: pass
MIT License
mdsol/rwslib
rwslib/builders/modm.py
LastUpdateMixin.set_update_time
python
def set_update_time(self, update_time=None):
    if update_time and isinstance(update_time, (datetime.datetime,)):
        self.last_update_time = update_time
    else:
        self.last_update_time = datetime.datetime.utcnow()
Set the last update time, using the supplied datetime if given, otherwise the current UTC time from the local clock
https://github.com/mdsol/rwslib/blob/799cbc2ca75dc1be3cb4099bf26b7a5cc360fbfd/rwslib/builders/modm.py#L162-L170
__author__ = 'glow' import datetime import enum class MODMExtensionRegistry(enum.Enum): StudyEventDef = ["ArmAssociation"] StudyEventRef = ["ArmAssociation"] ClinicalData = ["ExternalStudyID", "StudyUUID", "AuditSubCategoryName", "StudyName", "ClientDivisionUUID", "ClientDivisionSchemeUUID", "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate", "IsSDVRequired", "IsSDVComplete"] StudyEventData = ["StartWindowDate", "EndWindowDate", "StudyEventUUID", "InstanceName", "VisitTargetDate", "InstanceId", "InstanceOverDue", "InstanceStartWindow", "InstanceEndWindow", "InstanceClose", "InstanceAccess", "StudyEventDate", "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate", "VisitFirstDataEntryDate", "IsSDVRequired", "IsSDVComplete"] SubjectData = ["SubjectName", "Status", "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate", "IsSDVRequired", "IsSDVComplete", "SubjectUUID"] FormData = ["FormUUID", "DataPageName", "DataPageID", "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate", "IsSDVRequired", "IsSDVComplete"] ItemGroupData = ["ItemGroupUUID", "RecordID", "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate", "IsSDVRequired", "IsSDVComplete"] ItemData = ["ItemUUID", "SDRCompleteDate", "SDVCompleteDate", "LockCompleteDate", "IsSDVRequired", "IsSDVComplete"] SiteRef = ["SiteStartDate", "SiteCloseDate", "LocationOIDType"] Location = ["SiteStartDate", "SiteCloseDate"] class MODMAttribute(object): def __init__(self, attribute, value): self.attribute = attribute self.raw_value = value @property def tag(self): return "mdsol:{}".format(self.attribute) @property def value(self): if isinstance(self.raw_value, (datetime.datetime, datetime.date)): return self.raw_value.isoformat() elif isinstance(self.raw_value, (bool,)): return 'Yes' if self.raw_value else 'No' return self.raw_value class MODMMixin(object): @property def attributes(self): if not hasattr(self, "_attributes"): self._attributes = [] return self._attributes def add_attribute(self, attribute, value): class_name = self.__class__.__name__ if class_name.startswith('ItemData'): class_name = 'ItemData' if attribute not in MODMExtensionRegistry[class_name].value: raise ValueError("Can't add {} to {}".format(attribute, self.__class__.__name__)) self.attributes.append(MODMAttribute(attribute, value)) def mixin(self): pass def mixin_params(self, params): if not isinstance(params, (dict,)): raise AttributeError("Cannot mixin to object of type {}".format(type(params))) for attribute in self.attributes: params.update({attribute.tag: attribute.value}) class LastUpdateMixin(MODMMixin): @property def last_update_time(self): if not hasattr(self, "_last_update_time"): self._last_update_time = None return self._last_update_time @last_update_time.setter def last_update_time(self, value): if isinstance(value, (datetime.datetime,)): self._last_update_time = value else: raise ValueError("Expect last_update_time to be a datetime")
MIT License
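A minimal standalone sketch of the defaulting behaviour above (no rwslib import; the helper name is hypothetical):

import datetime

def resolve_update_time(update_time=None):
    # Use the supplied datetime if given, otherwise the current UTC time.
    if update_time and isinstance(update_time, datetime.datetime):
        return update_time
    return datetime.datetime.utcnow()

print(resolve_update_time())                                   # current UTC time
print(resolve_update_time(datetime.datetime(2020, 1, 1, 12)))  # 2020-01-01 12:00:00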
tensorflow/agents
tf_agents/specs/tensor_spec.py
to_nest_placeholder
python
def to_nest_placeholder(nested_tensor_specs,
                        default=None,
                        name_scope="",
                        outer_dims=()):
    if default is None:
        to_ph = lambda spec: to_placeholder(spec, outer_dims=outer_dims)
    else:
        if not isinstance(default, (int, float, np.ndarray)):
            raise ValueError("to_nest_placeholder default value must be an int, "
                             "float, or np.ndarray")

        def to_ph(spec):
            shape = list(outer_dims) + spec.shape.as_list()
            if isinstance(default, np.ndarray) and list(default.shape) != shape:
                raise ValueError("Shape mismatch between default value and spec. "
                                 "Got {}, expected {}".format(default.shape, shape))
            const = tf.constant(default, shape=shape, dtype=spec.dtype)
            return to_placeholder_with_default(const, spec, outer_dims=outer_dims)

    with tf.name_scope(name_scope):
        return tf.nest.map_structure(to_ph, nested_tensor_specs)
Converts a nest of TensorSpecs to a nest of matching placeholders.

Args:
  nested_tensor_specs: A nest of tensor specs.
  default: Optional constant value to set as a default for the placeholder.
  name_scope: String name for the scope to create the placeholders in.
  outer_dims: Optional leading dimensions for the placeholder.

Returns:
  A nest of placeholders matching the given tensor spec.

Raises:
  ValueError: If a default is provided outside of the allowed types, or if
    default is a np.array that does not match the spec shape.
https://github.com/tensorflow/agents/blob/ad18e95cfd95e4e76b771aeafa653f70c5080a29/tf_agents/specs/tensor_spec.py#L141-L176
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from typing import Union import numpy as np import tensorflow as tf import tensorflow_probability as tfp from tf_agents.specs import array_spec from tf_agents.typing import types from google.protobuf import text_format from tensorflow.core.protobuf import struct_pb2 from tensorflow.python.framework import tensor_spec as ts from tensorflow.python.saved_model import nested_structure_coder tfd = tfp.distributions TensorSpec = tf.TensorSpec BoundedTensorSpec = ts.BoundedTensorSpec def is_bounded(spec): return isinstance(spec, (array_spec.BoundedArraySpec, BoundedTensorSpec)) def is_discrete(spec): if isinstance(spec, TensorSpec): return spec.dtype.is_integer else: return array_spec.is_discrete(spec) def is_continuous(spec): if isinstance(spec, TensorSpec): return spec.dtype.is_floating else: return array_spec.is_continuous(spec) def from_spec(spec): def _convert_to_tensor_spec(s): if isinstance(s, tf.TypeSpec): return s if isinstance(s, (array_spec.BoundedArraySpec, BoundedTensorSpec)): return BoundedTensorSpec.from_spec(s) elif isinstance(s, array_spec.ArraySpec): return TensorSpec.from_spec(s) else: raise ValueError( "No known conversion from type `%s` to a TensorSpec. Saw:\n %s" % (type(s), s)) return tf.nest.map_structure(_convert_to_tensor_spec, spec) def to_array_spec( tensor_spec: Union[types.NestedArraySpec, types.NestedTensorSpec] ) -> types.NestedArraySpec: def _convert(s): if isinstance(s, array_spec.ArraySpec): return s if hasattr(s, "minimum") and hasattr(s, "maximum"): return array_spec.BoundedArraySpec( s.shape.as_list(), s.dtype.as_numpy_dtype, minimum=s.minimum, maximum=s.maximum, name=s.name) else: return array_spec.ArraySpec(s.shape.as_list(), s.dtype.as_numpy_dtype, s.name) return tf.nest.map_structure(_convert, tensor_spec) def to_nest_array_spec( nest_array_spec: Union[types.NestedArraySpec, types.NestedTensorSpec] ) -> types.NestedArraySpec: return to_array_spec(nest_array_spec) def to_placeholder(spec, outer_dims=()): ph_shape = list(outer_dims) + spec.shape.as_list() return tf.compat.v1.placeholder(spec.dtype, ph_shape, spec.name) def to_placeholder_with_default(default, spec, outer_dims=()): ph_shape = list(outer_dims) + spec.shape.as_list() return tf.compat.v1.placeholder_with_default(default, ph_shape, spec.name)
Apache License 2.0
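A hedged usage sketch for the function above; placeholders only exist in graph mode, so this assumes TF1-style graph construction, and the spec names, shapes, and scope name are invented:

import tensorflow as tf
from tf_agents.specs import tensor_spec

specs = {
    'observation': tf.TensorSpec([4], tf.float32, name='observation'),
    'action': tf.TensorSpec([], tf.int64, name='action'),
}

with tf.Graph().as_default():  # placeholders require graph mode
    placeholders = tensor_spec.to_nest_placeholder(
        specs, name_scope='inputs', outer_dims=(None,))
    # placeholders['observation'] has shape (None, 4); placeholders['action'] has shape (None,)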
awslabs/dgl-ke
python/dglke/models/general_models.py
KEModel.prepare_relation
python
def prepare_relation(self, device=None):
    self.relation_emb = ExternalEmbedding(self.args, self.n_relations,
                                          self.rel_dim, device)
    self.relation_emb.init(self.emb_init)
    if self.model_name == 'TransR':
        local_projection_emb = ExternalEmbedding(self.args, self.n_relations,
                                                 self.entity_dim * self.rel_dim,
                                                 device)
        self.score_func.prepare_local_emb(local_projection_emb)
        self.score_func.reset_parameters()
Prepare relation embeddings in multi-process multi-GPU training mode.

device : th.device
    Which device (GPU) to put relation embeddings in.
https://github.com/awslabs/dgl-ke/blob/30558e069c42038cded08bddd26ac75f153aae75/python/dglke/models/general_models.py#L590-L602
import os import numpy as np import math import dgl.backend as F backend = os.environ.get('DGLBACKEND', 'pytorch') if backend.lower() == 'mxnet': from .mxnet.tensor_models import masked_select from .mxnet.tensor_models import logsigmoid from .mxnet.tensor_models import abs from .mxnet.tensor_models import get_device, get_dev from .mxnet.tensor_models import norm from .mxnet.tensor_models import get_scalar from .mxnet.tensor_models import reshape from .mxnet.tensor_models import cuda from .mxnet.tensor_models import ExternalEmbedding from .mxnet.tensor_models import InferEmbedding from .mxnet.score_fun import * DEFAULT_INFER_BATCHSIZE = 1024 else: from .pytorch.tensor_models import logsigmoid from .pytorch.tensor_models import abs from .pytorch.tensor_models import masked_select from .pytorch.tensor_models import get_device, get_dev from .pytorch.tensor_models import norm from .pytorch.tensor_models import get_scalar from .pytorch.tensor_models import reshape from .pytorch.tensor_models import cuda from .pytorch.tensor_models import ExternalEmbedding from .pytorch.tensor_models import InferEmbedding from .pytorch.score_fun import * from .pytorch.loss import LossGenerator DEFAULT_INFER_BATCHSIZE = 2048 EMB_INIT_EPS = 2.0 class InferModel(object): def __init__(self, device, model_name, hidden_dim, double_entity_emb=False, double_relation_emb=False, gamma=0., batch_size=DEFAULT_INFER_BATCHSIZE): super(InferModel, self).__init__() self.device = device self.model_name = model_name entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim self.entity_emb = InferEmbedding(device) self.relation_emb = InferEmbedding(device) self.batch_size = batch_size if model_name == 'TransE' or model_name == 'TransE_l2': self.score_func = TransEScore(gamma, 'l2') elif model_name == 'TransE_l1': self.score_func = TransEScore(gamma, 'l1') elif model_name == 'TransR': assert False, 'Do not support inference of TransR model now.' 
elif model_name == 'DistMult': self.score_func = DistMultScore() elif model_name == 'ComplEx': self.score_func = ComplExScore() elif model_name == 'RESCAL': self.score_func = RESCALScore(relation_dim, entity_dim) elif model_name == 'RotatE': emb_init = (gamma + EMB_INIT_EPS) / hidden_dim self.score_func = RotatEScore(gamma, emb_init) elif model_name == 'SimplE': self.score_func = SimplEScore() def load_emb(self, path, dataset): self.entity_emb.load(path, dataset+'_'+self.model_name+'_entity') self.relation_emb.load(path, dataset+'_'+self.model_name+'_relation') self.score_func.load(path, dataset+'_'+self.model_name) def score(self, head, rel, tail, triplet_wise=False): head_emb = self.entity_emb(head) rel_emb = self.relation_emb(rel) tail_emb = self.entity_emb(tail) num_head = F.shape(head)[0] num_rel = F.shape(rel)[0] num_tail = F.shape(tail)[0] batch_size = self.batch_size score = [] if triplet_wise: class FakeEdge(object): def __init__(self, head_emb, rel_emb, tail_emb): self._hobj = {} self._robj = {} self._tobj = {} self._hobj['emb'] = head_emb self._robj['emb'] = rel_emb self._tobj['emb'] = tail_emb @property def src(self): return self._hobj @property def dst(self): return self._tobj @property def data(self): return self._robj for i in range((num_head + batch_size - 1) // batch_size): sh_emb = head_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] sr_emb = rel_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] st_emb = tail_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] edata = FakeEdge(sh_emb, sr_emb, st_emb) score.append(F.copy_to(self.score_func.edge_func(edata)['score'], F.cpu())) score = F.cat(score, dim=0) return score else: for i in range((num_head + batch_size - 1) // batch_size): sh_emb = head_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] s_score = [] for j in range((num_tail + batch_size - 1) // batch_size): st_emb = tail_emb[j * batch_size : (j + 1) * batch_size if (j + 1) * batch_size < num_tail else num_tail] s_score.append(F.copy_to(self.score_func.infer(sh_emb, rel_emb, st_emb), F.cpu())) score.append(F.cat(s_score, dim=2)) score = F.cat(score, dim=0) return F.reshape(score, (num_head * num_rel * num_tail,)) @property def num_entity(self): return self.entity_emb.emb.shape[0] @property def num_rel(self): return self.relation_emb.emb.shape[0] class KEModel(object): def __init__(self, args, model_name, n_entities, n_relations, hidden_dim, gamma, double_entity_emb=False, double_relation_emb=False): super(KEModel, self).__init__() self.args = args self.has_edge_importance = args.has_edge_importance self.n_entities = n_entities self.n_relations = n_relations self.model_name = model_name self.hidden_dim = hidden_dim self.eps = EMB_INIT_EPS self.emb_init = (gamma + self.eps) / hidden_dim entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim device = get_device(args) self.loss_gen = LossGenerator(args, args.loss_genre if hasattr(args, 'loss_genre') else 'Logsigmoid', args.neg_adversarial_sampling if hasattr(args, 'neg_adversarial_sampling') else False, args.adversarial_temperature if hasattr(args, 'adversarial_temperature') else 1.0, args.pairwise if hasattr(args, 'pairwise') else False) self.entity_emb = ExternalEmbedding(args, n_entities, entity_dim, F.cpu() if args.mix_cpu_gpu else device) if model_name == 'RESCAL': rel_dim = 
relation_dim * entity_dim else: rel_dim = relation_dim self.rel_dim = rel_dim self.entity_dim = entity_dim self.strict_rel_part = args.strict_rel_part self.soft_rel_part = args.soft_rel_part if not self.strict_rel_part and not self.soft_rel_part: self.relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu() if args.mix_cpu_gpu else device) else: self.global_relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu()) if model_name == 'TransE' or model_name == 'TransE_l2': self.score_func = TransEScore(gamma, 'l2') elif model_name == 'TransE_l1': self.score_func = TransEScore(gamma, 'l1') elif model_name == 'TransR': projection_emb = ExternalEmbedding(args, n_relations, entity_dim * relation_dim, F.cpu() if args.mix_cpu_gpu else device) self.score_func = TransRScore(gamma, projection_emb, relation_dim, entity_dim) elif model_name == 'DistMult': self.score_func = DistMultScore() elif model_name == 'ComplEx': self.score_func = ComplExScore() elif model_name == 'RESCAL': self.score_func = RESCALScore(relation_dim, entity_dim) elif model_name == 'RotatE': self.score_func = RotatEScore(gamma, self.emb_init) elif model_name == 'SimplE': self.score_func = SimplEScore() self.model_name = model_name self.head_neg_score = self.score_func.create_neg(True) self.tail_neg_score = self.score_func.create_neg(False) self.head_neg_prepare = self.score_func.create_neg_prepare(True) self.tail_neg_prepare = self.score_func.create_neg_prepare(False) self.reset_parameters() def share_memory(self): self.entity_emb.share_memory() if self.strict_rel_part or self.soft_rel_part: self.global_relation_emb.share_memory() else: self.relation_emb.share_memory() if self.model_name == 'TransR': self.score_func.share_memory() def save_emb(self, path, dataset): self.entity_emb.save(path, dataset+'_'+self.model_name+'_entity') if self.strict_rel_part or self.soft_rel_part: self.global_relation_emb.save(path, dataset+'_'+self.model_name+'_relation') else: self.relation_emb.save(path, dataset+'_'+self.model_name+'_relation') self.score_func.save(path, dataset+'_'+self.model_name) def load_emb(self, path, dataset): self.entity_emb.load(path, dataset+'_'+self.model_name+'_entity') self.relation_emb.load(path, dataset+'_'+self.model_name+'_relation') self.score_func.load(path, dataset+'_'+self.model_name) def reset_parameters(self): self.entity_emb.init(self.emb_init) self.score_func.reset_parameters() if (not self.strict_rel_part) and (not self.soft_rel_part): self.relation_emb.init(self.emb_init) else: self.global_relation_emb.init(self.emb_init) def predict_score(self, g): self.score_func(g) return g.edata['score'] def predict_neg_score(self, pos_g, neg_g, to_device=None, gpu_id=-1, trace=False, neg_deg_sample=False): num_chunks = neg_g.num_chunks chunk_size = neg_g.chunk_size neg_sample_size = neg_g.neg_sample_size mask = F.ones((num_chunks, chunk_size * (neg_sample_size + chunk_size)), dtype=F.float32, ctx=F.context(pos_g.ndata['emb'])) if neg_g.neg_head: neg_head_ids = neg_g.ndata['id'][neg_g.head_nid] neg_head = self.entity_emb(neg_head_ids, gpu_id, trace) head_ids, tail_ids = pos_g.all_edges(order='eid') if to_device is not None and gpu_id >= 0: tail_ids = to_device(tail_ids, gpu_id) tail = pos_g.ndata['emb'][tail_ids] rel = pos_g.edata['emb'] if neg_deg_sample: head = pos_g.ndata['emb'][head_ids] head = head.reshape(num_chunks, chunk_size, -1) neg_head = neg_head.reshape(num_chunks, neg_sample_size, -1) neg_head = F.cat([head, neg_head], 1) neg_sample_size = chunk_size + neg_sample_size 
mask[:,0::(neg_sample_size + 1)] = 0 neg_head = neg_head.reshape(num_chunks * neg_sample_size, -1) neg_head, tail = self.head_neg_prepare(pos_g.edata['id'], num_chunks, neg_head, tail, gpu_id, trace) neg_score = self.head_neg_score(neg_head, rel, tail, num_chunks, chunk_size, neg_sample_size) else: neg_tail_ids = neg_g.ndata['id'][neg_g.tail_nid] neg_tail = self.entity_emb(neg_tail_ids, gpu_id, trace) head_ids, tail_ids = pos_g.all_edges(order='eid') if to_device is not None and gpu_id >= 0: head_ids = to_device(head_ids, gpu_id) head = pos_g.ndata['emb'][head_ids] rel = pos_g.edata['emb'] if neg_deg_sample: tail = pos_g.ndata['emb'][tail_ids] tail = tail.reshape(num_chunks, chunk_size, -1) neg_tail = neg_tail.reshape(num_chunks, neg_sample_size, -1) neg_tail = F.cat([tail, neg_tail], 1) neg_sample_size = chunk_size + neg_sample_size mask[:,0::(neg_sample_size + 1)] = 0 neg_tail = neg_tail.reshape(num_chunks * neg_sample_size, -1) head, neg_tail = self.tail_neg_prepare(pos_g.edata['id'], num_chunks, head, neg_tail, gpu_id, trace) neg_score = self.tail_neg_score(head, rel, neg_tail, num_chunks, chunk_size, neg_sample_size) if neg_deg_sample: neg_g.neg_sample_size = neg_sample_size mask = mask.reshape(num_chunks, chunk_size, neg_sample_size) return neg_score * mask else: return neg_score def forward_test(self, pos_g, neg_g, logs, gpu_id=-1): pos_g.ndata['emb'] = self.entity_emb(pos_g.ndata['id'], gpu_id, False) pos_g.edata['emb'] = self.relation_emb(pos_g.edata['id'], gpu_id, False) self.score_func.prepare(pos_g, gpu_id, False) batch_size = pos_g.number_of_edges() pos_scores = self.predict_score(pos_g) pos_scores = reshape(pos_scores, batch_size, -1) neg_scores = self.predict_neg_score(pos_g, neg_g, to_device=cuda, gpu_id=gpu_id, trace=False, neg_deg_sample=self.args.neg_deg_sample_eval) neg_scores = reshape(neg_scores, batch_size, -1) if self.args.eval_filter: filter_bias = reshape(neg_g.edata['bias'], batch_size, -1) if gpu_id >= 0: filter_bias = cuda(filter_bias, gpu_id) mask = filter_bias != -1 for i in range(batch_size): if self.args.eval_filter: ranking = F.asnumpy(F.sum(masked_select(neg_scores[i] >= pos_scores[i], mask[i]), dim=0) + 1) else: ranking = F.asnumpy(F.sum(neg_scores[i] >= pos_scores[i], dim=0) + 1) logs.append({ 'MRR': 1.0 / ranking, 'MR': float(ranking), 'HITS@1': 1.0 if ranking <= 1 else 0.0, 'HITS@3': 1.0 if ranking <= 3 else 0.0, 'HITS@10': 1.0 if ranking <= 10 else 0.0 }) def forward_test_wikikg(self, query, ans, candidate, mode, logs, gpu_id=-1): scores = self.predict_score_wikikg(query, candidate, mode, to_device=cuda, gpu_id=gpu_id, trace=False) if mode == "Valid": batch_size = query.shape[0] neg_scores = reshape(scores, batch_size, -1) for i in range(batch_size): ranking = F.asnumpy(F.sum(neg_scores[i] >= neg_scores[i][ans[i]], dim=0) + 1) logs.append({ 'MRR': 1.0 / ranking, 'MR': float(ranking), 'HITS@1': 1.0 if ranking <= 1 else 0.0, 'HITS@3': 1.0 if ranking <= 3 else 0.0, 'HITS@10': 1.0 if ranking <= 10 else 0.0 }) else: argsort = F.argsort(scores, dim=1, descending=True) logs.append(argsort[:,:10]) def predict_score_wikikg(self, query, candidate, mode, to_device=None, gpu_id=-1, trace=False): num_chunks = len(query) chunk_size = 1 neg_sample_size = candidate.shape[1] neg_tail = self.entity_emb(candidate.view(-1), gpu_id, False) head = self.entity_emb(query[:,0], gpu_id, False) rel = self.relation_emb(query[:,1], gpu_id, False) neg_score = self.tail_neg_score(head, rel, neg_tail, num_chunks, chunk_size, neg_sample_size) return neg_score.squeeze() def 
forward(self, pos_g, neg_g, gpu_id=-1): pos_g.ndata['emb'] = self.entity_emb(pos_g.ndata['id'], gpu_id, True) pos_g.edata['emb'] = self.relation_emb(pos_g.edata['id'], gpu_id, True) self.score_func.prepare(pos_g, gpu_id, True) pos_score = self.predict_score(pos_g) if gpu_id >= 0: neg_score = self.predict_neg_score(pos_g, neg_g, to_device=cuda, gpu_id=gpu_id, trace=True, neg_deg_sample=self.args.neg_deg_sample) else: neg_score = self.predict_neg_score(pos_g, neg_g, trace=True, neg_deg_sample=self.args.neg_deg_sample) neg_score = reshape(neg_score, -1, neg_g.neg_sample_size) edge_weight = F.copy_to(pos_g.edata['impts'], get_dev(gpu_id)) if self.has_edge_importance else None loss, log = self.loss_gen.get_total_loss(pos_score, neg_score, edge_weight) if self.args.regularization_coef > 0.0 and self.args.regularization_norm > 0: coef, nm = self.args.regularization_coef, self.args.regularization_norm reg = coef * (norm(self.entity_emb.curr_emb(), nm) + norm(self.relation_emb.curr_emb(), nm)) log['regularization'] = get_scalar(reg) loss = loss + reg return loss, log def update(self, gpu_id=-1): self.entity_emb.update(gpu_id) self.relation_emb.update(gpu_id) self.score_func.update(gpu_id)
Apache License 2.0
adammast/rscbot
transactions/transactions.py
Transactions.unsetTransactionChannel
python
async def unsetTransactionChannel(self, ctx):
        await self._save_trans_channel(ctx, None)
        await ctx.send("Done")
Unsets the transaction channel. Transactions will not be performed if no transaction channel is set
https://github.com/adammast/rscbot/blob/869c6e9e1184db201048e0f5fdc80019d64c421a/transactions/transactions.py#L256-L259
import discord import re from redbot.core import Config from redbot.core import commands from redbot.core import checks defaults = { "TransChannel": None, "DevLeagueTiers": [], "DevLeagueCutMessage": None, "NoDevLeagueCutMessage": None } class Transactions(commands.Cog): SUBBED_OUT_ROLE = "Subbed Out" def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self, identifier=1234567895, force_registration=True) self.config.register_guild(**defaults) self.prefix_cog = bot.get_cog("PrefixManager") self.team_manager_cog = bot.get_cog("TeamManager") @commands.guild_only() @commands.command() @checks.admin_or_permissions(manage_roles=True) async def genericAnnounce(self, ctx, *, message): try: trans_channel = await self._trans_channel(ctx) await trans_channel.send(message) await ctx.send("Done") except KeyError: await ctx.send(":x: Transaction log channel not set") @commands.guild_only() @commands.command() @checks.admin_or_permissions(manage_roles=True) async def draft(self, ctx, user: discord.Member, team_name: str, round: int = None, pick: int = None): franchise_role, tier_role = await self.team_manager_cog._roles_for_team(ctx, team_name) gm_name = self._get_gm_name(ctx, franchise_role) if franchise_role in user.roles: message = "Round {0} Pick {1}: {2} was kept by the {3} ({4} - {5})".format(round, pick, user.mention, team_name, gm_name, tier_role.name) else: message = "Round {0} Pick {1}: {2} was drafted by the {3} ({4} - {5})".format(round, pick, user.mention, team_name, gm_name, tier_role.name) trans_channel = await self._trans_channel(ctx) if trans_channel is not None: try: await self.add_player_to_team(ctx, user, team_name) free_agent_roles = await self.find_user_free_agent_roles(ctx, user) await trans_channel.send(message) draftEligibleRole = None for role in user.roles: if role.name == "Draft Eligible": draftEligibleRole = role break if len(free_agent_roles) > 0: for role in free_agent_roles: await user.remove_roles(role) if draftEligibleRole is not None: await user.remove_roles(draftEligibleRole) await ctx.send("Done") except KeyError: await ctx.send(":x: Free agent role not found in dictionary") except LookupError: await ctx.send(":x: Free agent role not found in server") return @commands.guild_only() @commands.command() @checks.admin_or_permissions(manage_roles=True) async def sign(self, ctx, user: discord.Member, team_name: str): franchise_role, tier_role = await self.team_manager_cog._roles_for_team(ctx, team_name) if franchise_role in user.roles and tier_role in user.roles: await ctx.send(":x: {0} is already on the {1}".format(user.mention, team_name)) return trans_channel = await self._trans_channel(ctx) if trans_channel is not None: try: await self.add_player_to_team(ctx, user, team_name) free_agent_roles = await self.find_user_free_agent_roles(ctx, user) if len(free_agent_roles) > 0: for role in free_agent_roles: await user.remove_roles(role) gm_name = self._get_gm_name(ctx, franchise_role) message = "{0} was signed by the {1} ({2} - {3})".format(user.mention, team_name, gm_name, tier_role.name) await trans_channel.send(message) await ctx.send("Done") except Exception as e: await ctx.send(e) @commands.guild_only() @commands.command() @checks.admin_or_permissions(manage_roles=True) async def cut(self, ctx, user : discord.Member, team_name: str, tier_fa_role: discord.Role = None): franchise_role, tier_role = await self.team_manager_cog._roles_for_team(ctx, team_name) trans_channel = await self._trans_channel(ctx) if trans_channel is not None: try: await 
self.remove_player_from_team(ctx, user, team_name) if not self.team_manager_cog.is_gm(user): if tier_fa_role is None: role_name = "{0}FA".format((await self.team_manager_cog.get_current_tier_role(ctx, user)).name) tier_fa_role = self.team_manager_cog._find_role_by_name(ctx, role_name) fa_role = self.team_manager_cog._find_role_by_name(ctx, "Free Agent") await self.team_manager_cog._set_user_nickname_prefix(ctx, "FA", user) await user.add_roles(tier_fa_role, fa_role) gm_name = self._get_gm_name(ctx, franchise_role) message = "{0} was cut by the {1} ({2} - {3})".format(user.mention, team_name, gm_name, tier_role.name) await trans_channel.send(message) await self._maybe_send_dev_league_dm(ctx, user, tier_role) await ctx.send("Done") except KeyError: await ctx.send(":x: Free agent role not found in dictionary") except LookupError: await ctx.send(":x: Free agent role not found in server") @commands.guild_only() @commands.command() @checks.admin_or_permissions(manage_roles=True) async def trade(self, ctx, user: discord.Member, new_team_name: str, user_2: discord.Member, new_team_name_2: str): franchise_role_1, tier_role_1 = await self.team_manager_cog._roles_for_team(ctx, new_team_name) franchise_role_2, tier_role_2 = await self.team_manager_cog._roles_for_team(ctx, new_team_name_2) gm_name_1 = self._get_gm_name(ctx, franchise_role_1) gm_name_2 = self._get_gm_name(ctx, franchise_role_2) if franchise_role_1 in user.roles and tier_role_1 in user.roles: await ctx.send(":x: {0} is already on the {1}".format(user.mention, new_team_name)) return if franchise_role_2 in user_2.roles and tier_role_2 in user_2.roles: await ctx.send(":x: {0} is already on the {1}".format(user_2.mention, new_team_name_2)) return trans_channel = await self._trans_channel(ctx) if trans_channel is not None: await self.remove_player_from_team(ctx, user, new_team_name_2) await self.remove_player_from_team(ctx, user_2, new_team_name) await self.add_player_to_team(ctx, user, new_team_name) await self.add_player_to_team(ctx, user_2, new_team_name_2) message = "{0} was traded by the {1} ({4} - {5}) to the {2} ({6} - {7}) for {3}".format(user.mention, new_team_name_2, new_team_name, user_2.mention, gm_name_2, tier_role_2.name, gm_name_1, tier_role_1.name) await trans_channel.send(message) await ctx.send("Done") @commands.guild_only() @commands.command() @checks.admin_or_permissions(manage_roles=True) async def sub(self, ctx, user: discord.Member, team_name: str, subbed_out_user: discord.Member = None): trans_channel = await self._trans_channel(ctx) free_agent_role = self.team_manager_cog._find_role_by_name(ctx, "Free Agent") if trans_channel is not None: leagueRole = self.team_manager_cog._find_role_by_name(ctx, "League") if leagueRole is not None: franchise_role, team_tier_role = await self.team_manager_cog._roles_for_team(ctx, team_name) if franchise_role in user.roles and team_tier_role in user.roles: if free_agent_role in user.roles: await user.remove_roles(franchise_role) fa_tier_role = self.team_manager_cog._find_role_by_name(ctx, "{0}FA".format(team_tier_role)) if not fa_tier_role in user.roles: player_tier = await self.get_tier_role_for_fa(ctx, user) await user.remove_roles(team_tier_role) await user.add_roles(player_tier) else: await user.remove_roles(team_tier_role) gm = self._get_gm_name(ctx, franchise_role, True) message = "{0} has finished their time as a substitute for the {1} ({2} - {3})".format(user.name, team_name, gm, team_tier_role.name) subbed_out_role = self.team_manager_cog._find_role_by_name(ctx, 
self.SUBBED_OUT_ROLE) if subbed_out_role: team_members = self.team_manager_cog.members_from_team(ctx, franchise_role, team_tier_role) for team_member in team_members: await team_member.remove_roles(subbed_out_role) player_ratings = self.bot.get_cog("PlayerRatings") if player_ratings: await player_ratings.reset_temp_rating(ctx, user) else: if free_agent_role in user.roles: player_tier = await self.get_tier_role_for_fa(ctx, user) await user.remove_roles(player_tier) await user.add_roles(franchise_role, team_tier_role, leagueRole) gm = self._get_gm_name(ctx, franchise_role) message = "{0} was signed to a temporary contract by the {1} ({2} - {3})".format(user.mention, team_name, gm, team_tier_role.name) subbed_out_role = self.team_manager_cog._find_role_by_name(ctx, self.SUBBED_OUT_ROLE) if subbed_out_user and subbed_out_role: await subbed_out_user.add_roles(subbed_out_role) player_ratings = self.bot.get_cog("PlayerRatings") if player_ratings: await player_ratings.set_player_temp_rating(ctx, user, subbed_out_user) elif subbed_out_user: await ctx.send(":x: The subbed out role is not set in this server") await trans_channel.send(message) await ctx.send("Done") @commands.guild_only() @commands.command() @checks.admin_or_permissions(manage_roles=True) async def promote(self, ctx, user: discord.Member, team_name: str): old_team_name = await self.team_manager_cog.get_current_team_name(ctx, user) if old_team_name is not None: if (await self.team_manager_cog._roles_for_team(ctx, old_team_name))[0] != (await self.team_manager_cog._roles_for_team(ctx, team_name))[0]: await ctx.send(":x: {0} is not in the same franchise as {1}'s current team, the {2}".format(team_name.name, user.name, old_team_name)) return trans_channel = await self._trans_channel(ctx) if trans_channel: await self.remove_player_from_team(ctx, user, old_team_name) await self.add_player_to_team(ctx, user, team_name) franchise_role, tier_role = await self.team_manager_cog._roles_for_team(ctx, team_name) gm_name = self._get_gm_name(ctx, franchise_role) message = "{0} was promoted to the {1} ({2} - {3})".format(user.mention, team_name, gm_name, tier_role.name) await trans_channel.send(message) await ctx.send("Done") else: await ctx.send("Either {0} isn't on a team right now or his current team can't be found".format(user.name)) @commands.guild_only() @commands.command(aliases=["setTransChannel"]) @checks.admin_or_permissions(manage_guild=True) async def setTransactionChannel(self, ctx, trans_channel: discord.TextChannel): await self._save_trans_channel(ctx, trans_channel.id) await ctx.send("Done") @commands.guild_only() @commands.command(aliases=["getTransChannel"]) @checks.admin_or_permissions(manage_guild=True) async def getTransactionChannel(self, ctx): try: await ctx.send("Transaction log channel set to: {0}".format((await self._trans_channel(ctx)).mention)) except: await ctx.send(":x: Transaction log channel not set") @commands.guild_only() @commands.command(aliases=["unsetTransChannel"]) @checks.admin_or_permissions(manage_guild=True)
MIT License
tommoral/dicodile
benchmarks/other/sporco/admm/admm.py
ADMM.obfn_f
python
def obfn_f(self, X):
        raise NotImplementedError()
Compute :math:`f(\mathbf{x})` component of ADMM objective function.

Overriding this method is required if :meth:`eval_objfn` is not overridden.
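A hedged sketch of the override contract (illustrative only; the `A` and `S` attributes below are hypothetical and not part of sporco's ADMM base class), showing the data-fidelity term that `eval_objfn` adds to `obfn_g`:

import numpy as np

class MyADMM(ADMM):
    def obfn_f(self, X):
        # Illustrative quadratic data term 0.5 * ||A X - S||_2^2; A and S are
        # assumed to have been set up elsewhere by the subclass.
        return 0.5 * np.linalg.norm(self.A.dot(X) - self.S) ** 2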
https://github.com/tommoral/dicodile/blob/7c069c6b8b13556c3227d9e951f0bca6f90caf9c/benchmarks/other/sporco/admm/admm.py#L649-L656
from __future__ import division from __future__ import print_function from builtins import object import copy import warnings import numpy as np from benchmarks.other.sporco import cdict from benchmarks.other.sporco import util from benchmarks.other.sporco.util import u from benchmarks.other.sporco import common __author__ = """Brendt Wohlberg <brendt@ieee.org>""" class ADMM(common.IterativeSolver): class Options(cdict.ConstrainedDict): defaults = {'FastSolve': False, 'Verbose': False, 'StatusHeader': True, 'DataType': None, 'MaxMainIter': 1000, 'IterTimer': 'solve', 'AbsStopTol': 0.0, 'RelStopTol': 1e-3, 'RelaxParam': 1.0, 'rho': None, 'AutoRho': { 'Enabled': False, 'Period': 10, 'Scaling': 2.0, 'RsdlRatio': 10.0, 'RsdlTarget': None, 'AutoScaling': False, 'StdResiduals': False }, 'Y0': None, 'U0': None, 'Callback': None } def __init__(self, opt=None): if opt is None: opt = {} cdict.ConstrainedDict.__init__(self, opt) fwiter = 4 fpothr = 2 itstat_fields_objfn = ('ObjFun', 'FVal', 'GVal') itstat_fields_alg = ('PrimalRsdl', 'DualRsdl', 'EpsPrimal', 'EpsDual', 'Rho') itstat_fields_extra = () hdrtxt_objfn = ('Fnc', 'f', 'g') hdrval_objfun = {'Fnc': 'ObjFun', 'f': 'FVal', 'g': 'GVal'} def __new__(cls, *args, **kwargs): instance = super(ADMM, cls).__new__(cls) instance.timer = util.Timer(['init', 'solve', 'solve_wo_func', 'solve_wo_rsdl']) instance.timer.start('init') return instance def __init__(self, Nx, yshape, ushape, dtype, opt=None): if opt is None: opt = ADMM.Options() if not isinstance(opt, ADMM.Options): raise TypeError('Parameter opt must be an instance of ' 'ADMM.Options') self.opt = opt self.Nx = Nx self.Nc = np.product(np.array(ushape)) self.set_dtype(opt, dtype) self.set_attr('rho', opt['rho'], dval=1.0, dtype=self.dtype) self.set_attr('rho_tau', opt['AutoRho', 'Scaling'], dval=2.0, dtype=self.dtype) self.set_attr('rho_mu', opt['AutoRho', 'RsdlRatio'], dval=10.0, dtype=self.dtype) self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0, dtype=self.dtype) self.set_attr('rlx', opt['RelaxParam'], dval=1.0, dtype=self.dtype) if not hasattr(self, 'X'): self.X = None if self.opt['Y0'] is None: self.Y = self.yinit(yshape) else: self.Y = self.opt['Y0'].astype(self.dtype, copy=True) self.Yprev = self.Y.copy() if self.opt['U0'] is None: self.U = self.uinit(ushape) else: self.U = self.opt['U0'].astype(self.dtype, copy=True) self.itstat = [] self.k = 0 def yinit(self, yshape): return np.zeros(yshape, dtype=self.dtype) def uinit(self, ushape): return np.zeros(ushape, dtype=self.dtype) def solve(self): fmtstr, nsep = self.display_start() self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl']) for self.k in range(self.k, self.k + self.opt['MaxMainIter']): self.Yprev = self.Y.copy() self.xstep() self.relax_AX() self.ystep() self.ustep() self.timer.stop('solve_wo_rsdl') if self.opt['AutoRho', 'Enabled'] or not self.opt['FastSolve']: r, s, epri, edua = self.compute_residuals() self.timer.start('solve_wo_rsdl') self.timer.stop(['solve_wo_func', 'solve_wo_rsdl']) if not self.opt['FastSolve']: itst = self.iteration_stats(self.k, r, s, epri, edua) self.itstat.append(itst) self.display_status(fmtstr, itst) self.timer.start(['solve_wo_func', 'solve_wo_rsdl']) self.timer.stop('solve_wo_rsdl') if self.opt['AutoRho', 'Enabled'] or not self.opt['FastSolve']: self.update_rho(self.k, r, s) self.timer.start('solve_wo_rsdl') if self.opt['Callback'] is not None: if self.opt['Callback'](self): break if self.opt['AutoRho', 'Enabled'] or not self.opt['FastSolve']: if r < epri and s < edua: break self.k 
+= 1 self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl']) self.display_end(nsep) return self.getmin() @property def runtime(self): warnings.warn("admm.ADMM.runtime attribute has been replaced by " "an upgraded timer class: please see the documentation " "for admm.ADMM.solve method and util.Timer class", PendingDeprecationWarning) return self.timer.elapsed('init') + self.timer.elapsed('solve') def getmin(self): return self.X def xstep(self): raise NotImplementedError() def ystep(self): raise NotImplementedError() def ustep(self): self.U += self.rsdl_r(self.AX, self.Y) def relax_AX(self): self.AXnr = self.cnst_A(self.X) if self.rlx == 1.0: self.AX = self.AXnr else: if not hasattr(self, '_cnst_c'): self._cnst_c = self.cnst_c() alpha = self.rlx self.AX = alpha*self.AXnr - (1 - alpha)*(self.cnst_B(self.Y) - self._cnst_c) def compute_residuals(self): if self.opt['AutoRho', 'StdResiduals']: r = np.linalg.norm(self.rsdl_r(self.AXnr, self.Y)) s = np.linalg.norm(self.rsdl_s(self.Yprev, self.Y)) epri = np.sqrt(self.Nc) * self.opt['AbsStopTol'] + self.rsdl_rn(self.AXnr, self.Y) * self.opt['RelStopTol'] edua = np.sqrt(self.Nx) * self.opt['AbsStopTol'] + self.rsdl_sn(self.U) * self.opt['RelStopTol'] else: rn = self.rsdl_rn(self.AXnr, self.Y) if rn == 0.0: rn = 1.0 sn = self.rsdl_sn(self.U) if sn == 0.0: sn = 1.0 r = np.linalg.norm(self.rsdl_r(self.AXnr, self.Y)) / rn s = np.linalg.norm(self.rsdl_s(self.Yprev, self.Y)) / sn epri = np.sqrt(self.Nc) * self.opt['AbsStopTol'] / rn + self.opt['RelStopTol'] edua = np.sqrt(self.Nx) * self.opt['AbsStopTol'] / sn + self.opt['RelStopTol'] return r, s, epri, edua @classmethod def hdrtxt(cls): return ('Itn',) + cls.hdrtxt_objfn + ('r', 's', u('ρ')) @classmethod def hdrval(cls): hdrmap = {'Itn': 'Iter'} hdrmap.update(cls.hdrval_objfun) hdrmap.update({'r': 'PrimalRsdl', 's': 'DualRsdl', u('ρ'): 'Rho'}) return hdrmap def iteration_stats(self, k, r, s, epri, edua): tk = self.timer.elapsed(self.opt['IterTimer']) tpl = (k,) + self.eval_objfn() + (r, s, epri, edua, self.rho) + self.itstat_extra() + (tk,) return type(self).IterationStats(*tpl) def eval_objfn(self): fval = self.obfn_f(self.X) gval = self.obfn_g(self.Y) obj = fval + gval return (obj, fval, gval) def itstat_extra(self): return () def getitstat(self): return util.transpose_ntpl_list(self.itstat) def update_rho(self, k, r, s): if self.opt['AutoRho', 'Enabled']: tau = self.rho_tau mu = self.rho_mu xi = self.rho_xi if k != 0 and np.mod(k + 1, self.opt['AutoRho', 'Period']) == 0: if self.opt['AutoRho', 'AutoScaling']: if s == 0.0 or r == 0.0: rhomlt = tau else: rhomlt = np.sqrt(r / (s * xi) if r > s * xi else (s * xi) / r) if rhomlt > tau: rhomlt = tau else: rhomlt = tau rsf = 1.0 if r > xi * mu * s: rsf = rhomlt elif s > (mu / xi) * r: rsf = 1.0 / rhomlt self.rho *= self.dtype.type(rsf) self.U /= rsf if rsf != 1.0: self.rhochange() def display_start(self): if self.opt['Verbose']: if self.opt['AutoRho', 'Enabled']: hdrtxt = type(self).hdrtxt() else: hdrtxt = type(self).hdrtxt()[0:-1] hdrstr, fmtstr, nsep = common.solve_status_str( hdrtxt, fwdth0=type(self).fwiter, fprec=type(self).fpothr) if self.opt['StatusHeader']: print(hdrstr) print("-" * nsep) else: fmtstr, nsep = '', 0 return fmtstr, nsep def display_status(self, fmtstr, itst): if self.opt['Verbose']: hdrtxt = type(self).hdrtxt() hdrval = type(self).hdrval() itdsp = tuple([getattr(itst, hdrval[col]) for col in hdrtxt]) if not self.opt['AutoRho', 'Enabled']: itdsp = itdsp[0:-1] print(fmtstr % itdsp) def display_end(self, nsep): if self.opt['Verbose'] 
and self.opt['StatusHeader']: print("-" * nsep) def var_x(self): return self.X def var_y(self): return self.Y def var_u(self): return self.U
BSD 3-Clause New or Revised License
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/models/login_history.py
LoginHistory.__eq__
python
def __eq__(self, other):
        if not isinstance(other, LoginHistory):
            return False

        return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/_backends/iam/models/login_history.py#L196-L203
from pprint import pformat from six import iteritems import re class LoginHistory(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'date': 'datetime', 'ip_address': 'str', 'success': 'bool', 'user_agent': 'str' } attribute_map = { 'date': 'date', 'ip_address': 'ip_address', 'success': 'success', 'user_agent': 'user_agent' } def __init__(self, date=None, ip_address=None, success=None, user_agent=None): self._date = date self._ip_address = ip_address self._success = success self._user_agent = user_agent self.discriminator = None @property def date(self): return self._date @date.setter def date(self, date): if date is None: raise ValueError("Invalid value for `date`, must not be `None`") self._date = date @property def ip_address(self): return self._ip_address @ip_address.setter def ip_address(self, ip_address): if ip_address is None: raise ValueError("Invalid value for `ip_address`, must not be `None`") self._ip_address = ip_address @property def success(self): return self._success @success.setter def success(self, success): if success is None: raise ValueError("Invalid value for `success`, must not be `None`") self._success = success @property def user_agent(self): return self._user_agent @user_agent.setter def user_agent(self, user_agent): if user_agent is None: raise ValueError("Invalid value for `user_agent`, must not be `None`") self._user_agent = user_agent def to_dict(self): result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
apprenticelearner/al_core
apprentice/working_memory/base.py
WorkingMemory.output
python
def output(self) -> object:
        pass
Returns an object; what will ultimately get sent back as an action.

.. todo:: Write a setter to set object.
https://github.com/apprenticelearner/al_core/blob/2d720f73d0ce75f68f67067bad33ad0ab8835056/apprentice/working_memory/base.py#L136-L145
from abc import ABCMeta from abc import abstractmethod from typing import Any from typing import Callable from typing import Collection from typing import Dict import jsondiff from apprentice.working_memory.representation import Activation from apprentice.working_memory.representation import Skill class WorkingMemory(metaclass=ABCMeta): def __init__(self, ke=None, reset=True): self.lookup = {} def build_skill(self, _condition: Any, _function: Callable) -> Skill: return self.skill_factory.build(_condition, _function) def update(self, diff: Dict) -> None: for k in diff: if k is jsondiff.symbols.replace: keys = [k2 for k2 in self.lookup] for k2 in keys: self.remove_fact(k2) for k2, v in diff[k].items(): self.add_fact(k2, v) elif k is jsondiff.symbols.delete: for k2 in diff[k]: self.remove_fact(k2) elif k in self.lookup: self.update_fact(k, diff[k]) else: self.add_fact(k, diff[k]) @property @abstractmethod def facts(self) -> Collection[dict]: pass @property @abstractmethod def skills(self) -> Collection[Skill]: pass def add_facts(self, facts: dict) -> None: for key, fact in facts.items(): self.add_fact(key, fact) @abstractmethod def add_fact(self, key: object, fact: dict) -> None: pass @abstractmethod def remove_fact(self, key: object) -> bool: pass @abstractmethod def update_fact(self, key: object, diff: dict) -> None: pass def add_skills(self, skills: Collection[Skill]) -> None: for skill in skills: self.add_skill(skill) @abstractmethod def add_skill(self, skill: Skill) -> None: pass @abstractmethod def update_skill(self, skill: Skill) -> None: pass @property @abstractmethod def activations(self) -> Collection[Activation]: pass @property @abstractmethod
MIT License
dmulyalin/ttp
ttp/output/validate_cerberus.py
validate
python
def validate(data, schema, result="valid", info="", errors="", allow_unknown=True):
    if not HAS_LIBS:
        return data
    schema_data = _ttp_["output_object"].template_obj.vars.get(schema, None)
    if not schema_data:
        log.error("ttp.output.validate, schema '{}' not found".format(schema))
        return data
    validator_engine.allow_unknown = allow_unknown
    if isinstance(data, dict):
        return _run_validation(
            data, schema_data, info, errors, result, validator_engine
        )
    elif isinstance(data, list):
        return [
            _run_validation(i, schema_data, info, errors, result, validator_engine)
            for i in data
            if isinstance(i, dict)
        ]
Function to validate data using Cerberus validation library.

Args::

    * schema - schema template variable name
    * result - name of the field to assign validation result
    * info - string, contains additional information about test
    * errors - name of the field to assign validation errors
    * allow_unknown - informs cerberus to ignore unknown keys
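Since the check itself is plain Cerberus, a hedged, TTP-independent illustration of what `_run_validation` ultimately evaluates (the schema and document below are invented):

from cerberus import Validator

# Invented schema/document pair purely for illustration.
schema = {"interface": {"type": "string"}, "mtu": {"type": "integer", "min": 68}}
document = {"interface": "Gi0/1", "mtu": 9000}

v = Validator(allow_unknown=True)
print(v.validate(document=document, schema=schema))  # True
print(v.errors)                                      # {} when the document is valid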
https://github.com/dmulyalin/ttp/blob/a3a3753724c4d980dff23548ab93fa6d9d389f01/ttp/output/validate_cerberus.py#L32-L59
import logging log = logging.getLogger(__name__) try: from cerberus import Validator HAS_LIBS = True except ImportError: log.error( "ttp.validate, failed to import Cerberus library, make sure it is installed" ) HAS_LIBS = False if HAS_LIBS: validator_engine = Validator() def _run_validation(data, schema_data, info, errors, result, validator_engine): ret = {result: validator_engine.validate(document=data, schema=schema_data)} if info: try: formatted, _ = _ttp_["group"]["sformat"](data, string=info, add_field="inf") ret["info"] = formatted["inf"] except: ret["info"] = info if errors: ret[errors] = validator_engine.errors return ret
MIT License
guillaumeblaquiere/berglas-python
berglas_python.py
_envelope_encrypt
python
def _envelope_encrypt(plaintext: bytes) -> (bytes, bytes):
    data_encryption_key = os.urandom(DATA_ENCRYPTION_KEY_SIZE)
    initialization_vector = os.urandom(GCM_NONCE_SIZE)
    cipher = Cipher(
        algorithms.AES(data_encryption_key),
        modes.GCM(initialization_vector),
        backend=default_backend()
    )
    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(plaintext) + encryptor.finalize()
    data = initialization_vector + ciphertext + encryptor.tag
    return (data_encryption_key, data)
Generates a unique Data Encryption Key and encrypts the plaintext with the given key.

:param plaintext: String to be encrypted
:return: The encryption key and resulting ciphertext
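A hedged round-trip sketch, pairing the function with `_envelope_decrypt` from the module context below (the secret value is made up):

# blob is laid out as 12-byte GCM nonce + ciphertext + 16-byte tag, so the DEK
# plus blob is everything needed to recover the plaintext.
secret = b"my-database-password"
dek, blob = _envelope_encrypt(secret)
assert _envelope_decrypt(dek, blob) == "my-database-password"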
https://github.com/guillaumeblaquiere/berglas-python/blob/1ef8fae490928304689adb8e0b25970b2d5a50d2/berglas_python.py#L166-L201
import base64 import logging import os from cryptography.hazmat.backends import default_backend from google.cloud import storage, kms from google.api_core import iam from cryptography.hazmat.primitives.ciphers import ( Cipher, algorithms, modes ) BERGLAS_PREFIX = "berglas://" METADATA_KMS_KEY = "berglas-kms-key" METADATA_ID_KEY = "berglas-secret" METADATA_CONTENT_TYPE = "text/plain; charset=utf-8" METADATA_CACHE_CONTROL = "private, no-cache, no-store, no-transform, max-age=0" BLOB_CHUNK_SIZE = 256 * 1024 GCM_NONCE_SIZE = 12 GCM_TAG_SIZE = 16 DATA_ENCRYPTION_KEY_SIZE = 32 LOCATION = "global" KEY_RING = "berglas" CRYPTO_KEY = "berglas-key" def str2b(s: str) -> bytes: return bytes(s, "UTF-8") def b2str(bystr: str) -> str: return bystr.decode("UTF-8") def _validate_env_var_prefix(env_var_value: str): if not env_var_value.startswith(BERGLAS_PREFIX): log_msg = f"No berglas prefix for the env var value {env_var_value}" logging.error(log_msg) raise Exception(log_msg) def _validate_project_id(project_id: str): if project_id == "": log_msg = "Project id can't be empty" logging.error(log_msg) raise Exception(log_msg) def copy_blob_iam_policy(blob): new_policy = iam.Policy() blob_iam_policies = blob.get_iam_policy() for role_name, entity in blob_iam_policies.items(): new_policy[role_name] = list(entity) return new_policy def Replace(project_id: str, env_var_key: str): env_var_value: str = os.environ.get(env_var_key) if env_var_value == "": logging.info(f"No value for the env var key {env_var_key}") return plaintext = Resolve(project_id, env_var_value) os.environ[env_var_key] = plaintext def _get_bucket_object(env_var_value: str) -> (str, str): without_prefix = env_var_value[len(BERGLAS_PREFIX):] if without_prefix == "": log_msg = f"No bucket and object defined in {env_var_value}" logging.error(log_msg) raise Exception(log_msg) splitted = without_prefix.split("/", 1) if splitted[1] == "": log_msg = f"No object defined in {env_var_value}" logging.error(log_msg) raise Exception(log_msg) return splitted[0], splitted[1] def _envelope_decrypt(data_encryption_key: str, data: str) -> str: nonce = data[:GCM_NONCE_SIZE] ciphertext = data[GCM_NONCE_SIZE:-GCM_TAG_SIZE] tag = data[-GCM_TAG_SIZE:] algo = algorithms.AES(data_encryption_key) cipher = Cipher( algo, modes.GCM(nonce, tag), backend=default_backend() ) decrypter = cipher.decryptor() return b2str(decrypter.update(ciphertext))
Apache License 2.0
chaffelson/whoville
whoville/cloudbreak/models/instance_meta_data.py
InstanceMetaData.instance_type
python
def instance_type(self, instance_type):
        allowed_values = ["GATEWAY", "GATEWAY_PRIMARY", "CORE"]
        if instance_type not in allowed_values:
            raise ValueError(
                "Invalid value for `instance_type` ({0}), must be one of {1}"
                .format(instance_type, allowed_values)
            )

        self._instance_type = instance_type
Sets the instance_type of this InstanceMetaData.
type of the instance

:param instance_type: The instance_type of this InstanceMetaData.
:type: str
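A hedged usage sketch of the validation this setter performs (the rejected value is invented):

meta = InstanceMetaData()
meta.instance_type = "CORE"        # accepted and stored in self._instance_type
try:
    meta.instance_type = "WORKER"  # not in allowed_values
except ValueError as exc:
    print(exc)  # Invalid value for `instance_type` (WORKER), must be one of [...]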
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/models/instance_meta_data.py#L291-L306
from pprint import pformat from six import iteritems import re class InstanceMetaData(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'private_ip': 'str', 'public_ip': 'str', 'ssh_port': 'int', 'instance_id': 'str', 'ambari_server': 'bool', 'discovery_fqdn': 'str', 'instance_group': 'str', 'instance_status': 'str', 'instance_type': 'str' } attribute_map = { 'private_ip': 'privateIp', 'public_ip': 'publicIp', 'ssh_port': 'sshPort', 'instance_id': 'instanceId', 'ambari_server': 'ambariServer', 'discovery_fqdn': 'discoveryFQDN', 'instance_group': 'instanceGroup', 'instance_status': 'instanceStatus', 'instance_type': 'instanceType' } def __init__(self, private_ip=None, public_ip=None, ssh_port=None, instance_id=None, ambari_server=False, discovery_fqdn=None, instance_group=None, instance_status=None, instance_type=None): self._private_ip = None self._public_ip = None self._ssh_port = None self._instance_id = None self._ambari_server = None self._discovery_fqdn = None self._instance_group = None self._instance_status = None self._instance_type = None if private_ip is not None: self.private_ip = private_ip if public_ip is not None: self.public_ip = public_ip if ssh_port is not None: self.ssh_port = ssh_port if instance_id is not None: self.instance_id = instance_id if ambari_server is not None: self.ambari_server = ambari_server if discovery_fqdn is not None: self.discovery_fqdn = discovery_fqdn if instance_group is not None: self.instance_group = instance_group if instance_status is not None: self.instance_status = instance_status if instance_type is not None: self.instance_type = instance_type @property def private_ip(self): return self._private_ip @private_ip.setter def private_ip(self, private_ip): self._private_ip = private_ip @property def public_ip(self): return self._public_ip @public_ip.setter def public_ip(self, public_ip): self._public_ip = public_ip @property def ssh_port(self): return self._ssh_port @ssh_port.setter def ssh_port(self, ssh_port): self._ssh_port = ssh_port @property def instance_id(self): return self._instance_id @instance_id.setter def instance_id(self, instance_id): self._instance_id = instance_id @property def ambari_server(self): return self._ambari_server @ambari_server.setter def ambari_server(self, ambari_server): self._ambari_server = ambari_server @property def discovery_fqdn(self): return self._discovery_fqdn @discovery_fqdn.setter def discovery_fqdn(self, discovery_fqdn): self._discovery_fqdn = discovery_fqdn @property def instance_group(self): return self._instance_group @instance_group.setter def instance_group(self, instance_group): self._instance_group = instance_group @property def instance_status(self): return self._instance_status @instance_status.setter def instance_status(self, instance_status): allowed_values = ["REQUESTED", "CREATED", "UNREGISTERED", "REGISTERED", "DECOMMISSIONED", "TERMINATED", "DELETED_ON_PROVIDER_SIDE", "FAILED", "STOPPED"] if instance_status not in allowed_values: raise ValueError( "Invalid value for `instance_status` ({0}), must be one of {1}" .format(instance_status, allowed_values) ) self._instance_status = instance_status @property def instance_type(self): return self._instance_type @instance_type.setter
Apache License 2.0
cdisselkoen/pitchfork
abstractdata.py
secretValue
python
def secretValue(value=None, bits=64):
    return AbstractNonPointer(bits=bits, value=value, secret=True)
A single secret value

bits: how many bits long the value is
value: if not None, then a specific (concrete or symbolic) value which this value takes on
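A hedged sketch of how this helper composes with the other constructors in the module (the argument layout is invented for illustration):

# One public 64-bit length argument plus a pointer to a buffer of 16 secret bytes.
args = [
    publicValue(bits=64),
    AbstractPointer([secretValue(bits=8) for _ in range(16)]),
]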
https://github.com/cdisselkoen/pitchfork/blob/0a95037f6f128ce41d161c545c932def6d86e1c6/abstractdata.py#L78-L84
class AbstractValue: def __init__(self, *, bits=64, value=None, secret): self.bits = bits self.secret = secret self.value = value class AbstractNonPointer(AbstractValue): pass class AbstractPointer(AbstractValue): def __init__(self, pointee, maxPointeeSize=0x10000, cannotPointSecret=False): super().__init__(bits=64, secret=False) assert isinstance(pointee, AbstractValue) or (isinstance(pointee, list) and all(isinstance(v, AbstractValue) for v in pointee)) self.pointee = pointee self.maxPointeeSize = maxPointeeSize self.cannotPointSecret = cannotPointSecret class AbstractPointerToUnconstrainedPublic(AbstractValue): def __init__(self, maxPointeeSize=0x10000, cannotPointSecret=False): super().__init__(bits=64, secret=False) self.maxPointeeSize = maxPointeeSize self.cannotPointSecret = cannotPointSecret class AbstractSecretPointer(AbstractValue): def __init__(self): super().__init__(bits=64, secret=True) def publicValue(value=None, bits=64): return AbstractNonPointer(bits=bits, value=value, secret=False)
BSD 3-Clause New or Revised License
fsxfreak/esys-pbi
src/pupil/pupil_src/shared_modules/calibration_routines/camera_intrinsics_estimation.py
_make_grid
python
def _make_grid(dim=(11, 4)):
    x, y = range(dim[0]), range(dim[1])
    p = np.array([[[s, i] for s in x] for i in y], dtype=np.float32)
    p[:, 1::2, 1] += 0.5
    p = np.reshape(p, (-1, 2), 'F')
    x_scale = 1. / (np.amax(p[:, 0]) - np.amin(p[:, 0]))
    y_scale = 1. / (np.amax(p[:, 1]) - np.amin(p[:, 1]))
    p *= x_scale, x_scale / .5
    return p
this function generates the structure for an asymmetrical circle grid domain (0-1)
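A quick, hedged sanity check (numpy assumed imported as np, as in the module context):

pts = _make_grid()                        # default 11x4 asymmetric grid
print(pts.shape)                          # (44, 2): one (x, y) pair per circle
print(pts[:, 0].min(), pts[:, 0].max())   # x spans 0.0 .. 1.0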
https://github.com/fsxfreak/esys-pbi/blob/0ba8f21f5b7618623cd8a0889c3656739a9ce0d6/src/pupil/pupil_src/shared_modules/calibration_routines/camera_intrinsics_estimation.py#L375-L391
import os import cv2 import numpy as np from file_methods import save_object,load_object from gl_utils import adjust_gl_view,clear_gl_screen,basic_gl_setup,make_coord_system_pixel_based,make_coord_system_norm_based from methods import normalize import OpenGL.GL as gl from pyglui import ui from pyglui.cygl.utils import draw_polyline,draw_points,RGBA,draw_gl_texture from pyglui.pyfontstash import fontstash from pyglui.ui import get_opensans_font_path from glfw import * from . calibration_plugin_base import Calibration_Plugin import logging logger = logging.getLogger(__name__) pre_recorded_calibrations = { 'Pupil Cam1 ID2':{ (1280, 720):{ 'dist_coefs': np.array([[-0.6746215 , 0.46527537, 0.01448595, -0.00070578, -0.17128751]]), 'camera_name': 'Pupil Cam1 ID2', 'resolution': (1280, 720), 'camera_matrix': np.array([[ 1.08891909e+03, 0.00000000e+00, 6.67944178e+02], [ 0.00000000e+00, 1.03230180e+03, 3.52772854e+02], [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) } }, 'Logitech Webcam C930e':{ (1280, 720):{ 'dist_coefs': np.array([[ 0.06330768, -0.17328079, 0.00074967, 0.000353 , 0.07648477]]), 'camera_name': 'Logitech Webcam C930e', 'resolution': (1280, 720), 'camera_matrix': np.array([[ 739.72227378, 0. , 624.44490772], [ 0. , 717.84832227, 350.46000651], [ 0. , 0. , 1. ]]) } }, } def idealized_camera_calibration(resolution,f=1000.): return { 'dist_coefs': np.array([[ 0.,0.,0.,0.,0.]]), 'camera_name': 'ideal camera with focal length {}'.format(f), 'resolution': resolution, 'camera_matrix': np.array([[ f, 0., resolution[0]/2.], [ 0., f, resolution[1]/2.], [ 0., 0., 1. ]]) } def load_camera_calibration(g_pool): if g_pool.app == 'capture': try: camera_calibration = load_object(os.path.join(g_pool.user_dir,'camera_calibration')) camera_calibration['camera_name'] except KeyError: camera_calibration = None logger.warning('Invalid or Deprecated camera calibration found. Please recalibrate camera.') except: camera_calibration = None else: same_name = camera_calibration['camera_name'] == g_pool.capture.name same_resolution = camera_calibration['resolution'] == g_pool.capture.frame_size if not (same_name and same_resolution): logger.warning('Loaded camera calibration but camera name and/or resolution has changed.') camera_calibration = None else: logger.info("Loaded user calibrated calibration for {}@{}.".format(g_pool.capture.name,g_pool.capture.frame_size)) if not camera_calibration: logger.debug("Trying to load pre recorded calibration.") try: camera_calibration = pre_recorded_calibrations[g_pool.capture.name][g_pool.capture.frame_size] except KeyError: logger.info("Pre recorded calibration for {}@{} not found.".format(g_pool.capture.name,g_pool.capture.frame_size)) else: logger.info("Loaded pre recorded calibration for {}@{}.".format(g_pool.capture.name,g_pool.capture.frame_size)) if not camera_calibration: camera_calibration = idealized_camera_calibration(g_pool.capture.frame_size) logger.warning("Camera calibration not found. Will assume idealized camera. Please calibrate your cameras. Using camera 'Camera_Intrinsics_Estimation'.") else: try: camera_calibration = load_object(os.path.join(g_pool.rec_dir,'camera_calibration')) except: camera_calibration = idealized_camera_calibration(g_pool.capture.frame_size) logger.warning("Camera calibration not found. Will assume idealized camera. 
Please calibrate your cameras before your next recording.") else: logger.info("Loaded Camera calibration from file.") return camera_calibration def on_resize(window,w, h): active_window = glfwGetCurrentContext() glfwMakeContextCurrent(window) adjust_gl_view(w,h) glfwMakeContextCurrent(active_window) class Camera_Intrinsics_Estimation(Calibration_Plugin): def __init__(self,g_pool,fullscreen = False): super().__init__(g_pool) self.collect_new = False self.calculated = False self.obj_grid = _gen_pattern_grid((4, 11)) self.img_points = [] self.obj_points = [] self.count = 10 self.display_grid = _make_grid() self._window = None self.menu = None self.button = None self.clicks_to_close = 5 self.window_should_close = False self.fullscreen = fullscreen self.monitor_idx = 0 self.glfont = fontstash.Context() self.glfont.add_font('opensans',get_opensans_font_path()) self.glfont.set_size(32) self.glfont.set_color_float((0.2,0.5,0.9,1.0)) self.glfont.set_align_string(v_align='center') self.undist_img = None self.show_undistortion = False self.show_undistortion_switch = None self.camera_calibration = load_camera_calibration(self.g_pool) if self.camera_calibration: logger.info('Loaded camera calibration. Click show undistortion to verify.') logger.info('Hint: Straight lines in the real world should be straigt in the image.') self.camera_intrinsics = self.camera_calibration['camera_matrix'],self.camera_calibration['dist_coefs'],self.camera_calibration['resolution'] else: self.camera_intrinsics = None def init_gui(self): monitor_names = [glfwGetMonitorName(m) for m in glfwGetMonitors()] self.info = ui.Info_Text("Estimate Camera intrinsics of the world camera. Using an 11x9 asymmetrical circle grid. Click 'C' to capture a pattern.") self.g_pool.calibration_menu.append(self.info) self.menu = ui.Growing_Menu('Controls') self.menu.append(ui.Button('show Pattern',self.open_window)) self.menu.append(ui.Selector('monitor_idx',self,selection = range(len(monitor_names)),labels=monitor_names,label='Monitor')) self.menu.append(ui.Switch('fullscreen',self,label='Use Fullscreen')) self.show_undistortion_switch = ui.Switch('show_undistortion',self,label='show undistorted image') self.menu.append(self.show_undistortion_switch) if not self.camera_intrinsics: self.show_undistortion_switch.read_only=True self.g_pool.calibration_menu.append(self.menu) self.button = ui.Thumb('collect_new',self,setter=self.advance,label='C',hotkey='c') self.button.on_color[:] = (.3,.2,1.,.9) self.g_pool.quickbar.insert(0,self.button) def deinit_gui(self): if self.menu: self.g_pool.calibration_menu.remove(self.menu) self.g_pool.calibration_menu.remove(self.info) self.menu = None if self.button: self.g_pool.quickbar.remove(self.button) self.button = None def do_open(self): if not self._window: self.window_should_open = True def get_count(self): return self.count def advance(self,_): if self.count == 10: logger.info("Capture 10 calibration patterns.") self.button.status_text = "{:d} to go".format(self.count) self.calculated = False self.img_points = [] self.obj_points = [] self.collect_new = True def open_window(self): if not self._window: if self.fullscreen: monitor = glfwGetMonitors()[self.monitor_idx] mode = glfwGetVideoMode(monitor) height,width= mode[0],mode[1] else: monitor = None height,width = 640,480 self._window = glfwCreateWindow(height, width, "Calibration", monitor=monitor, share=glfwGetCurrentContext()) if not self.fullscreen: glfwSetWindowPos(self._window,200,31) glfwSetFramebufferSizeCallback(self._window,on_resize) 
glfwSetKeyCallback(self._window,self.on_key) glfwSetWindowCloseCallback(self._window,self.on_close) glfwSetMouseButtonCallback(self._window,self.on_button) on_resize(self._window,*glfwGetFramebufferSize(self._window)) active_window = glfwGetCurrentContext() glfwMakeContextCurrent(self._window) basic_gl_setup() glfwMakeContextCurrent(active_window) self.clicks_to_close = 5 def on_key(self,window, key, scancode, action, mods): if action == GLFW_PRESS: if key == GLFW_KEY_ESCAPE: self.on_close() def on_button(self,window,button, action, mods): if action ==GLFW_PRESS: self.clicks_to_close -=1 if self.clicks_to_close ==0: self.on_close() def on_close(self,window=None): self.window_should_close = True def close_window(self): self.window_should_close=False if self._window: glfwDestroyWindow(self._window) self._window = None def calculate(self): self.calculated = True self.count = 10 rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(np.array(self.obj_points), np.array(self.img_points),self.g_pool.capture.frame_size,None,None) logger.info("Calibrated Camera, RMS:{}".format(rms)) camera_calibration = {'camera_matrix':camera_matrix,'dist_coefs':dist_coefs,'camera_name':self.g_pool.capture.name,'resolution':self.g_pool.capture.frame_size} save_object(camera_calibration,os.path.join(self.g_pool.user_dir,"camera_calibration")) logger.info("Calibration saved to user folder") self.camera_intrinsics = camera_matrix,dist_coefs,self.g_pool.capture.frame_size self.show_undistortion_switch.read_only=False def update(self,frame,events): if self.collect_new: img = frame.img status, grid_points = cv2.findCirclesGrid(img, (4,11), flags=cv2.CALIB_CB_ASYMMETRIC_GRID) if status: self.img_points.append(grid_points) self.obj_points.append(self.obj_grid) self.collect_new = False self.count -=1 self.button.status_text = "{:d} to go".format(self.count) if self.count<=0 and not self.calculated: self.calculate() self.button.status_text = '' if self.window_should_close: self.close_window() if self.show_undistortion: adjusted_k,roi = cv2.getOptimalNewCameraMatrix(cameraMatrix= self.camera_intrinsics[0], distCoeffs=self.camera_intrinsics[1], imageSize=self.camera_intrinsics[2], alpha=0.5,newImgSize=self.camera_intrinsics[2],centerPrincipalPoint=1) self.undist_img = cv2.undistort(frame.img, self.camera_intrinsics[0], self.camera_intrinsics[1],newCameraMatrix=adjusted_k) def gl_display(self): for grid_points in self.img_points: calib_bounds = cv2.convexHull(grid_points)[:,0] draw_polyline(calib_bounds,1,RGBA(0.,0.,1.,.5),line_type=gl.GL_LINE_LOOP) if self._window: self.gl_display_in_window() if self.show_undistortion: gl.glPushMatrix() make_coord_system_norm_based() draw_gl_texture(self.undist_img) gl.glPopMatrix() def gl_display_in_window(self): active_window = glfwGetCurrentContext() glfwMakeContextCurrent(self._window) clear_gl_screen() gl.glMatrixMode(gl.GL_PROJECTION) gl.glLoadIdentity() p_window_size = glfwGetWindowSize(self._window) r = p_window_size[0]/15. 
gl.glOrtho(-r,p_window_size[0]+r,p_window_size[1]+r,-r ,-1,1) gl.glMatrixMode(gl.GL_MODELVIEW) gl.glLoadIdentity() grid = _make_grid()*min((p_window_size[0],p_window_size[1]*5.5/4.)) grid -= np.mean(grid) grid +=(p_window_size[0]/2-r,p_window_size[1]/2+r) draw_points(grid,size=r,color=RGBA(0.,0.,0.,1),sharpness=0.95) if self.clicks_to_close <5: self.glfont.set_size(int(p_window_size[0]/30.)) self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch {} more times to close window.'.format(self.clicks_to_close)) glfwSwapBuffers(self._window) glfwMakeContextCurrent(active_window) def get_init_dict(self): return {} def cleanup(self): if self._window: self.close_window() self.deinit_gui() def _gen_pattern_grid(size=(4,11)): pattern_grid = [] for i in range(size[1]): for j in range(size[0]): pattern_grid.append([(2*j)+i%2,i,0]) return np.asarray(pattern_grid, dtype='f4')
MIT License
halcy/mastodon.py
tests/test_streaming.py
Listener.handle_stream_
python
def handle_stream_(self, lines):
        class MockResponse():
            def __init__(self, data):
                self.data = data

            def iter_content(self, chunk_size):
                for line in self.data:
                    for byte in line:
                        bytearr = bytearray()
                        bytearr.append(byte)
                        yield(bytearr)
                    yield(b'\n')

        return self.handle_stream(MockResponse(map(six.b, lines)))
Test helper to avoid littering all tests with six.b().
https://github.com/halcy/mastodon.py/blob/e9d2c3d53f7b1d371e5dc5bf47e5fe335b698c85/tests/test_streaming.py#L80-L93
import six import pytest import itertools from mastodon.streaming import StreamListener, CallbackStreamListener from mastodon.Mastodon import MastodonMalformedEventError from mastodon import Mastodon import threading import time import select import vcr.stubs streaming_is_patched = False real_connections = [] close_connections = False def patch_streaming(): global streaming_is_patched global close_connections if streaming_is_patched == True: return streaming_is_patched = True real_get_response = vcr.stubs.VCRConnection.getresponse def fake_get_response(*args, **kwargs): global close_connections close_connections = False if args[0]._vcr_request.path.startswith("/api/v1/streaming/"): real_connections.append(args[0].real_connection) real_connection_real_get_response = args[0].real_connection.getresponse def fakeRealConnectionGetresponse(*args, **kwargs): response = real_connection_real_get_response(*args, **kwargs) real_body = b"" try: while close_connections == False: if len(select.select([response], [], [], 0.01)[0]) > 0: chunk = response.read(1) real_body += chunk except AttributeError: pass print(real_body) response.read = (lambda: real_body) return response args[0].real_connection.getresponse = fakeRealConnectionGetresponse return real_get_response(*args, **kwargs) vcr.stubs.VCRConnection.getresponse = fake_get_response def streaming_close(): global real_connections for connection in real_connections: connection.close() real_connections = [] close_connections = True class Listener(StreamListener): def __init__(self): self.updates = [] self.notifications = [] self.deletes = [] self.heartbeats = 0 def on_update(self, status): self.updates.append(status) def on_notification(self, notification): self.notifications.append(notification) def on_delete(self, status_id): self.deletes.append(status_id) def on_blahblah(self, data): pass def handle_heartbeat(self): self.heartbeats += 1
MIT License
ivannz/cplxmodule
cplxmodule/cplx.py
sinh
python
def sinh(input):
    return Cplx(torch.sinh(input.real) * torch.cos(input.imag),
                torch.cosh(input.real) * torch.sin(input.imag))
Compute the hyperbolic sine of the complex tensor in re-im pair.

sinh(z) = - j sin(j z)
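A hedged numerical check of the re-im split against Python's cmath (the sample value is arbitrary):

import cmath
import torch

# sinh(a + bj) = sinh(a)cos(b) + j cosh(a)sin(b)
z = Cplx(torch.tensor(0.3), torch.tensor(-1.2))   # 0.3 - 1.2j
w = sinh(z)
expected = cmath.sinh(0.3 - 1.2j)
assert abs(float(w.real) - expected.real) < 1e-4
assert abs(float(w.imag) - expected.imag) < 1e-4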
https://github.com/ivannz/cplxmodule/blob/d5fc89496ca4ea1f0a589a6d36c7ea2d4a8c9ef6/cplxmodule/cplx.py#L487-L493
import warnings from copy import deepcopy import torch import torch.nn.functional as F from math import sqrt from .utils import complex_view, fix_dim class Cplx(object): __slots__ = ("__real", "__imag") def __new__(cls, real, imag=None): if isinstance(real, cls): return real if isinstance(real, complex): real, imag = torch.tensor(real.real), torch.tensor(real.imag) elif isinstance(real, float): if imag is None: imag = 0.0 elif not isinstance(imag, float): raise TypeError("""Imaginary part must be float.""") real, imag = torch.tensor(real), torch.tensor(imag) elif not isinstance(real, torch.Tensor): raise TypeError("""Real part must be torch.Tensor.""") if imag is None: imag = torch.zeros_like(real) elif not isinstance(imag, torch.Tensor): raise TypeError("""Imaginary part must be torch.Tensor.""") if real.shape != imag.shape: raise ValueError("""Real and imaginary parts have """ """mistmatching shape.""") self = super().__new__(cls) self.__real, self.__imag = real, imag return self def __copy__(self): return type(self)(self.__real, self.__imag) def __deepcopy__(self, memo): real = deepcopy(self.__real, memo) imag = deepcopy(self.__imag, memo) return type(self)(real, imag) @property def real(self): return self.__real @property def imag(self): return self.__imag def __getitem__(self, key): return type(self)(self.__real[key], self.__imag[key]) def __setitem__(self, key, value): if not isinstance(value, (Cplx, complex)): self.__real[key], self.__imag[key] = value, value else: self.__real[key], self.__imag[key] = value.real, value.imag def __iter__(self): return map(type(self), self.__real, self.__imag) def __reversed__(self): return type(self)(reversed(self.__real), reversed(self.__imag)) def clone(self): return type(self)(self.__real.clone(), self.__imag.clone()) @property def conj(self): return type(self)(self.__real, -self.__imag) def conjugate(self): return self.conj def __pos__(self): return self def __neg__(self): return type(self)(-self.__real, -self.__imag) def __add__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real + v, u.__imag) return type(u)(u.__real + v.real, u.__imag + v.imag) __radd__ = __add__ __iadd__ = __add__ def __sub__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real - v, u.__imag) return type(u)(u.__real - v.real, u.__imag - v.imag) def __rsub__(u, v): return -u + v __isub__ = __sub__ def __mul__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real * v, u.__imag * v) return type(u)(u.__real * v.real - u.__imag * v.imag, u.__imag * v.real + u.__real * v.imag) __rmul__ = __mul__ __imul__ = __mul__ def __truediv__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real / v, u.__imag / v) denom = v.real * v.real + v.imag * v.imag return u * (v.conjugate() / denom) def __rtruediv__(u, v): denom = u.__real * u.__real + u.__imag * u.__imag return (u.conjugate() / denom) * v __itruediv__ = __truediv__ def __matmul__(u, v): if not isinstance(v, Cplx): return type(u)(torch.matmul(u.__real, v), torch.matmul(u.__imag, v)) re = torch.matmul(u.__real, v.__real) - torch.matmul(u.__imag, v.__imag) im = torch.matmul(u.__imag, v.__real) + torch.matmul(u.__real, v.__imag) return type(u)(re, im) def __rmatmul__(u, v): return type(u)(torch.matmul(v, u.__real), torch.matmul(v, u.__imag)) __imatmul__ = __matmul__ def __abs__(self): input = torch.stack([self.__real, self.__imag], dim=0) return torch.norm(input, p=2, dim=0, keepdim=False) @property def angle(self): return torch.atan2(self.__imag, self.__real) def 
apply(self, f, *a, **k): return type(self)(f(self.__real, *a, **k), f(self.__imag, *a, **k)) @property def shape(self): return self.__real.shape def __len__(self): return self.shape[0] def t(self): return type(self)(self.__real.t(), self.__imag.t()) def h(self): return self.conj.t() def flatten(self, start_dim=0, end_dim=-1): return type(self)(self.__real.flatten(start_dim, end_dim), self.__imag.flatten(start_dim, end_dim)) def view(self, *shape): shape = shape[0] if shape and isinstance(shape[0], tuple) else shape return type(self)(self.__real.view(*shape), self.__imag.view(*shape)) def view_as(self, other): shape = other.shape return self.view(*shape) def reshape(self, *shape): shape = shape[0] if shape and isinstance(shape[0], tuple) else shape return type(self)(self.__real.reshape(*shape), self.__imag.reshape(*shape)) def size(self, *dim): return self.__real.size(*dim) def squeeze(self, dim=None): if dim is None: return type(self)(self.__real.squeeze(), self.__imag.squeeze()) else: return type(self)( self.__real.squeeze(dim=dim), self.__imag.squeeze(dim=dim) ) def unsqueeze(self, dim=None): if dim is None: return type(self)(self.__real.unsqueeze(), self.__imag.unsqueeze()) else: return type(self)( self.__real.unsqueeze(dim=dim), self.__imag.unsqueeze(dim=dim) ) def item(self): return float(self.__real) + 1j * float(self.__imag) @classmethod def from_numpy(cls, numpy): re = torch.from_numpy(numpy.real) im = torch.from_numpy(numpy.imag) return cls(re, im) def numpy(self): return self.__real.numpy() + 1j * self.__imag.numpy() def __repr__(self): return f"{self.__class__.__name__}(\n" f" real={self.__real},\n imag={self.__imag}\n)" def detach(self): return type(self)(self.__real.detach(), self.__imag.detach()) def requires_grad_(self, requires_grad=True): return type(self)(self.__real.requires_grad_(requires_grad), self.__imag.requires_grad_(requires_grad)) @property def grad(self): re, im = self.__real.grad, self.__imag.grad return None if re is None or im is None else type(self)(re, im) def cuda(self, device=None, non_blocking=False): re = self.__real.cuda(device=device, non_blocking=non_blocking) im = self.__imag.cuda(device=device, non_blocking=non_blocking) return type(self)(re, im) def cpu(self): return type(self)(self.__real.cpu(), self.__imag.cpu()) def to(self, *args, **kwargs): return type(self)(self.__real.to(*args, **kwargs), self.__imag.to(*args, **kwargs)) @property def device(self): return self.__real.device @property def dtype(self): return self.__real.dtype def dim(self): return len(self.shape) def permute(self, *dims): return type(self)(self.__real.permute(*dims), self.__imag.permute(*dims)) def transpose(self, dim0, dim1): return type(self)(self.__real.transpose(dim0, dim1), self.__imag.transpose(dim0, dim1)) def is_complex(self): return True @classmethod def empty(cls, *sizes, dtype=None, device=None, requires_grad=False): re = torch.empty(*sizes, dtype=dtype, device=device, requires_grad=requires_grad) return cls(re, torch.empty_like(re, requires_grad=requires_grad)) @classmethod def zeros(cls, *sizes, dtype=None, device=None, requires_grad=False): re = torch.zeros(*sizes, dtype=dtype, device=device, requires_grad=requires_grad) return cls(re, torch.zeros_like(re, requires_grad=requires_grad)) @classmethod def ones(cls, *sizes, dtype=None, device=None, requires_grad=False): re = torch.ones(*sizes, dtype=dtype, device=device, requires_grad=requires_grad) return cls(re, torch.zeros_like(re, requires_grad=requires_grad)) def cat(tensors, dim): tensors = [*map(Cplx, 
tensors)] return Cplx(torch.cat([z.real for z in tensors], dim=dim), torch.cat([z.imag for z in tensors], dim=dim)) def split(input, split_size_or_sections, dim=0): return tuple(Cplx(re, im) for re, im in zip( torch.split(input.real, split_size_or_sections, dim), torch.split(input.imag, split_size_or_sections, dim), )) def chunk(input, chunks, dim=0): return tuple(Cplx(re, im) for re, im in zip( torch.chunk(input.real, chunks, dim), torch.chunk(input.imag, chunks, dim), )) def stack(tensors, dim): tensors = [*map(Cplx, tensors)] return Cplx(torch.stack([z.real for z in tensors], dim=dim), torch.stack([z.imag for z in tensors], dim=dim)) def unbind(input, dim=0): return tuple(Cplx(re, im) for re, im in zip( torch.unbind(input.real, dim), torch.unbind(input.imag, dim), )) def take(input, index): return Cplx(torch.take(input.real, index), torch.take(input.imag, index)) def narrow(input, dim, start, length): return Cplx(torch.narrow(input.real, dim, start, length), torch.narrow(input.imag, dim, start, length)) def squeeze(input, dim=None): return Cplx(torch.squeeze(input.real, dim), torch.squeeze(input.imag, dim)) def unsqueeze(input, dim): return Cplx(torch.unsqueeze(input.real, dim), torch.unsqueeze(input.imag, dim)) def from_interleaved_real(input, copy=True, dim=-1): output = Cplx(*complex_view(input, dim, squeeze=False)) return output.clone() if copy else output from_real = from_interleaved_real def from_concatenated_real(input, copy=True, dim=-1): output = Cplx(*torch.chunk(input, 2, dim=dim)) return output.clone() if copy else output def to_interleaved_real(input, flatten=True, dim=-1): dim = 1 + fix_dim(dim, input.dim()) input = torch.stack([input.real, input.imag], dim=dim) return input.flatten(dim-1, dim) if flatten else input to_real = to_interleaved_real def to_concatenated_real(input, flatten=None, dim=-1): assert flatten is None return torch.cat([input.real, input.imag], dim=dim) def exp(input): scale = torch.exp(input.real) return Cplx(scale * torch.cos(input.imag), scale * torch.sin(input.imag)) def log(input): return Cplx(torch.log(abs(input)), input.angle) def sin(input): return Cplx(torch.sin(input.real) * torch.cosh(input.imag), torch.cos(input.real) * torch.sinh(input.imag)) def cos(input): return Cplx(torch.cos(input.real) * torch.cosh(input.imag), - torch.sin(input.real) * torch.sinh(input.imag)) def tan(input): return sin(input) / cos(input)
MIT License
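A minimal usage sketch for the Cplx wrapper shown in the context above; the import path is an assumption and only torch is required.

import torch
from cplxmodule.cplx import Cplx  # assumed import path for the module defined above

z = Cplx(torch.randn(2, 3), torch.randn(2, 3))  # real and imaginary parts of the same shape
w = z * z.conj                                  # elementwise z * conj(z); imaginary part is ~0
mag = abs(z)                                    # plain torch.Tensor of magnitudes, shape (2, 3)
print(mag.shape, w.real.shape)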
rbuffat/pyidf
pyidf/room_air_models.py
RoomAirTemperaturePatternTwoGradient.upper_temperature_bound
python
def upper_temperature_bound(self, value=None): self["Upper Temperature Bound"] = value
Corresponds to IDD field `Upper Temperature Bound`
https://github.com/rbuffat/pyidf/blob/c2f744211572b5e14e29522aac1421ba88addb0e/pyidf/room_air_models.py#L839-L841
from collections import OrderedDict import logging from pyidf.helper import DataObject logger = logging.getLogger("pyidf") logger.addHandler(logging.NullHandler()) class RoomAirModelType(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'zone name', {'name': u'Zone Name', 'pyname': u'zone_name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'room-air modeling type', {'name': u'Room-Air Modeling Type', 'pyname': u'roomair_modeling_type', 'default': u'Mixing', 'required-field': True, 'autosizable': False, 'accepted-values': [u'Mixing', u'UserDefined', u'OneNodeDisplacementVentilation', u'ThreeNodeDisplacementVentilation', u'CrossVentilation', u'UnderFloorAirDistributionInterior', u'UnderFloorAirDistributionExterior', u'AirflowNetwork'], 'autocalculatable': False, 'type': 'alpha'}), (u'air temperature coupling strategy', {'name': u'Air Temperature Coupling Strategy', 'pyname': u'air_temperature_coupling_strategy', 'default': u'Direct', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Direct', u'Indirect'], 'autocalculatable': False, 'type': 'alpha'})]), 'format': None, 'group': u'Room Air Models', 'min-fields': 0, 'name': u'RoomAirModelType', 'pyname': u'RoomAirModelType', 'required-object': False, 'unique-object': False} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def zone_name(self): return self["Zone Name"] @zone_name.setter def zone_name(self, value=None): self["Zone Name"] = value @property def roomair_modeling_type(self): return self["Room-Air Modeling Type"] @roomair_modeling_type.setter def roomair_modeling_type(self, value="Mixing"): self["Room-Air Modeling Type"] = value @property def air_temperature_coupling_strategy(self): return self["Air Temperature Coupling Strategy"] @air_temperature_coupling_strategy.setter def air_temperature_coupling_strategy(self, value="Direct"): self["Air Temperature Coupling Strategy"] = value class RoomAirTemperaturePatternUserDefined(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'zone name', {'name': u'Zone Name', 'pyname': u'zone_name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'availability schedule name', {'name': u'Availability Schedule Name', 'pyname': u'availability_schedule_name', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'pattern control schedule name', {'name': u'Pattern Control Schedule Name', 'pyname': u'pattern_control_schedule_name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'})]), 'format': None, 'group': u'Room Air Models', 'min-fields': 0, 'name': u'RoomAir:TemperaturePattern:UserDefined', 'pyname': u'RoomAirTemperaturePatternUserDefined', 'required-object': False, 'unique-object': False} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def zone_name(self): return self["Zone Name"] @zone_name.setter def zone_name(self, value=None): self["Zone Name"] = value @property def availability_schedule_name(self): 
return self["Availability Schedule Name"] @availability_schedule_name.setter def availability_schedule_name(self, value=None): self["Availability Schedule Name"] = value @property def pattern_control_schedule_name(self): return self["Pattern Control Schedule Name"] @pattern_control_schedule_name.setter def pattern_control_schedule_name(self, value=None): self["Pattern Control Schedule Name"] = value class RoomAirTemperaturePatternConstantGradient(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'control integer for pattern control schedule name', {'name': u'Control Integer for Pattern Control Schedule Name', 'pyname': u'control_integer_for_pattern_control_schedule_name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'integer'}), (u'thermostat offset', {'name': u'Thermostat Offset', 'pyname': u'thermostat_offset', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'deltaC'}), (u'return air offset', {'name': u'Return Air Offset', 'pyname': u'return_air_offset', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'deltaC'}), (u'exhaust air offset', {'name': u'Exhaust Air Offset', 'pyname': u'exhaust_air_offset', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'deltaC'}), (u'temperature gradient', {'name': u'Temperature Gradient', 'pyname': u'temperature_gradient', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'K/m'})]), 'format': None, 'group': u'Room Air Models', 'min-fields': 0, 'name': u'RoomAir:TemperaturePattern:ConstantGradient', 'pyname': u'RoomAirTemperaturePatternConstantGradient', 'required-object': False, 'unique-object': False} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def control_integer_for_pattern_control_schedule_name(self): return self["Control Integer for Pattern Control Schedule Name"] @control_integer_for_pattern_control_schedule_name.setter def control_integer_for_pattern_control_schedule_name(self, value=None): self["Control Integer for Pattern Control Schedule Name"] = value @property def thermostat_offset(self): return self["Thermostat Offset"] @thermostat_offset.setter def thermostat_offset(self, value=None): self["Thermostat Offset"] = value @property def return_air_offset(self): return self["Return Air Offset"] @return_air_offset.setter def return_air_offset(self, value=None): self["Return Air Offset"] = value @property def exhaust_air_offset(self): return self["Exhaust Air Offset"] @exhaust_air_offset.setter def exhaust_air_offset(self, value=None): self["Exhaust Air Offset"] = value @property def temperature_gradient(self): return self["Temperature Gradient"] @temperature_gradient.setter def temperature_gradient(self, value=None): self["Temperature Gradient"] = value class RoomAirTemperaturePatternTwoGradient(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'control integer for pattern control schedule name', {'name': u'Control Integer for Pattern Control Schedule Name', 'pyname': 
u'control_integer_for_pattern_control_schedule_name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'integer'}), (u'thermostat height', {'name': u'Thermostat Height', 'pyname': u'thermostat_height', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'm'}), (u'return air height', {'name': u'Return Air Height', 'pyname': u'return_air_height', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'm'}), (u'exhaust air height', {'name': u'Exhaust Air Height', 'pyname': u'exhaust_air_height', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'm'}), (u'temperature gradient lower bound', {'name': u'Temperature Gradient Lower Bound', 'pyname': u'temperature_gradient_lower_bound', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'K/m'}), (u'temperature gradient upper bound', {'name': u'Temperature Gradient Upper Bound', 'pyname': u'temperature_gradient_upper_bound', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'K/m'}), (u'gradient interpolation mode', {'name': u'Gradient Interpolation Mode', 'pyname': u'gradient_interpolation_mode', 'required-field': False, 'autosizable': False, 'accepted-values': [u'OutdoorDryBulbTemperature', u'ZoneDryBulbTemperature', u'ZoneAndOutdoorTemperatureDifference', u'SensibleCoolingLoad', u'SensibleHeatingLoad'], 'autocalculatable': False, 'type': 'alpha'}), (u'upper temperature bound', {'name': u'Upper Temperature Bound', 'pyname': u'upper_temperature_bound', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'C'}), (u'lower temperature bound', {'name': u'Lower Temperature Bound', 'pyname': u'lower_temperature_bound', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'C'}), (u'upper heat rate bound', {'name': u'Upper Heat Rate Bound', 'pyname': u'upper_heat_rate_bound', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'W'}), (u'lower heat rate bound', {'name': u'Lower Heat Rate Bound', 'pyname': u'lower_heat_rate_bound', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'W'})]), 'format': None, 'group': u'Room Air Models', 'min-fields': 0, 'name': u'RoomAir:TemperaturePattern:TwoGradient', 'pyname': u'RoomAirTemperaturePatternTwoGradient', 'required-object': False, 'unique-object': False} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def control_integer_for_pattern_control_schedule_name(self): return self["Control Integer for Pattern Control Schedule Name"] @control_integer_for_pattern_control_schedule_name.setter def control_integer_for_pattern_control_schedule_name(self, value=None): self["Control Integer for Pattern Control Schedule Name"] = value @property def thermostat_height(self): return self["Thermostat Height"] @thermostat_height.setter def thermostat_height(self, value=None): self["Thermostat Height"] = value @property def return_air_height(self): return self["Return Air Height"] @return_air_height.setter def return_air_height(self, value=None): self["Return Air Height"] = value @property def exhaust_air_height(self): return self["Exhaust Air Height"] @exhaust_air_height.setter def exhaust_air_height(self, 
value=None): self["Exhaust Air Height"] = value @property def temperature_gradient_lower_bound(self): return self["Temperature Gradient Lower Bound"] @temperature_gradient_lower_bound.setter def temperature_gradient_lower_bound(self, value=None): self["Temperature Gradient Lower Bound"] = value @property def temperature_gradient_upper_bound(self): return self["Temperature Gradient Upper Bound"] @temperature_gradient_upper_bound.setter def temperature_gradient_upper_bound(self, value=None): self["Temperature Gradient Upper Bound"] = value @property def gradient_interpolation_mode(self): return self["Gradient Interpolation Mode"] @gradient_interpolation_mode.setter def gradient_interpolation_mode(self, value=None): self["Gradient Interpolation Mode"] = value @property def upper_temperature_bound(self): return self["Upper Temperature Bound"] @upper_temperature_bound.setter
Apache License 2.0
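A short usage sketch for the setter above; it assumes RoomAirTemperaturePatternTwoGradient can be instantiated without arguments, which is not shown in the context.

from pyidf.room_air_models import RoomAirTemperaturePatternTwoGradient

obj = RoomAirTemperaturePatternTwoGradient()
obj.upper_temperature_bound = 24.0   # stored under IDD field "Upper Temperature Bound" (unit: C)
print(obj.upper_temperature_bound)   # -> 24.0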
accelergy-project/accelergy
accelergy/action.py
Action.expand_action_to_list_with_arg_values
python
def expand_action_to_list_with_arg_values(action_info): action_name = action_info['name'] total_entries = 1 argument_range_record = {} for argument_name, argument_range in action_info['arguments'].items(): start_idx, end_idx = Action.parse_arg_range(argument_range) total_entries *= (end_idx - start_idx + 1) argument_range_record[argument_name] = (start_idx, end_idx) expanded_list = [{'name': action_name, 'arguments':{}} for i in range(total_entries)] for entry_idx in range(total_entries): offset = 1 for argument_name, range_record in argument_range_record.items(): arg_range = range_record[1] - range_record[0] + 1 expanded_list[entry_idx]['arguments'][argument_name] = (entry_idx // offset) % arg_range + range_record[0] offset *= arg_range return expanded_list
Flatten an action with arguments into a list. 1) The input action is fully defined with numerical argument ranges. 2) The output is a list of actions, each with one possible set of argument values.
https://github.com/accelergy-project/accelergy/blob/bb39de0b79f11d49347ad22aec2c59f8c783338a/accelergy/action.py#L138-L160
from copy import deepcopy from accelergy.utils import * from accelergy.parsing_utils import * class Action(object): def __init__(self, action_def_dict): self.action_def_dict = action_def_dict self.name = action_def_dict['name'] self._arguments = None self.set_arguments(action_def_dict) self._subcomponents = None self.set_subcomponents(action_def_dict) if 'repeat' in action_def_dict: self._action_share = action_def_dict['repeat'] elif 'action_share' in action_def_dict: self._action_share = action_def_dict['action_share'] else: self._action_share = None self._primitive_list = None def set_arguments(self, action_def_dict): if 'arguments' in action_def_dict: self._arguments = {} for arg_name, arg_range in action_def_dict['arguments'].items(): self._arguments[arg_name] = arg_range def set_subcomponents(self, action_def_dict): if 'subcomponents' in action_def_dict: self._subcomponents = {} for subcomp in action_def_dict['subcomponents']: subcompActions = [] for subcompAction in subcomp['actions']: subcompActions.append(Action(subcompAction)) self._subcomponents[subcomp['name']] = subcompActions def set_primitive_list(self, primitive_list): self._primitive_list = primitive_list def set_action_share(self, new_action_share): self._action_share = new_action_share def set_argument(self, new_arg_dict): self._arguments.update(new_arg_dict) def set_subcomps(self, defined_subcomps): self._subcomponents = defined_subcomps def get_name(self): return self.name def get_action_share(self): return self._action_share def get_arguments(self): return self._arguments def get_argument(self, arg_name): return self._arguments[arg_name] def get_subcomps(self): ASSERT_MSG(self._subcomponents is not None, 'action does not have defined subcomponents') return self._subcomponents def get_primitive_list(self): return self._primitive_list def get_action_info_as_dict(self): action_dict = {'name': self.name} if self._subcomponents is not None: action_dict['subcomponents'] = self._subcomponents if self._arguments is not None: action_dict['arguments'] = self._arguments return action_dict def get_subactions(self, subcompName): ASSERT_MSG(self._subcomponents is not None and subcompName in self._subcomponents, 'cannot find subactions associated with %s for action %s'%(subcompName, self.name)) return self._subcomponents[subcompName] def get_arg_val(self, argName): ASSERT_MSG(argName in self._arguments, 'argument name %s is not associated with action %s'%(argName, self.name)) return self._arguments[argName] def set_arg(self, arg_dict): self._arguments.update(arg_dict) def flatten_action_args_into_list(self, mappingDict): args = self.get_arguments() if args is None: return [self] total_entries = 1 argument_range_record = {} for arg_name, arg_range in args.items(): ASSERT_MSG(type(arg_range) is str, '%s: argument value for action %s is not string, cannot parse range'%(arg_name,self.name)) ASSERT_MSG('..' 
in arg_range, '%s: argument value for action %s is not range, cannot parse range'%(arg_name,self.name)) new_arg_range = Action.map_arg_range_bounds(arg_range, mappingDict)[0] startIdx, endIdx = Action.parse_arg_range(new_arg_range) total_entries *= (endIdx - startIdx + 1) argument_range_record[arg_name] = (startIdx, endIdx) action_list = [] for entry_idx in range(total_entries): offset = 1 arg_def = {} for arg_name, range_record in argument_range_record.items(): arg_range = range_record[1] - range_record[0] + 1 arg_def[arg_name] = (entry_idx // offset) % arg_range + range_record[0] offset *= arg_range subcomp_list = [] new_action = deepcopy(self); new_action._arguments = arg_def action_list.append(new_action) return action_list @staticmethod def parse_arg_range(arg_range): if type(arg_range) is not str or '..' not in arg_range: ERROR_CLEAN_EXIT('cannot parse the argument range specification: ', arg_range) split_sub_string = arg_range.split('..') start_idx = int(split_sub_string[0]) end_idx = int(split_sub_string[1]) return start_idx, end_idx @staticmethod
MIT License
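A small sketch of the expansion performed by the static method above; the import path follows the function_path field and the action contents are illustrative.

from accelergy.action import Action

action_info = {'name': 'read', 'arguments': {'address_delta': '0..1', 'data_delta': '0..2'}}
expanded = Action.expand_action_to_list_with_arg_values(action_info)
assert len(expanded) == 6   # (1 - 0 + 1) * (2 - 0 + 1) argument combinations
print(expanded[0])          # -> {'name': 'read', 'arguments': {'address_delta': 0, 'data_delta': 0}}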
nipreps/mriqc
mriqc/viz/svg.py
svg2str
python
def svg2str(display_object, dpi=300): from io import StringIO image_buf = StringIO() display_object.frame_axes.figure.savefig( image_buf, dpi=dpi, format="svg", facecolor="k", edgecolor="k" ) image_buf.seek(0) return image_buf.getvalue()
Serializes a nilearn display object as a string
https://github.com/nipreps/mriqc/blob/468d04c0701432e4e3831ca5c716071dbbb0ea8d/mriqc/viz/svg.py#L26-L37
Apache License 2.0
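A brief sketch of how the helper above might be used; it assumes nilearn is installed and that the function is importable from mriqc.viz.svg, per the function_path field.

from nilearn import plotting
from mriqc.viz.svg import svg2str

display = plotting.plot_anat()        # with no arguments this plots the bundled MNI template
svg_text = svg2str(display, dpi=150)  # serialized SVG markup as a string
display.close()
assert '<svg' in svg_text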
dw/mitogen
ansible_mitogen/planner.py
ScriptPlanner._rewrite_interpreter
python
def _rewrite_interpreter(self, path): key = u'ansible_%s_interpreter' % os.path.basename(path).strip() try: template = self._inv.task_vars[key] except KeyError: return path return mitogen.utils.cast(self._inv.templar.template(template))
Given the original interpreter binary extracted from the script's interpreter line, look up the associated `ansible_*_interpreter` variable, render it and return it. :param str path: Absolute UNIX path to original interpreter. :returns: Shell fragment prefix used to execute the script via "/bin/sh -c". While `ansible_*_interpreter` documentation suggests shell isn't involved here, the vanilla implementation uses it and that use is exploited in common playbooks.
https://github.com/dw/mitogen/blob/cc8f9a016965876bcd9ec390d53035d6ed842b07/ansible_mitogen/planner.py#L216-L237
from __future__ import absolute_import from __future__ import unicode_literals import json import logging import os import random import re from ansible.executor import module_common from ansible.collections.list import list_collection_dirs import ansible.errors import ansible.module_utils import ansible.release import mitogen.core import mitogen.select import ansible_mitogen.loaders import ansible_mitogen.parsing import ansible_mitogen.target LOG = logging.getLogger(__name__) NO_METHOD_MSG = 'Mitogen: no invocation method found for: ' NO_INTERPRETER_MSG = 'module (%s) is missing interpreter line' NO_MODULE_MSG = 'The module %s was not found in configured module paths' _planner_by_path = {} class Invocation(object): def __init__(self, action, connection, module_name, module_args, task_vars, templar, env, wrap_async, timeout_secs): self.action = action self.connection = connection self.module_name = module_name self.module_args = module_args self.task_vars = task_vars self.templar = templar self.env = env self.wrap_async = wrap_async self.timeout_secs = timeout_secs self.module_path = None self._module_source = None self._overridden_sources = {} self._extra_sys_paths = set() def get_module_source(self): if self._module_source is None: self._module_source = read_file(self.module_path) return self._module_source def __repr__(self): return 'Invocation(module_name=%s)' % (self.module_name,) class Planner(object): def __init__(self, invocation): self._inv = invocation @classmethod def detect(cls, path, source): raise NotImplementedError() def should_fork(self): return self._inv.wrap_async def get_push_files(self): return [] def get_module_deps(self): return [] def get_kwargs(self, **kwargs): binding = self._inv.connection.get_binding() new = dict((mitogen.core.UnicodeType(k), kwargs[k]) for k in kwargs) new.setdefault('good_temp_dir', self._inv.connection.get_good_temp_dir()) new.setdefault('cwd', self._inv.connection.get_default_cwd()) new.setdefault('extra_env', self._inv.connection.get_default_env()) new.setdefault('emulate_tty', True) new.setdefault('service_context', binding.get_child_service_context()) return new def __repr__(self): return '%s()' % (type(self).__name__,) class BinaryPlanner(Planner): runner_name = 'BinaryRunner' @classmethod def detect(cls, path, source): return module_common._is_binary(source) def get_push_files(self): return [mitogen.core.to_text(self._inv.module_path)] def get_kwargs(self, **kwargs): return super(BinaryPlanner, self).get_kwargs( runner_name=self.runner_name, module=self._inv.module_name, path=self._inv.module_path, json_args=json.dumps(self._inv.module_args), env=self._inv.env, **kwargs ) class ScriptPlanner(BinaryPlanner):
BSD 3-Clause New or Revised License
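A standalone sketch of the variable lookup that _rewrite_interpreter performs (templating omitted); the task_vars content here is purely illustrative.

import os

task_vars = {'ansible_python_interpreter': '/usr/bin/env python3'}  # hypothetical inventory variable
path = '/usr/bin/python'                                            # interpreter taken from the script's shebang
key = u'ansible_%s_interpreter' % os.path.basename(path).strip()
print(task_vars.get(key, path))                                     # -> '/usr/bin/env python3'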
google/uncertainty-baselines
uncertainty_baselines/datasets/cifar100_corrupted.py
_make_builder_configs
python
def _make_builder_configs(): config_list = [] for corruption in _CORRUPTIONS: for severity in range(1, 6): config_list.append( Cifar100CorruptedConfig( name=corruption + '_' + str(severity), description='Corruption method: ' + corruption + ', severity level: ' + str(severity), corruption_type=corruption, severity=severity, )) return config_list
Construct a list of BuilderConfigs. Construct a list of 85 Cifar100CorruptedConfig objects, corresponding to the 17 corruption types and 5 severities. Returns: A list of 85 Cifar100CorruptedConfig objects.
https://github.com/google/uncertainty-baselines/blob/d37c17c4b08a88d6546bbf299b59127a03398404/uncertainty_baselines/datasets/cifar100_corrupted.py#L82-L102
import os from typing import Optional from robustness_metrics.common import types import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds from uncertainty_baselines.datasets import base _DESCRIPTION = """\ Cifar100Corrupted is a dataset generated by adding 17 corruptions to the test images in the Cifar100 dataset. """ _CITATION = """\ @inproceedings{ hendrycks2018benchmarking, title={Benchmarking Neural Network Robustness to Common Corruptions and Perturbations}, author={Dan Hendrycks and Thomas Dietterich}, booktitle={International Conference on Learning Representations}, year={2019}, url={https://openreview.net/forum?id=HJz6tiCqYm}, } """ _CIFAR_IMAGE_SIZE = (32, 32, 3) _CIFAR_CLASSES = 100 _CORRUPTIONS = [ 'brightness', 'contrast', 'defocus_blur', 'elastic_transform', 'fog', 'frost', 'glass_blur', 'gaussian_blur', 'gaussian_noise', 'impulse_noise', 'jpeg_compression', 'pixelate', 'saturate', 'shot_noise', 'spatter', 'speckle_noise', 'zoom_blur', ] _NUM_EXAMPLES = 50000 class Cifar100CorruptedConfig(tfds.core.BuilderConfig): def __init__(self, *, corruption_type, severity, **kwargs): super().__init__(**kwargs) self.corruption = corruption_type self.severity = severity
Apache License 2.0
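A quick check of the builder-config expansion above; it assumes the module (which pulls in TensorFlow and tensorflow_datasets) is importable as given in the function_path field.

from uncertainty_baselines.datasets.cifar100_corrupted import (
    _CORRUPTIONS, _make_builder_configs)

configs = _make_builder_configs()
assert len(configs) == len(_CORRUPTIONS) * 5   # 17 corruptions x 5 severities = 85
print(configs[0].name)                         # -> 'brightness_1'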
aliev/aioauth
tests/utils.py
get_keys
python
def get_keys(query: Union[Query, Post]) -> Dict[str, Any]: return {key: value for key, value in query._asdict().items() if bool(value)}
Converts a Query or Post object to a dict and returns the dict without empty values
https://github.com/aliev/aioauth/blob/e8638c3d2660496fdf6b5bf3465747c80e25c966/tests/utils.py#L249-L251
from http import HTTPStatus from typing import Any, Callable, Dict, Union from aioauth.collections import HTTPHeaderDict from aioauth.constances import default_headers from aioauth.requests import Post, Query, Request from aioauth.responses import ErrorResponse, Response from aioauth.types import ErrorType, RequestMethod EMPTY_KEYS = { RequestMethod.GET: { "client_id": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Missing client_id parameter.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "response_type": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Missing response_type parameter.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "redirect_uri": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Mismatching redirect URI.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "code_challenge": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Code challenge required.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "nonce": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Nonce required for response_type id_token.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), }, RequestMethod.POST: { "grant_type": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Request is missing grant type.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "redirect_uri": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Mismatching redirect URI.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "code": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Missing code parameter.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "refresh_token": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Missing refresh token parameter.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "code_verifier": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Code verifier required.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "client_id": Response( content=ErrorResponse( error=ErrorType.INVALID_CLIENT, description="", )._asdict(), status_code=HTTPStatus.UNAUTHORIZED, headers=HTTPHeaderDict({"www-authenticate": "Basic"}), ), "client_secret": Response( content=ErrorResponse( error=ErrorType.INVALID_CLIENT, description="", )._asdict(), status_code=HTTPStatus.UNAUTHORIZED, headers=HTTPHeaderDict({"www-authenticate": "Basic"}), ), "username": Response( content=ErrorResponse( error=ErrorType.INVALID_GRANT, description="Invalid credentials given.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "password": Response( content=ErrorResponse( error=ErrorType.INVALID_GRANT, description="Invalid credentials given.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), }, } INVALID_KEYS = { RequestMethod.GET: { "client_id": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Invalid client_id parameter value.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "response_type": Response( content=ErrorResponse( error=ErrorType.UNSUPPORTED_RESPONSE_TYPE, 
description="", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "redirect_uri": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Invalid redirect URI.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "code_challenge_method": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Transform algorithm not supported.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "scope": Response( content=ErrorResponse( error=ErrorType.INVALID_SCOPE, description="", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), }, RequestMethod.POST: { "grant_type": Response( content=ErrorResponse( error=ErrorType.UNSUPPORTED_GRANT_TYPE, description="", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "redirect_uri": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Invalid redirect URI.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "code": Response( content=ErrorResponse( error=ErrorType.INVALID_GRANT, description="", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "code_verifier": Response( content=ErrorResponse( error=ErrorType.MISMATCHING_STATE, description="CSRF Warning! State not equal in request and response.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "refresh_token": Response( content=ErrorResponse( error=ErrorType.INVALID_GRANT, description="", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "client_id": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Invalid client_id parameter value.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "client_secret": Response( content=ErrorResponse( error=ErrorType.INVALID_REQUEST, description="Invalid client_id parameter value.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "username": Response( content=ErrorResponse( error=ErrorType.INVALID_GRANT, description="Invalid credentials given.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), "password": Response( content=ErrorResponse( error=ErrorType.INVALID_GRANT, description="Invalid credentials given.", )._asdict(), status_code=HTTPStatus.BAD_REQUEST, headers=default_headers, ), }, }
MIT License
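A tiny sketch of get_keys using a stand-in namedtuple, since the helper only relies on the _asdict() interface; it assumes get_keys is in scope (the function lives in tests/utils.py, which is not an installed module), and Query's real fields are not reproduced here.

from collections import namedtuple

Q = namedtuple('Q', ['client_id', 'scope'])    # stand-in exposing the same _asdict() interface
print(get_keys(Q(client_id='abc', scope='')))  # -> {'client_id': 'abc'}  (empty values dropped)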
araith/pydea
pyDEA/core/models/multiplier_model_decorators.py
MultiplierModelWithVirtualWeightRestrictions._get_constraint_prefix_name
python
def _get_constraint_prefix_name(self): return 'Virtual'
See base class.
https://github.com/araith/pydea/blob/eac02bac901b9109efb5d6a3841809f70e378912/pyDEA/core/models/multiplier_model_decorators.py#L565-L568
import pulp from pyDEA.core.models.multiplier_model_base import MultiplierModelBase from pyDEA.core.data_processing.solution import SolutionWithVRS class MultiplierModelVRSDecorator(MultiplierModelBase): def __init__(self, model_to_decorate): self._model_to_decorate = model_to_decorate self._vrs_variable = None self.multiplier = 1 if (self._model_to_decorate._concrete_model.get_orientation() == 'output'): self.multiplier = -1 def __getattr__(self, name): return getattr(self._model_to_decorate, name) def _create_lp(self): self._model_to_decorate._create_lp() self.lp_model = self._model_to_decorate.lp_model self._vrs_variable = pulp.LpVariable('VRS_variable', None, None, pulp.LpContinuous) self._model_to_decorate.lp_model.objective += self._vrs_variable for dmu_constraint in self._model_to_decorate._dmu_constraint_names.keys(): self._model_to_decorate.lp_model.constraints[dmu_constraint] += ( self.multiplier * self._vrs_variable) def _update_lp(self, dmu_code): self._model_to_decorate._update_lp(dmu_code) def _create_solution(self): basic_solution = super()._create_solution() return SolutionWithVRS(basic_solution) def _fill_solution(self, dmu_code, model_solution): self._model_to_decorate._fill_solution(dmu_code, model_solution) model_solution.add_VRS_dual(dmu_code, self._vrs_variable.varValue) def _get_efficiency_score(self, lambda_variable): return self._model_to_decorate._get_efficiency_score(lambda_variable) class MultiplierModelWithDisposableCategories(MultiplierModelBase): def __init__(self, model_to_decorate, weakly_disposable_categories): self._model_to_decorate = model_to_decorate assert(weakly_disposable_categories) self.weakly_disposable_categories = weakly_disposable_categories def __getattr__(self, name): return getattr(self._model_to_decorate, name) def _create_lp(self): self._model_to_decorate._create_lp() self.lp_model = self._model_to_decorate.lp_model self._change_lower_bound(self._model_to_decorate._input_variables) self._change_lower_bound(self._model_to_decorate._output_variables) def _update_lp(self, dmu_code): self._model_to_decorate._update_lp(dmu_code) def _change_lower_bound(self, variables): for category, var in variables.items(): if category in self.weakly_disposable_categories: var.lowBound = None def _create_solution(self): return self._model_to_decorate._create_solution() def _fill_solution(self, dmu_code, model_solution): self._model_to_decorate._fill_solution(dmu_code, model_solution) def _get_efficiency_score(self, lambda_variable): return self._model_to_decorate._get_efficiency_score(lambda_variable) class MultiplierModelWithNonDiscVarsForDecoration(MultiplierModelBase): def __init__(self, model_to_decorate, categories): assert(categories) self._model_to_decorate = model_to_decorate self.categories = categories def __getattr__(self, name): return getattr(self._model_to_decorate, name) def _create_lp(self): self._model_to_decorate._create_lp() self.lp_model = self._model_to_decorate.lp_model for elem in self.input_data.DMU_codes: dmu_code = elem break variables = self._get_variables() sum_vars = pulp.lpSum( [var * self._model_to_decorate.input_data.coefficients[ dmu_code, category] for category, var in variables.items() if category in self.categories]) self.lp_model.objective += -sum_vars for category, var in variables.items(): if category in self.categories: var.lowBound = 0 self.lp_model.constraints['equality_constraint'] = self._get_equality_constraint( dmu_code) def _update_lp(self, dmu_code): self._model_to_decorate._concrete_model.update_objective( 
self.input_data, dmu_code, self._input_variables, self._output_variables, self.lp_model) in_vars = self._get_input_variables() out_vars = self._get_output_variables() self._model_to_decorate._concrete_model.update_equality_constraint( self.input_data, dmu_code, in_vars, out_vars, self.lp_model) variables = self._get_variables() for category, var in variables.items(): if category in self.categories: self.lp_model.objective[var] = ( -self._model_to_decorate.input_data.coefficients[ dmu_code, category]) def _get_variables(self): raise NotImplementedError() def _get_input_variables(self): return self._model_to_decorate._input_variables def _get_output_variables(self): return self._model_to_decorate._output_variables def _get_equality_constraint(self, dmu_code): coeffs = self._model_to_decorate.input_data.coefficients variables = self._get_variables() sum_vars = pulp.lpSum([coeffs[dmu_code, category] * value for category, value in variables.items() if category not in self.categories]) assert(sum_vars) return sum_vars == 1 def _create_solution(self): return self._model_to_decorate._create_solution() def _fill_solution(self, dmu_code, model_solution): self._model_to_decorate._fill_solution(dmu_code, model_solution) def _get_efficiency_score(self, lambda_variable): return self._model_to_decorate._get_efficiency_score(lambda_variable) class MultiplierModelInputOrientedWithNonDiscVars( MultiplierModelWithNonDiscVarsForDecoration): def __init__(self, model_to_decorate, categories): super(MultiplierModelInputOrientedWithNonDiscVars, self).__init__( model_to_decorate, categories) if (len(categories) >= len(self._model_to_decorate.input_data.input_categories)): raise ValueError('Too many non-discretionary categories') def _get_variables(self): return self._model_to_decorate._input_variables def _get_input_variables(self): out_dict = {key: var for key, var in self._model_to_decorate._input_variables.items() if key not in self.categories} return out_dict class MultiplierModelOutputOrientedWithNonDiscVars( MultiplierModelWithNonDiscVarsForDecoration): def __init__(self, model_to_decorate, categories): super(MultiplierModelOutputOrientedWithNonDiscVars, self).__init__( model_to_decorate, categories) if (len(categories) >= len(self._model_to_decorate.input_data.output_categories)): raise ValueError('Too many non-discretionary categories.' 
' At least one output must be discretionary') def _get_variables(self): return self._model_to_decorate._output_variables def _get_output_variables(self): return {key: var for key, var in self._model_to_decorate._output_variables.items() if key not in self.categories} class MultiplierModelWithWeigthRestrictionsBase(MultiplierModelBase): def __init__(self, model_to_decorate, bounds): self._model_to_decorate = model_to_decorate self.bounds = bounds def __getattr__(self, name): return getattr(self._model_to_decorate, name) def _create_lp(self): self._model_to_decorate._create_lp() self.lp_model = self._model_to_decorate.lp_model for elem in self.input_data.DMU_codes: dmu_code = elem break for category, (lower_bound, upper_bound) in self.bounds.items(): multiplier = self._get_multiplier(dmu_code, category) variable = self._model_to_decorate._input_variables.get(category, None) if variable is None: variable = self._model_to_decorate._output_variables[category] if lower_bound: constraint_name = ('{0}_lower_bound_constraint_on_' 'category_{1}'.format( self._get_constraint_prefix_name(), category)) self.lp_model += (multiplier * variable >= lower_bound, constraint_name) self._store_vars_lb(category, constraint_name, variable) if upper_bound: constraint_name = ('{0}_upper_bound_constraint_on_' 'category_{1}'.format( self._get_constraint_prefix_name(), category)) self.lp_model += (multiplier * variable <= upper_bound, constraint_name) self._store_vars_ub(category, constraint_name, variable) def _update_lp(self, dmu_code): self._model_to_decorate._update_lp(dmu_code) def _store_vars_lb(self, category, constraint_name, variable): pass def _store_vars_ub(self, category, constraint_name, variable): pass def _create_solution(self): return self._model_to_decorate._create_solution() def _fill_solution(self, dmu_code, model_solution): self._model_to_decorate._fill_solution(dmu_code, model_solution) def _get_efficiency_score(self, lambda_variable): return self._model_to_decorate._get_efficiency_score(lambda_variable) def _get_multiplier(self, input_category): raise NotImplementedError() def _get_constraint_prefix_name(self): raise NotImplementedError() class MultiplierModelWithAbsoluteWeightRestrictions( MultiplierModelWithWeigthRestrictionsBase): def _get_multiplier(self, dmu_code, category): return 1 def _get_constraint_prefix_name(self): return 'Absolute' class MultiplierModelWithVirtualWeightRestrictions( MultiplierModelWithWeigthRestrictionsBase): def __init__(self, model_to_decorate, bounds): super().__init__(model_to_decorate, bounds) self.lb_weight_rest_variables = dict() self.ub_weight_rest_variables = dict() def _create_lp(self): self.lb_weight_rest_variables.clear() self.ub_weight_rest_variables.clear() super()._create_lp() def _get_multiplier(self, dmu_code, category): return self._model_to_decorate.input_data.coefficients[ dmu_code, category]
MIT License
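For context, a one-line sketch of how the 'Virtual' prefix returned above ends up in the constraint names built by the base class shown in the context; the category name is illustrative.

prefix = 'Virtual'
print('{0}_lower_bound_constraint_on_category_{1}'.format(prefix, 'labour'))
# -> 'Virtual_lower_bound_constraint_on_category_labour'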
pauliacomi/pygaps
src/pygaps/utilities/isotherm_interpolator.py
IsothermInterpolator.__call__
python
def __call__(self, data): return self.interp_fun(data)
Override direct call.
https://github.com/pauliacomi/pygaps/blob/c4d45b710e171c937471686437e382e05aec4ed5/src/pygaps/utilities/isotherm_interpolator.py#L67-L69
from scipy.interpolate import interp1d class IsothermInterpolator(): def __init__( self, known_data, interp_data, interp_branch='ads', interp_kind='linear', interp_fill=None, ): self.interp_branch = interp_branch self.interp_kind = interp_kind self.interp_fill = interp_fill if known_data is None: return if interp_fill is None: self.interp_fun = interp1d( known_data, interp_data, kind=interp_kind ) else: self.interp_fun = interp1d( known_data, interp_data, kind=interp_kind, fill_value=interp_fill, bounds_error=False )
MIT License
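A minimal usage sketch of the interpolator above; it only needs scipy, and the import path follows the function_path field. The pressure/loading values are illustrative.

from pygaps.utilities.isotherm_interpolator import IsothermInterpolator

interp = IsothermInterpolator([0.0, 1.0, 2.0], [0.0, 10.0, 15.0])  # known points -> values to interpolate
print(interp(0.5))                                                 # -> 5.0 with the default linear interpolation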
polyjit/benchbuild
benchbuild/source/git.py
Git.default
python
def default(self) -> base.Variant: return self.versions()[0]
Return current HEAD as default version for this Git project.
https://github.com/polyjit/benchbuild/blob/04655f86ff0b28cd0770048e1213aeca3d0ee557/benchbuild/source/git.py#L42-L46
import os import typing as tp import plumbum as pb from plumbum.commands.base import BoundCommand from benchbuild.utils.cmd import git, mkdir from . import base VarRemotes = tp.Union[str, tp.Dict[str, str]] Remotes = tp.Dict[str, str] class Git(base.FetchableSource): def __init__( self, remote: str, local: str, clone: bool = True, limit: tp.Optional[int] = 10, refspec: str = 'HEAD', shallow: bool = True, version_filter: tp.Callable[[str], bool] = lambda version: True ): super().__init__(local, remote) self.clone = clone self.limit = limit self.refspec = refspec self.shallow = shallow self.version_filter = version_filter @property
MIT License
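A sketch of reading the default version via the property above; the constructor arguments are illustrative, and resolving versions() may contact the remote.

from benchbuild.source.git import Git

source = Git(remote='https://github.com/polyjit/benchbuild.git', local='benchbuild.git')
head = source.default   # first entry of source.versions(), i.e. the current HEAD
print(head)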
slice/dogbot
dog/ext/quoting/cog.py
Quoting.rename
python
async def rename( self, ctx, existing: QuoteName(must_exist=True), new: QuoteName(must_not_exist=True), ): quotes = self.quotes(ctx.guild) quotes[new] = quotes[existing] del quotes[existing] await self.storage.put(str(ctx.guild.id), quotes) await ctx.send(f'Quote "{existing}" was renamed to "{new}".')
Renames a quote.
https://github.com/slice/dogbot/blob/7605093e2bd5b948884ff2065bf6ef2bd92620e0/dog/ext/quoting/cog.py#L205-L218
import datetime import time from random import choice import discord import lifesaver from discord.ext import commands from lifesaver.bot.storage import AsyncJSONStorage from lifesaver.utils import ( ListPaginator, clean_mentions, human_delta, pluralize, truncate, ) from .converters import Messages, QuoteName from .utils import stringify_message __all__ = ["Quoting"] def embed_quote(quote) -> discord.Embed: embed = discord.Embed() embed.description = quote["content"] embed.add_field(name="Jump", value=quote["jump_url"], inline=False) creator = quote["created_by"]["tag"] channel = quote["created_in"]["name"] ago = human_delta(datetime.datetime.utcfromtimestamp(quote["created"])) embed.set_footer(text=f"Created by {creator} in #{channel} {ago} ago") return embed class Quoting(lifesaver.Cog): def __init__(self, bot, *args, **kwargs): super().__init__(bot, *args, **kwargs) self.storage = AsyncJSONStorage("quotes.json", loop=bot.loop) def quotes(self, guild: discord.Guild): return self.storage.get(str(guild.id), {}) @lifesaver.command(aliases=["rq"]) @commands.guild_only() async def random_quote(self, ctx): quotes = self.quotes(ctx.guild) if not quotes: await ctx.send( "There are no quotes in this server. Create some with " f"`{ctx.prefix}quote new`. For more information, see `{ctx.prefix}" "help quote`." ) return (name, quote) = choice(list(quotes.items())) embed = embed_quote(quote) name = clean_mentions(ctx.channel, name) await ctx.send(name, embed=embed) @lifesaver.group(aliases=["q"], invoke_without_command=True) @commands.guild_only() async def quote(self, ctx, *, name: QuoteName(must_exist=True)): quotes = self.quotes(ctx.guild) quote = quotes.get(name) embed = embed_quote(quote) await ctx.send(embed=embed) @quote.command(aliases=["new"]) @commands.guild_only() async def create( self, ctx, name: QuoteName(must_not_exist=True), *messages: Messages ): quotes = self.quotes(ctx.guild) silent = name.startswith("!") if silent: name = name[1:] quoted = [] for message in messages: if isinstance(message, list): quoted += message else: quoted.append(message) strings = map(stringify_message, quoted) quote_content = "\n".join(strings) if len(quote_content) > 2048: over_limit = pluralize(character=len(quote_content) - 2048) if not await ctx.confirm( "Quote is quite large...", ( f"This quote is pretty big. ({over_limit} over limit.) " "It will be truncated to 2048 characters. Continue?" ), ): return quote = quotes[name] = { "content": truncate(quote_content, 2048), "jump_url": quoted[0].jump_url, "created": time.time(), "created_by": {"id": ctx.author.id, "tag": str(ctx.author)}, "created_in": {"id": ctx.channel.id, "name": ctx.channel.name}, "guild": {"id": ctx.guild.id}, } await self.storage.put(str(ctx.guild.id), quotes) embed = embed_quote(quote) await (ctx.author if silent else ctx).send( f'Created quote "{name}".', embed=embed ) @quote.command() @commands.guild_only() async def list(self, ctx): quotes = self.quotes(ctx.guild) if not quotes: await ctx.send("No quotes exist for this server.") return tag_names = [clean_mentions(ctx.channel, name) for name in quotes.keys()] paginator = ListPaginator( tag_names, ctx.author, ctx.channel, title="All quotes", per_page=20, bot=ctx.bot, ) await paginator.create() @quote.command() @commands.guild_only() @commands.has_permissions(manage_messages=True)
MIT License
netflix/lemur
lemur/plugins/lemur_aws/elb.py
describe_ssl_policies_v2
python
def describe_ssl_policies_v2(policy_names, **kwargs): try: return kwargs["client"].describe_ssl_policies(Names=policy_names) except Exception as e: metrics.send( "describe_ssl_policies_v2_error", "counter", 1, metric_tags={"policy_names": policy_names, "error": str(e)}, ) capture_exception(extra={"policy_names": str(policy_names)}) raise
Fetch the SSL policies with the given names (ELBv2). :param policy_names: :return:
https://github.com/netflix/lemur/blob/778c66ff6e4a82cebebffd25033b26240b7479a4/lemur/plugins/lemur_aws/elb.py#L274-L291
import botocore from flask import current_app from retrying import retry from sentry_sdk import capture_exception from lemur.extensions import metrics from lemur.exceptions import InvalidListener from lemur.plugins.lemur_aws.sts import sts_client def retry_throttled(exception): try: raise exception except Exception as e: current_app.logger.error("ELB retry_throttled triggered", exc_info=True) metrics.send("elb_retry", "counter", 1, metric_tags={"exception": str(e)}) capture_exception() if isinstance(exception, botocore.exceptions.ClientError): if exception.response["Error"]["Code"] == "LoadBalancerNotFound": return False if exception.response["Error"]["Code"] == "CertificateNotFound": return False return True def is_valid(listener_tuple): lb_port, i_port, lb_protocol, arn = listener_tuple if lb_protocol.lower() in ["ssl", "https"]: if not arn: raise InvalidListener return listener_tuple def get_all_elbs(**kwargs): elbs = [] try: while True: response = get_elbs(**kwargs) elbs += response["LoadBalancerDescriptions"] if not response.get("NextMarker"): return elbs else: kwargs.update(dict(Marker=response["NextMarker"])) except Exception as e: metrics.send("get_all_elbs_error", "counter", 1) capture_exception() raise def get_all_elbs_v2(**kwargs): elbs = [] try: while True: response = get_elbs_v2(**kwargs) elbs += response["LoadBalancers"] if not response.get("NextMarker"): return elbs else: kwargs.update(dict(Marker=response["NextMarker"])) except Exception as e: metrics.send("get_all_elbs_v2_error", "counter", 1) capture_exception() raise @sts_client("elbv2") @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20) def get_listener_arn_from_endpoint(endpoint_name, endpoint_port, **kwargs): try: client = kwargs.pop("client") elbs = client.describe_load_balancers(Names=[endpoint_name]) for elb in elbs["LoadBalancers"]: listeners = client.describe_listeners( LoadBalancerArn=elb["LoadBalancerArn"] ) for listener in listeners["Listeners"]: if listener["Port"] == endpoint_port: return listener["ListenerArn"] except Exception as e: metrics.send( "get_listener_arn_from_endpoint_error", "counter", 1, metric_tags={ "error": str(e), "endpoint_name": endpoint_name, "endpoint_port": endpoint_port, }, ) capture_exception( extra={ "endpoint_name": str(endpoint_name), "endpoint_port": str(endpoint_port), } ) raise @sts_client("elbv2") @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=5) def get_load_balancer_arn_from_endpoint(endpoint_name, **kwargs): try: client = kwargs.pop("client") elbs = client.describe_load_balancers(Names=[endpoint_name]) if "LoadBalancers" in elbs and elbs["LoadBalancers"]: return elbs["LoadBalancers"][0]["LoadBalancerArn"] except Exception as e: metrics.send( "get_load_balancer_arn_from_endpoint", "counter", 1, metric_tags={ "error": str(e), "endpoint_name": endpoint_name, }, ) capture_exception( extra={ "endpoint_name": str(endpoint_name), } ) raise @sts_client("elb") @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20) def get_elbs(**kwargs): try: client = kwargs.pop("client") return client.describe_load_balancers(**kwargs) except Exception as e: metrics.send("get_elbs_error", "counter", 1, metric_tags={"error": str(e)}) capture_exception() raise @sts_client("elbv2") @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20) def get_elbs_v2(**kwargs): try: client = kwargs.pop("client") return client.describe_load_balancers(**kwargs) except Exception as 
e: metrics.send("get_elbs_v2_error", "counter", 1, metric_tags={"error": str(e)}) capture_exception() raise @sts_client("elbv2") @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20) def describe_listeners_v2(**kwargs): try: client = kwargs.pop("client") return client.describe_listeners(**kwargs) except Exception as e: metrics.send( "describe_listeners_v2_error", "counter", 1, metric_tags={"error": str(e)} ) capture_exception() raise @sts_client("elb") @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20) def describe_load_balancer_policies(load_balancer_name, policy_names, **kwargs): try: return kwargs["client"].describe_load_balancer_policies( LoadBalancerName=load_balancer_name, PolicyNames=policy_names ) except Exception as e: metrics.send( "describe_load_balancer_policies_error", "counter", 1, metric_tags={ "load_balancer_name": load_balancer_name, "policy_names": policy_names, "error": str(e), }, ) capture_exception( extra={ "load_balancer_name": str(load_balancer_name), "policy_names": str(policy_names), } ) raise @sts_client("elbv2") @retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
Apache License 2.0
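The decorated function above receives its client from the sts_client decorator; a raw boto3 equivalent of the underlying call (illustrative policy name and region) looks like this.

import boto3

client = boto3.client('elbv2', region_name='us-east-1')
resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01'])
print([p['Name'] for p in resp['SslPolicies']])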
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/mysensors.py
async_setup
python
async def async_setup(hass, config): import mysensors.mysensors as mysensors version = config[DOMAIN].get(CONF_VERSION) persistence = config[DOMAIN].get(CONF_PERSISTENCE) async def setup_gateway( device, persistence_file, baud_rate, tcp_port, in_prefix, out_prefix): if device == MQTT_COMPONENT: if not await async_setup_component(hass, MQTT_COMPONENT, config): return None mqtt = hass.components.mqtt retain = config[DOMAIN].get(CONF_RETAIN) def pub_callback(topic, payload, qos, retain): mqtt.async_publish(topic, payload, qos, retain) def sub_callback(topic, sub_cb, qos): @callback def internal_callback(*args): sub_cb(*args) hass.async_add_job( mqtt.async_subscribe(topic, internal_callback, qos)) gateway = mysensors.AsyncMQTTGateway( pub_callback, sub_callback, in_prefix=in_prefix, out_prefix=out_prefix, retain=retain, loop=hass.loop, event_callback=None, persistence=persistence, persistence_file=persistence_file, protocol_version=version) else: try: await hass.async_add_job(is_serial_port, device) gateway = mysensors.AsyncSerialGateway( device, baud=baud_rate, loop=hass.loop, event_callback=None, persistence=persistence, persistence_file=persistence_file, protocol_version=version) except vol.Invalid: gateway = mysensors.AsyncTCPGateway( device, port=tcp_port, loop=hass.loop, event_callback=None, persistence=persistence, persistence_file=persistence_file, protocol_version=version) gateway.metric = hass.config.units.is_metric gateway.optimistic = config[DOMAIN].get(CONF_OPTIMISTIC) gateway.device = device gateway.event_callback = gw_callback_factory(hass) if persistence: await gateway.start_persistence() return gateway gateways = {} conf_gateways = config[DOMAIN][CONF_GATEWAYS] for index, gway in enumerate(conf_gateways): device = gway[CONF_DEVICE] persistence_file = gway.get( CONF_PERSISTENCE_FILE, hass.config.path('mysensors{}.pickle'.format(index + 1))) baud_rate = gway.get(CONF_BAUD_RATE) tcp_port = gway.get(CONF_TCP_PORT) in_prefix = gway.get(CONF_TOPIC_IN_PREFIX, '') out_prefix = gway.get(CONF_TOPIC_OUT_PREFIX, '') gateway = await setup_gateway( device, persistence_file, baud_rate, tcp_port, in_prefix, out_prefix) if gateway is not None: gateway.nodes_config = gway.get(CONF_NODES) gateways[id(gateway)] = gateway if not gateways: _LOGGER.error( "No devices could be setup as gateways, check your configuration") return False hass.data[MYSENSORS_GATEWAYS] = gateways hass.async_add_job(finish_setup(hass, gateways)) return True
Set up the MySensors component.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/mysensors.py#L287-L375
import asyncio from collections import defaultdict import logging import os import socket import sys from timeit import default_timer as timer import async_timeout import voluptuous as vol from homeassistant.components.mqtt import ( valid_publish_topic, valid_subscribe_topic) from homeassistant.const import ( ATTR_BATTERY_LEVEL, CONF_NAME, CONF_OPTIMISTIC, EVENT_HOMEASSISTANT_STOP, STATE_OFF, STATE_ON) from homeassistant.core import callback from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send) from homeassistant.helpers.entity import Entity from homeassistant.setup import async_setup_component REQUIREMENTS = ['pymysensors==0.14.0'] _LOGGER = logging.getLogger(__name__) ATTR_CHILD_ID = 'child_id' ATTR_DESCRIPTION = 'description' ATTR_DEVICE = 'device' ATTR_DEVICES = 'devices' ATTR_NODE_ID = 'node_id' CONF_BAUD_RATE = 'baud_rate' CONF_DEBUG = 'debug' CONF_DEVICE = 'device' CONF_GATEWAYS = 'gateways' CONF_PERSISTENCE = 'persistence' CONF_PERSISTENCE_FILE = 'persistence_file' CONF_RETAIN = 'retain' CONF_TCP_PORT = 'tcp_port' CONF_TOPIC_IN_PREFIX = 'topic_in_prefix' CONF_TOPIC_OUT_PREFIX = 'topic_out_prefix' CONF_VERSION = 'version' CONF_NODES = 'nodes' CONF_NODE_NAME = 'name' DEFAULT_BAUD_RATE = 115200 DEFAULT_TCP_PORT = 5003 DEFAULT_VERSION = '1.4' DOMAIN = 'mysensors' GATEWAY_READY_TIMEOUT = 15.0 MQTT_COMPONENT = 'mqtt' MYSENSORS_GATEWAYS = 'mysensors_gateways' MYSENSORS_PLATFORM_DEVICES = 'mysensors_devices_{}' MYSENSORS_GATEWAY_READY = 'mysensors_gateway_ready_{}' PLATFORM = 'platform' SCHEMA = 'schema' SIGNAL_CALLBACK = 'mysensors_callback_{}_{}_{}_{}' TYPE = 'type' def is_socket_address(value): try: socket.getaddrinfo(value, None) return value except OSError: raise vol.Invalid('Device is not a valid domain name or ip address') def has_parent_dir(value): parent = os.path.dirname(os.path.realpath(value)) is_dir_writable = os.path.isdir(parent) and os.access(parent, os.W_OK) if not is_dir_writable: raise vol.Invalid( '{} directory does not exist or is not writeable'.format(parent)) return value def has_all_unique_files(value): persistence_files = [ gateway.get(CONF_PERSISTENCE_FILE) for gateway in value] if None in persistence_files and any( name is not None for name in persistence_files): raise vol.Invalid( 'persistence file name of all devices must be set if any is set') if not all(name is None for name in persistence_files): schema = vol.Schema(vol.Unique()) schema(persistence_files) return value def is_persistence_file(value): if value.endswith(('.json', '.pickle')): return value else: raise vol.Invalid( '{} does not end in either `.json` or `.pickle`'.format(value)) def is_serial_port(value): if sys.platform.startswith('win'): ports = ('COM{}'.format(idx + 1) for idx in range(256)) if value in ports: return value else: raise vol.Invalid('{} is not a serial port'.format(value)) else: return cv.isdevice(value) def deprecated(key): def validator(config): if key not in config: return config _LOGGER.warning( '%s option for %s is deprecated. 
Please remove %s from your ' 'configuration file', key, DOMAIN, key) config.pop(key) return config return validator NODE_SCHEMA = vol.Schema({ cv.positive_int: { vol.Required(CONF_NODE_NAME): cv.string } }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema(vol.All(deprecated(CONF_DEBUG), { vol.Required(CONF_GATEWAYS): vol.All( cv.ensure_list, has_all_unique_files, [{ vol.Required(CONF_DEVICE): vol.Any(MQTT_COMPONENT, is_socket_address, is_serial_port), vol.Optional(CONF_PERSISTENCE_FILE): vol.All(cv.string, is_persistence_file, has_parent_dir), vol.Optional(CONF_BAUD_RATE, default=DEFAULT_BAUD_RATE): cv.positive_int, vol.Optional(CONF_TCP_PORT, default=DEFAULT_TCP_PORT): cv.port, vol.Optional(CONF_TOPIC_IN_PREFIX): valid_subscribe_topic, vol.Optional(CONF_TOPIC_OUT_PREFIX): valid_publish_topic, vol.Optional(CONF_NODES, default={}): NODE_SCHEMA, }] ), vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean, vol.Optional(CONF_PERSISTENCE, default=True): cv.boolean, vol.Optional(CONF_RETAIN, default=True): cv.boolean, vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): cv.string, })) }, extra=vol.ALLOW_EXTRA) BINARY_SENSOR_SCHEMA = {PLATFORM: 'binary_sensor', TYPE: 'V_TRIPPED'} CLIMATE_SCHEMA = {PLATFORM: 'climate', TYPE: 'V_HVAC_FLOW_STATE'} LIGHT_DIMMER_SCHEMA = { PLATFORM: 'light', TYPE: 'V_DIMMER', SCHEMA: {'V_DIMMER': cv.string, 'V_LIGHT': cv.string}} LIGHT_PERCENTAGE_SCHEMA = { PLATFORM: 'light', TYPE: 'V_PERCENTAGE', SCHEMA: {'V_PERCENTAGE': cv.string, 'V_STATUS': cv.string}} LIGHT_RGB_SCHEMA = { PLATFORM: 'light', TYPE: 'V_RGB', SCHEMA: { 'V_RGB': cv.string, 'V_STATUS': cv.string}} LIGHT_RGBW_SCHEMA = { PLATFORM: 'light', TYPE: 'V_RGBW', SCHEMA: { 'V_RGBW': cv.string, 'V_STATUS': cv.string}} NOTIFY_SCHEMA = {PLATFORM: 'notify', TYPE: 'V_TEXT'} DEVICE_TRACKER_SCHEMA = {PLATFORM: 'device_tracker', TYPE: 'V_POSITION'} DUST_SCHEMA = [ {PLATFORM: 'sensor', TYPE: 'V_DUST_LEVEL'}, {PLATFORM: 'sensor', TYPE: 'V_LEVEL'}] SWITCH_LIGHT_SCHEMA = {PLATFORM: 'switch', TYPE: 'V_LIGHT'} SWITCH_STATUS_SCHEMA = {PLATFORM: 'switch', TYPE: 'V_STATUS'} MYSENSORS_CONST_SCHEMA = { 'S_DOOR': [BINARY_SENSOR_SCHEMA, {PLATFORM: 'switch', TYPE: 'V_ARMED'}], 'S_MOTION': [BINARY_SENSOR_SCHEMA, {PLATFORM: 'switch', TYPE: 'V_ARMED'}], 'S_SMOKE': [BINARY_SENSOR_SCHEMA, {PLATFORM: 'switch', TYPE: 'V_ARMED'}], 'S_SPRINKLER': [ BINARY_SENSOR_SCHEMA, {PLATFORM: 'switch', TYPE: 'V_STATUS'}], 'S_WATER_LEAK': [ BINARY_SENSOR_SCHEMA, {PLATFORM: 'switch', TYPE: 'V_ARMED'}], 'S_SOUND': [ BINARY_SENSOR_SCHEMA, {PLATFORM: 'sensor', TYPE: 'V_LEVEL'}, {PLATFORM: 'switch', TYPE: 'V_ARMED'}], 'S_VIBRATION': [ BINARY_SENSOR_SCHEMA, {PLATFORM: 'sensor', TYPE: 'V_LEVEL'}, {PLATFORM: 'switch', TYPE: 'V_ARMED'}], 'S_MOISTURE': [ BINARY_SENSOR_SCHEMA, {PLATFORM: 'sensor', TYPE: 'V_LEVEL'}, {PLATFORM: 'switch', TYPE: 'V_ARMED'}], 'S_HVAC': [CLIMATE_SCHEMA], 'S_COVER': [ {PLATFORM: 'cover', TYPE: 'V_DIMMER'}, {PLATFORM: 'cover', TYPE: 'V_PERCENTAGE'}, {PLATFORM: 'cover', TYPE: 'V_LIGHT'}, {PLATFORM: 'cover', TYPE: 'V_STATUS'}], 'S_DIMMER': [LIGHT_DIMMER_SCHEMA, LIGHT_PERCENTAGE_SCHEMA], 'S_RGB_LIGHT': [LIGHT_RGB_SCHEMA], 'S_RGBW_LIGHT': [LIGHT_RGBW_SCHEMA], 'S_INFO': [NOTIFY_SCHEMA, {PLATFORM: 'sensor', TYPE: 'V_TEXT'}], 'S_GPS': [ DEVICE_TRACKER_SCHEMA, {PLATFORM: 'sensor', TYPE: 'V_POSITION'}], 'S_TEMP': [{PLATFORM: 'sensor', TYPE: 'V_TEMP'}], 'S_HUM': [{PLATFORM: 'sensor', TYPE: 'V_HUM'}], 'S_BARO': [ {PLATFORM: 'sensor', TYPE: 'V_PRESSURE'}, {PLATFORM: 'sensor', TYPE: 'V_FORECAST'}], 'S_WIND': [ {PLATFORM: 'sensor', TYPE: 'V_WIND'}, 
{PLATFORM: 'sensor', TYPE: 'V_GUST'}, {PLATFORM: 'sensor', TYPE: 'V_DIRECTION'}], 'S_RAIN': [ {PLATFORM: 'sensor', TYPE: 'V_RAIN'}, {PLATFORM: 'sensor', TYPE: 'V_RAINRATE'}], 'S_UV': [{PLATFORM: 'sensor', TYPE: 'V_UV'}], 'S_WEIGHT': [ {PLATFORM: 'sensor', TYPE: 'V_WEIGHT'}, {PLATFORM: 'sensor', TYPE: 'V_IMPEDANCE'}], 'S_POWER': [ {PLATFORM: 'sensor', TYPE: 'V_WATT'}, {PLATFORM: 'sensor', TYPE: 'V_KWH'}, {PLATFORM: 'sensor', TYPE: 'V_VAR'}, {PLATFORM: 'sensor', TYPE: 'V_VA'}, {PLATFORM: 'sensor', TYPE: 'V_POWER_FACTOR'}], 'S_DISTANCE': [{PLATFORM: 'sensor', TYPE: 'V_DISTANCE'}], 'S_LIGHT_LEVEL': [ {PLATFORM: 'sensor', TYPE: 'V_LIGHT_LEVEL'}, {PLATFORM: 'sensor', TYPE: 'V_LEVEL'}], 'S_IR': [ {PLATFORM: 'sensor', TYPE: 'V_IR_RECEIVE'}, {PLATFORM: 'switch', TYPE: 'V_IR_SEND', SCHEMA: {'V_IR_SEND': cv.string, 'V_LIGHT': cv.string}}], 'S_WATER': [ {PLATFORM: 'sensor', TYPE: 'V_FLOW'}, {PLATFORM: 'sensor', TYPE: 'V_VOLUME'}], 'S_CUSTOM': [ {PLATFORM: 'sensor', TYPE: 'V_VAR1'}, {PLATFORM: 'sensor', TYPE: 'V_VAR2'}, {PLATFORM: 'sensor', TYPE: 'V_VAR3'}, {PLATFORM: 'sensor', TYPE: 'V_VAR4'}, {PLATFORM: 'sensor', TYPE: 'V_VAR5'}, {PLATFORM: 'sensor', TYPE: 'V_CUSTOM'}], 'S_SCENE_CONTROLLER': [ {PLATFORM: 'sensor', TYPE: 'V_SCENE_ON'}, {PLATFORM: 'sensor', TYPE: 'V_SCENE_OFF'}], 'S_COLOR_SENSOR': [{PLATFORM: 'sensor', TYPE: 'V_RGB'}], 'S_MULTIMETER': [ {PLATFORM: 'sensor', TYPE: 'V_VOLTAGE'}, {PLATFORM: 'sensor', TYPE: 'V_CURRENT'}, {PLATFORM: 'sensor', TYPE: 'V_IMPEDANCE'}], 'S_GAS': [ {PLATFORM: 'sensor', TYPE: 'V_FLOW'}, {PLATFORM: 'sensor', TYPE: 'V_VOLUME'}], 'S_WATER_QUALITY': [ {PLATFORM: 'sensor', TYPE: 'V_TEMP'}, {PLATFORM: 'sensor', TYPE: 'V_PH'}, {PLATFORM: 'sensor', TYPE: 'V_ORP'}, {PLATFORM: 'sensor', TYPE: 'V_EC'}, {PLATFORM: 'switch', TYPE: 'V_STATUS'}], 'S_AIR_QUALITY': DUST_SCHEMA, 'S_DUST': DUST_SCHEMA, 'S_LIGHT': [SWITCH_LIGHT_SCHEMA], 'S_BINARY': [SWITCH_STATUS_SCHEMA], 'S_LOCK': [{PLATFORM: 'switch', TYPE: 'V_LOCK_STATUS'}], }
MIT License
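A hedged illustration of the configuration shape the async_setup code above consumes (values are hypothetical; in Home Assistant this would normally live under `mysensors:` in configuration.yaml, shown here as the equivalent Python dict keyed by DOMAIN):
config = {
    'mysensors': {
        'gateways': [
            {'device': '/dev/ttyUSB0', 'baud_rate': 115200},   # serial gateway
            {'device': '192.168.1.10', 'tcp_port': 5003},      # TCP/ethernet gateway
        ],
        'optimistic': False,
        'persistence': True,
        'retain': True,
        'version': '2.0',
    }
}
# async_setup(hass, config) builds one pymysensors gateway per entry in 'gateways'
# and stores them in hass.data[MYSENSORS_GATEWAYS].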
zefresk/deep-position-analysis
misc.py
format_nodes
python
def format_nodes(n, fmt="{:1.1f}"): if n < 10**3: return "%d"%(n) elif n < 10**6: return (fmt+"k").format(n/1000) elif n < 10**9: return (fmt+"m").format(n/10**6) elif n < 10**12: return (fmt+"g").format(n/10**9) else: return (fmt+"t").format(n/10**12)
Select an appropriate human-readable suffix (k/m/g/t) for large values.
https://github.com/zefresk/deep-position-analysis/blob/f3f24e4587cf623e8b7d19a8e5682c7b6d0efaf4/misc.py#L11-L22
import re import time import chess import hashlib import pickle
MIT License
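A short usage sketch for format_nodes above; outputs assume the default "{:1.1f}" format string:
print(format_nodes(950))           # '950'  (below 10**3, plain integer)
print(format_nodes(1500))          # '1.5k'
print(format_nodes(2300000))       # '2.3m'
print(format_nodes(7 * 10**9))     # '7.0g'
print(format_nodes(3 * 10**12))    # '3.0t'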
shjung13/standardized-max-logits
network/deepv3.py
DeepWideResNet101V3PlusD_OS8
python
def DeepWideResNet101V3PlusD_OS8(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : wide_resnet-101") return DeepV3Plus(num_classes, trunk='wide_resnet-101', criterion=criterion, criterion_aux=criterion_aux, variant='D', skip='m1', args=args)
Wide ResNet 101 Based Network
https://github.com/shjung13/standardized-max-logits/blob/c3ecd1d8e53d8761821dd5262c3b569ad9c9a116/network/deepv3.py#L729-L735
import logging import math import numpy as np import torch from torch import nn import torch.nn.functional as F from network import Resnet from network.mynn import initialize_weights, Norm2d, Upsample, freeze_weights, unfreeze_weights from torchvision.utils import save_image from scipy import ndimage as ndi from kornia.morphology import dilation, erosion import torchvision.models as models selem = torch.ones((3, 3)).cuda() selem_dilation = torch.FloatTensor(ndi.generate_binary_structure(2, 1)).cuda() print(f'selem:\n\n{selem}') print(f'selem_dilation:\n\n{selem_dilation}') d_k1 = torch.zeros((1, 1, 2 * 1 + 1, 2 * 1 + 1)).cuda() d_k2 = torch.zeros((1, 1, 2 * 2 + 1, 2 * 2 + 1)).cuda() d_k3 = torch.zeros((1, 1, 2 * 3 + 1, 2 * 3 + 1)).cuda() d_k4 = torch.zeros((1, 1, 2 * 4 + 1, 2 * 4 + 1)).cuda() d_k5 = torch.zeros((1, 1, 2 * 5 + 1, 2 * 5 + 1)).cuda() d_k6 = torch.zeros((1, 1, 2 * 6 + 1, 2 * 6 + 1)).cuda() d_k7 = torch.zeros((1, 1, 2 * 7 + 1, 2 * 7 + 1)).cuda() d_k8 = torch.zeros((1, 1, 2 * 8 + 1, 2 * 8 + 1)).cuda() d_k9 = torch.zeros((1, 1, 2 * 9 + 1, 2 * 9 + 1)).cuda() d_ks = {1: d_k1, 2: d_k2, 3: d_k3, 4: d_k4, 5: d_k5, 6: d_k6, 7: d_k7, 8: d_k8, 9: d_k9} for k, v in d_ks.items(): v[:,:,k,k] = 1 for i in range(k): v = dilation(v, selem_dilation) d_ks[k] = v.squeeze(0).squeeze(0) print(f'dilation kernel at {k}:\n\n{d_ks[k]}') class _AtrousSpatialPyramidPoolingModule(nn.Module): def __init__(self, in_dim, reduction_dim=256, output_stride=16, rates=(6, 12, 18)): super(_AtrousSpatialPyramidPoolingModule, self).__init__() print("output_stride = ", output_stride) if output_stride == 8: rates = [2 * r for r in rates] elif output_stride == 4: rates = [4 * r for r in rates] elif output_stride == 16: pass elif output_stride == 32: rates = [r // 2 for r in rates] else: raise 'output stride of {} not supported'.format(output_stride) self.features = [] self.features.append( nn.Sequential(nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False), Norm2d(reduction_dim), nn.ReLU(inplace=True))) for r in rates: self.features.append(nn.Sequential( nn.Conv2d(in_dim, reduction_dim, kernel_size=3, dilation=r, padding=r, bias=False), Norm2d(reduction_dim), nn.ReLU(inplace=True) )) self.features = torch.nn.ModuleList(self.features) self.img_pooling = nn.AdaptiveAvgPool2d(1) self.img_conv = nn.Sequential( nn.Conv2d(in_dim, 256, kernel_size=1, bias=False), Norm2d(256), nn.ReLU(inplace=True)) def forward(self, x): x_size = x.size() img_features = self.img_pooling(x) img_features = self.img_conv(img_features) img_features = Upsample(img_features, x_size[2:]) out = img_features for f in self.features: y = f(x) out = torch.cat((out, y), 1) return out def find_boundaries(label): assert len(label.shape) == 4 boundaries = (dilation(label.float(), selem_dilation) != erosion(label.float(), selem)).float() return boundaries def expand_boundaries(boundaries, r=0): if r == 0: return boundaries expanded_boundaries = dilation(boundaries, d_ks[r]) return expanded_boundaries class BoundarySuppressionWithSmoothing(nn.Module): def __init__(self, boundary_suppression=True, boundary_width=4, boundary_iteration=4, dilated_smoothing=True, kernel_size=7, dilation=6): super(BoundarySuppressionWithSmoothing, self).__init__() self.kernel_size = kernel_size self.dilation = dilation self.boundary_suppression = boundary_suppression self.boundary_width = boundary_width self.boundary_iteration = boundary_iteration sigma = 1.0 size = 7 gaussian_kernel = np.fromfunction(lambda x, y: (1/(2*math.pi*sigma**2)) * math.e ** 
((-1*((x-(size-1)/2)**2+(y-(size-1)/2)**2))/(2*sigma**2)), (size, size)) gaussian_kernel /= np.sum(gaussian_kernel) gaussian_kernel = torch.Tensor(gaussian_kernel).unsqueeze(0).unsqueeze(0) self.dilated_smoothing = dilated_smoothing self.first_conv = nn.Conv2d(1, 1, kernel_size=3, stride=1, bias=False) self.first_conv.weight = torch.nn.Parameter(torch.ones_like((self.first_conv.weight))) self.second_conv = nn.Conv2d(1, 1, kernel_size=self.kernel_size, stride=1, dilation=self.dilation, bias=False) self.second_conv.weight = torch.nn.Parameter(gaussian_kernel) def forward(self, x, prediction=None): if len(x.shape) == 3: x = x.unsqueeze(1) x_size = x.size() assert len(x.shape) == 4 out = x if self.boundary_suppression: boundaries = find_boundaries(prediction.unsqueeze(1)) expanded_boundaries = None if self.boundary_iteration != 0: assert self.boundary_width % self.boundary_iteration == 0 diff = self.boundary_width // self.boundary_iteration for iteration in range(self.boundary_iteration): if len(out.shape) != 4: out = out.unsqueeze(1) prev_out = out if self.boundary_width == 0 or iteration == self.boundary_iteration - 1: expansion_width = 0 else: expansion_width = self.boundary_width - diff * iteration - 1 expanded_boundaries = expand_boundaries(boundaries, r=expansion_width) non_boundary_mask = 1. * (expanded_boundaries == 0) f_size = 1 num_pad = f_size x_masked = out * non_boundary_mask x_padded = nn.ReplicationPad2d(num_pad)(x_masked) non_boundary_mask_padded = nn.ReplicationPad2d(num_pad)(non_boundary_mask) y = self.first_conv(x_padded) num_calced_elements = self.first_conv(non_boundary_mask_padded) num_calced_elements = num_calced_elements.long() avg_y = torch.where((num_calced_elements == 0), prev_out, y / num_calced_elements) out = avg_y out = torch.where((non_boundary_mask == 0), out, prev_out) del expanded_boundaries, non_boundary_mask if self.dilated_smoothing == True: out = nn.ReplicationPad2d(self.dilation * 3)(out) out = self.second_conv(out) return out.squeeze(1) else: if self.dilated_smoothing == True: out = nn.ReplicationPad2d(self.dilation * 3)(out) out = self.second_conv(out) else: out = x return out.squeeze(1) class DeepV3Plus(nn.Module): def __init__(self, num_classes, trunk='resnet-101', criterion=None, criterion_aux=None, variant='D', skip='m1', skip_num=48, args=None): super(DeepV3Plus, self).__init__() self.criterion = criterion self.criterion_aux = criterion_aux self.variant = variant self.args = args self.trunk = trunk self.num_classes = num_classes self.score_mode=args.score_mode self.enable_post_processing = (args.enable_boundary_suppression or args.enable_dilated_smoothing) if trunk == 'shufflenetv2': channel_1st = 3 channel_2nd = 24 channel_3rd = 116 channel_4th = 232 prev_final_channel = 464 final_channel = 1024 resnet = models.shufflenet_v2_x1_0(pretrained=True) self.layer0 = nn.Sequential(resnet.conv1, resnet.maxpool) self.layer1 = resnet.stage2 self.layer2 = resnet.stage3 self.layer3 = resnet.stage4 self.layer4 = resnet.conv5 class Layer0(nn.Module): def __init__(self, iw): super(Layer0, self).__init__() self.layer = nn.Sequential(resnet.conv1, resnet.maxpool) self.instance_norm_layer = resnet.instance_norm_layer1 self.iw = iw def forward(self, x): x = self.layer(x) return x class Layer4(nn.Module): def __init__(self, iw): super(Layer4, self).__init__() self.layer = resnet.conv5 self.instance_norm_layer = resnet.instance_norm_layer2 self.iw = iw def forward(self, x): x = self.layer(x) return x self.layer0 = Layer0(iw=0) self.layer1 = resnet.stage2 self.layer2 
= resnet.stage3 self.layer3 = resnet.stage4 self.layer4 = Layer4(iw=0) if self.variant == 'D': for n, m in self.layer2.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) for n, m in self.layer3.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) elif self.variant == 'D16': for n, m in self.layer3.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) else: print("Not using Dilation ") elif trunk == 'mnasnet_05' or trunk == 'mnasnet_10': if trunk == 'mnasnet_05': resnet = models.mnasnet0_5(pretrained=True) channel_1st = 3 channel_2nd = 16 channel_3rd = 24 channel_4th = 48 prev_final_channel = 160 final_channel = 1280 print("# of layers", len(resnet.layers)) self.layer0 = nn.Sequential(resnet.layers[0],resnet.layers[1],resnet.layers[2], resnet.layers[3],resnet.layers[4],resnet.layers[5],resnet.layers[6],resnet.layers[7]) self.layer1 = nn.Sequential(resnet.layers[8], resnet.layers[9]) self.layer2 = nn.Sequential(resnet.layers[10], resnet.layers[11]) self.layer3 = nn.Sequential(resnet.layers[12], resnet.layers[13]) self.layer4 = nn.Sequential(resnet.layers[14], resnet.layers[15], resnet.layers[16]) else: resnet = models.mnasnet1_0(pretrained=True) channel_1st = 3 channel_2nd = 16 channel_3rd = 40 channel_4th = 96 prev_final_channel = 320 final_channel = 1280 print("# of layers", len(resnet.layers)) self.layer0 = nn.Sequential(resnet.layers[0],resnet.layers[1],resnet.layers[2], resnet.layers[3],resnet.layers[4],resnet.layers[5],resnet.layers[6],resnet.layers[7]) self.layer1 = nn.Sequential(resnet.layers[8], resnet.layers[9]) self.layer2 = nn.Sequential(resnet.layers[10], resnet.layers[11]) self.layer3 = nn.Sequential(resnet.layers[12], resnet.layers[13]) self.layer4 = nn.Sequential(resnet.layers[14], resnet.layers[15], resnet.layers[16]) if self.variant == 'D': for n, m in self.layer2.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) for n, m in self.layer3.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) elif self.variant == 'D16': for n, m in self.layer3.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) else: print("Not using Dilation ") elif trunk == 'mobilenetv2': channel_1st = 3 channel_2nd = 16 channel_3rd = 32 channel_4th = 64 prev_final_channel = 320 final_channel = 1280 resnet = models.mobilenet_v2(pretrained=True) self.layer0 = nn.Sequential(resnet.features[0], resnet.features[1]) self.layer1 = nn.Sequential(resnet.features[2], resnet.features[3], resnet.features[4], resnet.features[5], resnet.features[6]) self.layer2 = nn.Sequential(resnet.features[7], resnet.features[8], resnet.features[9], resnet.features[10]) self.layer3 = nn.Sequential(resnet.features[11], resnet.features[12], resnet.features[13], resnet.features[14], resnet.features[15], resnet.features[16], resnet.features[17]) self.layer4 = nn.Sequential(resnet.features[18]) if self.variant == 'D': for n, m in self.layer2.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) for n, m in self.layer3.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) elif self.variant 
== 'D16': for n, m in self.layer3.named_modules(): if isinstance(m, nn.Conv2d) and m.stride==(2,2): m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) else: print("Not using Dilation ") else: channel_1st = 3 channel_2nd = 64 channel_3rd = 256 channel_4th = 512 prev_final_channel = 1024 final_channel = 2048 if trunk == 'resnet-18': channel_1st = 3 channel_2nd = 64 channel_3rd = 64 channel_4th = 128 prev_final_channel = 256 final_channel = 512 resnet = Resnet.resnet18() resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) elif trunk == 'resnet-50': resnet = Resnet.resnet50() resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) elif trunk == 'resnet-101': resnet = Resnet.resnet101(pretrained=True) resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu1, resnet.conv2, resnet.bn2, resnet.relu2, resnet.conv3, resnet.bn3, resnet.relu3, resnet.maxpool) elif trunk == 'resnet-152': resnet = Resnet.resnet152() resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) elif trunk == 'resnext-50': resnet = models.resnext50_32x4d(pretrained=True) resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) elif trunk == 'resnext-101': resnet = models.resnext101_32x8d(pretrained=True) resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) elif trunk == 'wide_resnet-50': resnet = models.wide_resnet50_2(pretrained=True) resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) elif trunk == 'wide_resnet-101': resnet = models.wide_resnet101_2(pretrained=True) resnet.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool) else: raise ValueError("Not a valid network arch") self.layer0 = resnet.layer0 self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4 if self.variant == 'D': for n, m in self.layer3.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) for n, m in self.layer4.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) elif self.variant == 'D4': for n, m in self.layer2.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) for n, m in self.layer3.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) for n, m in self.layer4.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (8, 8), (8, 8), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) elif self.variant == 'D16': for n, m in self.layer4.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) else: print("Not using Dilation ") if self.variant == 'D': os = 8 elif self.variant == 'D4': os = 4 elif self.variant == 'D16': os = 16 else: os = 32 self.class_mean = None self.class_var = None self.output_stride = os self.aspp = _AtrousSpatialPyramidPoolingModule(final_channel, 256, output_stride=os) self.bot_fine = nn.Sequential( nn.Conv2d(channel_3rd, 48, kernel_size=1, bias=False), Norm2d(48), nn.ReLU(inplace=True)) self.bot_aspp = nn.Sequential( nn.Conv2d(1280, 256, kernel_size=1, bias=False), Norm2d(256), nn.ReLU(inplace=True)) self.final1 = nn.Sequential( nn.Conv2d(304, 256, 
kernel_size=3, padding=1, bias=False), Norm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False), Norm2d(256), nn.ReLU(inplace=True)) self.final2 = nn.Sequential( nn.Conv2d(256, num_classes, kernel_size=1, bias=True)) self.dsn = nn.Sequential( nn.Conv2d(prev_final_channel, 512, kernel_size=3, stride=1, padding=1), Norm2d(512), nn.ReLU(inplace=True), nn.Dropout2d(0.1), nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True) ) if self.enable_post_processing: self.multi_scale = BoundarySuppressionWithSmoothing( boundary_suppression=args.enable_boundary_suppression, boundary_width=args.boundary_width, boundary_iteration=args.boundary_iteration, dilated_smoothing=args.enable_dilated_smoothing, kernel_size=args.smoothing_kernel_size, dilation=args.smoothing_kernel_dilation) initialize_weights(self.dsn) initialize_weights(self.aspp) initialize_weights(self.bot_aspp) initialize_weights(self.bot_fine) initialize_weights(self.final1) initialize_weights(self.final2) def set_statistics(self, mean, var): self.class_mean = mean self.class_var = var def forward(self, x, seg_gts=None, ood_gts=None, aux_gts=None, ignore_label=255): x_size = x.size() input_img = x.detach().cpu().clone() x = self.layer0(x) x = self.layer1(x) low_level = x x = self.layer2(x) x = self.layer3(x) aux_out = x x = self.layer4(x) represent = x x = self.aspp(x) dec0_up = self.bot_aspp(x) dec0_fine = self.bot_fine(low_level) dec0_up = Upsample(dec0_up, low_level.size()[2:]) dec0 = [dec0_fine, dec0_up] dec0 = torch.cat(dec0, 1) dec1 = self.final1(dec0) dec2 = self.final2(dec1) main_out = Upsample(dec2, x_size[2:]) if self.score_mode == 'msp': anomaly_score, prediction = nn.Softmax(dim=1)(main_out.detach()).max(1) elif self.score_mode == 'max_logit': anomaly_score, prediction = main_out.detach().max(1) elif self.score_mode == 'standardized_max_logit': if self.class_mean is None or self.class_var is None: raise Exception("Class mean and var are not set!") anomaly_score, prediction = main_out.detach().max(1) for c in range(self.num_classes): anomaly_score = torch.where(prediction == c, (anomaly_score - self.class_mean[c]) / np.sqrt(self.class_var[c]), anomaly_score) else: raise Exception(f"Not implemented score mode {self.score_mode}!") if self.enable_post_processing: with torch.no_grad(): anomaly_score = self.multi_scale(anomaly_score, prediction) if self.training: loss1 = self.criterion(main_out, seg_gts) aux_out = self.dsn(aux_out) if aux_gts.dim() == 1: aux_gts = seg_gts aux_gts = aux_gts.unsqueeze(1).float() aux_gts = nn.functional.interpolate(aux_gts, size=aux_out.shape[2:], mode='nearest') aux_gts = aux_gts.squeeze(1).long() loss2 = self.criterion_aux(aux_out, aux_gts) return loss1, loss2, anomaly_score else: return main_out, anomaly_score def get_final_layer(model): unfreeze_weights(model.final) return model.final def DeepR18V3PlusD(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNet-18") return DeepV3Plus(num_classes, trunk='resnet-18', criterion=criterion, criterion_aux=criterion_aux, variant='D32', skip='m1', args=args) def DeepR50V3PlusD_OS8(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNet-50") return DeepV3Plus(num_classes, trunk='resnet-50', criterion=criterion, criterion_aux=criterion_aux, variant='D', skip='m1', args=args) def DeepR50V3PlusD(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNet-50") return DeepV3Plus(num_classes, trunk='resnet-50', 
criterion=criterion, criterion_aux=criterion_aux, variant='D16', skip='m1', args=args) def DeepR101V3PlusD(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNet-101") return DeepV3Plus(num_classes, trunk='resnet-101', criterion=criterion, criterion_aux=criterion_aux, variant='D16', skip='m1', args=args) def DeepR101V3PlusD_OS8(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNet-101") return DeepV3Plus(num_classes, trunk='resnet-101', criterion=criterion, criterion_aux=criterion_aux, variant='D', skip='m1', args=args) def DeepR152V3PlusD_OS8(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNet-152") return DeepV3Plus(num_classes, trunk='resnet-152', criterion=criterion, criterion_aux=criterion_aux, variant='D', skip='m1', args=args) def DeepResNext50V3PlusD(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNext-50 32x4d") return DeepV3Plus(num_classes, trunk='resnext-50', criterion=criterion, criterion_aux=criterion_aux, variant='D16', skip='m1', args=args) def DeepResNext101V3PlusD(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : ResNext-101 32x8d") return DeepV3Plus(num_classes, trunk='resnext-101', criterion=criterion, criterion_aux=criterion_aux, variant='D16', skip='m1', args=args) def DeepWideResNet50V3PlusD(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : wide_resnet-50") return DeepV3Plus(num_classes, trunk='wide_resnet-50', criterion=criterion, criterion_aux=criterion_aux, variant='D16', skip='m1', args=args) def DeepWideResNet50V3PlusD_OS8(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : wide_resnet-50") return DeepV3Plus(num_classes, trunk='wide_resnet-50', criterion=criterion, criterion_aux=criterion_aux, variant='D', skip='m1', args=args) def DeepWideResNet101V3PlusD(args, num_classes, criterion, criterion_aux): print("Model : DeepLabv3+, Backbone : wide_resnet-101") return DeepV3Plus(num_classes, trunk='wide_resnet-101', criterion=criterion, criterion_aux=criterion_aux, variant='D16', skip='m1', args=args)
Apache License 2.0
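A hedged construction sketch for the factory above; the attributes placed on args are assumptions inferred from what DeepV3Plus reads (score_mode plus the boundary/smoothing flags), not a documented interface:
import argparse
import torch.nn as nn
args = argparse.Namespace(
    score_mode='msp',                 # or 'max_logit' / 'standardized_max_logit'
    enable_boundary_suppression=False,
    enable_dilated_smoothing=False,
    boundary_width=4, boundary_iteration=4,
    smoothing_kernel_size=7, smoothing_kernel_dilation=6,
)
criterion = nn.CrossEntropyLoss(ignore_index=255)
model = DeepWideResNet101V3PlusD_OS8(args, num_classes=19,
                                     criterion=criterion, criterion_aux=criterion)
# Builds DeepLabv3+ on a torchvision wide_resnet101_2 trunk with output stride 8 (variant='D').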
sbg/sevenbridges-python
sevenbridges/models/app.py
App.install_app
python
def install_app(cls, id, raw, api=None, raw_format=None): api = api if api else cls._API raw_format = raw_format.lower() if raw_format else AppRawFormat.JSON extra = { 'resource': cls.__name__, 'query': { 'id': id, 'data': raw } } logger.info('Installing app', extra=extra) if raw_format not in cls._CONTENT_TYPE.keys(): raise SbgError(f'Unsupported raw data format: "{raw_format}".') headers = {'Content-Type': cls._CONTENT_TYPE[raw_format]} app = api.post( url=cls._URL['raw'].format(id=id), data=raw, headers=headers, ).json() app_wrapper = api.get( url=cls._URL['get'].format(id=app['sbg:id'])).json() return App(api=api, **app_wrapper)
Installs an app. :param id: App identifier. :param raw: Raw CWL data. :param api: Api instance. :param raw_format: Format of raw app data being sent, json by default. :return: App object.
https://github.com/sbg/sevenbridges-python/blob/b3e14016066563470d978c9b13e1a236a41abea8/sevenbridges/models/app.py#L104-L136
import logging import re from sevenbridges.meta.fields import ( DictField, HrefField, StringField, IntegerField, ) from sevenbridges.errors import SbgError from sevenbridges.meta.resource import Resource from sevenbridges.meta.transformer import Transform from sevenbridges.models.enums import AppRawFormat, AppCopyStrategy logger = logging.getLogger(__name__) class App(Resource): _URL = { 'query': '/apps', 'get': '/apps/{id}', 'get_revision': '/apps/{id}/{revision}', 'create_revision': '/apps/{id}/{revision}/raw', 'copy': '/apps/{id}/actions/copy', 'sync': '/apps/{id}/actions/sync', 'raw': '/apps/{id}/raw' } _CONTENT_TYPE = { AppRawFormat.JSON: 'application/json', AppRawFormat.YAML: 'application/yaml' } href = HrefField(read_only=True) _id = StringField(read_only=True, name='id') project = StringField(read_only=True) name = StringField(read_only=True) revision = IntegerField(read_only=True) raw = DictField(read_only=False) @property def id(self): _id, _rev = self._id.rsplit('/', 1) if re.match(r'^\d*$', _rev): return _id else: return self._id def __eq__(self, other): if type(other) is not type(self): return False return self is other or self.id == other.id def __str__(self): revision = self.field('revision') if revision: return f'<App: id={self.id} rev={self.revision}>' return f'<App: id={self.id}' @classmethod def query(cls, project=None, visibility=None, q=None, id=None, offset=None, limit=None, api=None): if project: project = Transform.to_project(project) api = api or cls._API return super()._query(url=cls._URL['query'], project=project, visibility=visibility, q=q, id=id, offset=offset, limit=limit, api=api) @classmethod def get_revision(cls, id, revision, api=None): api = api if api else cls._API extra = {'resource': cls.__name__, 'query': { 'id': id, 'revision': revision }} logger.info('Get revision', extra=extra) app = api.get(url=cls._URL['get_revision'].format( id=id, revision=revision)).json() return App(api=api, **app) @classmethod
Apache License 2.0
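An illustrative call to App.install_app; the app id, project path and CWL payload are hypothetical, and api is assumed to be an authenticated sevenbridges Api instance:
import sevenbridges as sbg
api = sbg.Api(url='https://api.sbgenomics.com/v2', token='<token>')   # hypothetical credentials
raw_cwl = '{"class": "CommandLineTool", "cwlVersion": "v1.0"}'        # minimal raw app body, illustrative only
app = App.install_app(id='my-user/my-project/my-app', raw=raw_cwl,
                      api=api, raw_format='json')
print(app.id, app.revision)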
rucio/rucio
lib/rucio/tests/common.py
file_generator
python
def file_generator(size=2, namelen=10): fn = '/tmp/file_' + ''.join(choice(ascii_uppercase) for x in range(namelen)) execute('dd if=/dev/urandom of={0} count={1} bs=1'.format(fn, size)) return fn
Create a bogus file and return its name. :param size: size in bytes :returns: The name of the generated file.
https://github.com/rucio/rucio/blob/6a6092798bb8220dec07328d0e3f7f42d1b931cd/lib/rucio/tests/common.py#L87-L94
from __future__ import print_function import contextlib import itertools import json import os import tempfile from random import choice from string import ascii_uppercase import pytest from six import PY3 from rucio.common.config import config_get, config_get_bool, get_config_dirs from rucio.common.utils import generate_uuid as uuid, execute skip_rse_tests_with_accounts = pytest.mark.skipif(not any(os.path.exists(os.path.join(d, 'rse-accounts.cfg')) for d in get_config_dirs()), reason='fails if no rse-accounts.cfg found') skiplimitedsql = pytest.mark.skipif('RDBMS' in os.environ and os.environ['RDBMS'] == 'sqlite', reason="does not work in SQLite because of missing features") def get_long_vo(): vo_name = 'def' if config_get_bool('common', 'multi_vo', raise_exception=False, default=False): vo_name = config_get('client', 'vo', raise_exception=False, default=None) return vo_name def account_name_generator(): return 'jdoe-' + str(uuid()).lower()[:16] def scope_name_generator(): return 'mock_' + str(uuid()).lower()[:16] def rse_name_generator(size=10): return 'MOCK_' + ''.join(choice(ascii_uppercase) for x in range(size))
Apache License 2.0
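A minimal usage sketch for file_generator; note that it shells out to dd, so it assumes a POSIX environment:
import os
fn = file_generator(size=1024)   # writes 1024 random bytes to /tmp/file_<10 uppercase letters>
print(fn)
os.remove(fn)                    # clean up the temporary file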
crespo-otero-group/fromage
fromage/io/read_file.py
read_cp2k
python
def read_cp2k(in_name, pop="ESP"): with open(in_name) as cp2k_file: cp2k_content = cp2k_file.readlines() if pop.lower() == "mulliken": start_tag = "Mulliken Population Analysis" char_pos = 4 line_test = lambda x: x.split()[0].isdigit() if pop.lower() == "esp" or pop.lower() == "resp": start_tag = " RESP charges:" char_pos = 3 line_test = lambda x: (x.split()[0] == "RESP" and len(x.split()) == 4) if pop.lower() in ("hirshfeld", "hirsh"): start_tag = "Hirshfeld Charges" char_pos = 5 line_test = lambda x: x.split()[0].isdigit() reading = False charges = [] for line in cp2k_content: if line.strip(): if start_tag in line: reading = True if reading and line_test(line): charges.append(float(line.split()[char_pos])) if "Total" in line: reading = False if "ENERGY|" in line: energy = float(line.split()[8]) cp2k_file.close() return charges, energy
Read the charges and energy in a cp2k output file. Uses CP2K 4.1 formats. Choose between Mulliken, Hirshfeld or RESP charges. Parameters ---------- in_name : str Name of the file to read pop : str Kind of charge to read: mulliken, esp or hirshfeld Returns ------- charges : list of floats Each partial charge value in the file energy : float CP2K calculated energy (Kohn-Sham or otherwise) in Hartree
https://github.com/crespo-otero-group/fromage/blob/9b4a80698ed1672268dde292d5512c72a23cb00a/fromage/io/read_file.py#L194-L245
import numpy as np import fromage.utils.per_table as pt from fromage.utils.mol import Mol from fromage.utils import per_table as per from fromage.utils.atom import Atom from fromage.utils.dimer import Dimer from fromage.utils.volume import CubeGrid def read_vasp(in_name): with open(in_name) as vasp_file: vasp_content = vasp_file.readlines() vec1 = vasp_content[2].split() vec2 = vasp_content[3].split() vec3 = vasp_content[4].split() M = np.zeros((3, 3)) M[0] = vec1 M[1] = vec2 M[2] = vec3 species = vasp_content[5].split() amounts_str = vasp_content[6].split() amounts = map(int, amounts_str) atoms = [] for element in species: firstAt = 8 + sum(amounts[:species.index(element)]) lastAt = 8 + sum(amounts[:species.index(element) + 1]) for line in vasp_content: if vasp_content.index(line) in range(firstAt, lastAt): xAtom, yAtom, zAtom = map(float, line.split()) atoms.append(Atom(element, xAtom, yAtom, zAtom)) return M, atoms def read_xyz(in_name): with open(in_name) as xyz_file: xyz_content = xyz_file.readlines() atom_step = [] for i, line in enumerate(xyz_content): if line.strip(): if line.split()[0].isdigit(): atoms = [] for line_in_step in xyz_content[i + 2:i + int(line) + 2]: elemAtom = line_in_step.split()[0] xAtom, yAtom, zAtom = map(float, line_in_step.split()[1:]) atoms.append(Atom(elemAtom, xAtom, yAtom, zAtom)) atom_step.append(atoms) xyz_file.close() return atom_step def read_pos(in_name): atoms = read_xyz(in_name)[-1] return atoms def mol_from_file(in_name, bonding='', vectors=np.zeros((3, 3))): mol = Mol(read_pos(in_name)) mol.vectors = vectors mol.set_bonding_str(bonding) return mol def dimer_from_file(in_name, bonding=''): double_mol = mol_from_file(in_name) mol_a, mol_b = double_mol.split_in_half() dim = Dimer(mol_a, mol_b) return dim
MIT License
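A usage sketch for read_cp2k against a hypothetical CP2K 4.1 output file:
charges, energy = read_cp2k('cp2k_run.out', pop='mulliken')
print(len(charges), 'partial charges; energy =', energy, 'Hartree')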
rodionovd/machobot
machobot/common/fileutils.py
save_macho
python
def save_macho(macho, path): f = open(path, "r+") return macho.write(f)
Serializes the provided Mach-O object to a file at the given path.
https://github.com/rodionovd/machobot/blob/60e10b63c2538a73dc8ec3ce636b3ed5bf09f524/machobot/common/fileutils.py#L10-L19
from macholib.MachO import MachO
MIT License
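A hedged usage sketch for save_macho; the binary path is hypothetical, and any header edits would be made through macholib's MachO object before saving:
from macholib.MachO import MachO
macho = MachO('/path/to/binary')        # parse an existing Mach-O file
# ... modify load commands / headers via macholib here ...
save_macho(macho, '/path/to/binary')    # write the (possibly modified) headers back in place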
marrink-lab/polyply_1.0
polyply/src/top_parser.py
TOPDirector.finalize
python
def finalize(self, lineno=0): if self.current_itp: self.itp_lines.append(self.current_itp) if self.current_meta is not None: raise IOError("Your {} section is orderd incorrectly." "There is no #endif for this pragma.".format(self.current_meta)) for lines in self.itp_lines: read_itp(lines, self.force_field) total_count = 0 _make_edges(self.force_field) for mol_name, n_mol in self.molecules: block = self.force_field.blocks[mol_name] graph = MetaMolecule._block_graph_to_res_graph(block) for idx in range(0, int(n_mol)): graph_copy = graph.copy(as_view=False) new_mol = MetaMolecule(graph_copy, force_field=self.force_field, mol_name=mol_name) new_mol.molecule = self.force_field.blocks[mol_name].to_molecule() new_mol.mol_name = mol_name self.topology.add_molecule(new_mol) self.topology.mol_idx_by_name[mol_name].append(total_count) total_count += 1 super().finalize()
Called at the end of the file and checks that all pragmas are closed before calling the parent method.
https://github.com/marrink-lab/polyply_1.0/blob/4e48f86fb309b38391c73d8f9bcc1f7c6090d2cf/polyply/src/top_parser.py#L225-L258
import os import copy from itertools import zip_longest from vermouth.parser_utils import SectionLineParser from vermouth.molecule import Interaction from vermouth.gmx.itp_read import read_itp from .meta_molecule import MetaMolecule, _make_edges from tqdm import tqdm class TOPDirector(SectionLineParser): COMMENT_CHAR = ';' atom_idxs = {'bonds': [0, 1], 'bondtypes':[0, 1], 'position_restraints': [0], 'angles': [0, 1, 2], 'angletypes':[0, 1, 2], 'constraints': [0, 1], 'constrainttypes': [0, 1], 'dihedrals': [0, 1, 2, 3], 'dihedraltypes': [0, 1, 2, 3], 'pairs': [0, 1], 'pairtypes': [0, 1], 'exclusions': [slice(None, None)], 'virtual_sitesn': [0, slice(2, None)], 'virtual_sites2': [0, 1, 2, 3], 'virtual_sites3': [0, 1, 2, 3], 'pairs_nb': [0, 1], 'SETTLE': [0], 'virtual_sites4': [slice(0, 5)], 'distance_restraints': [0, 1], 'dihedral_restraints': [slice(0, 4)], 'orientation_restraints': [0, 1], 'angle_restraints': [slice(0, 4)], 'angle_restraints_z': [0, 1]} def __init__(self, topology, cwdir=None): super().__init__() self.force_field = topology.force_field self.topology = topology self.current_meta = None self.current_itp = None self.itp_lines = [] self.molecules = [] self.cwdir = cwdir self.header_actions = { ('moleculetype',): self._new_itp } self.pragma_actions = { '#define': self.parse_define, '#include': self.parse_include } def dispatch(self, line): if self.is_pragma(line): return self.parse_top_pragma elif self.is_star_comment(line): return self._skip else: return super().dispatch(line) @staticmethod def is_pragma(line): return line.startswith('#') @staticmethod def is_star_comment(line): return line.startswith('*') def parse_top_pragma(self, line, lineno=0): if line == '#endif': if self.current_itp: self.current_itp.append(line) elif self.current_meta is None: raise IOError("Your #ifdef section is orderd incorrectly." "At line {} I read {} but I haven not read" "an #ifdef before.".format(lineno, line)) else: self.current_meta = None elif line.startswith("#else"): if self.current_itp: self.current_itp.append(line) elif self.current_meta is None: raise IOError("Your #ifdef section is orderd incorrectly." "At line {} I read {} but I haven not read" "a ifdef before.".format(lineno, line)) else: inverse = {"ifdef": "ifndef", "ifndef": "ifdef"} tag = self.current_meta["tag"] condition = inverse[self.current_meta["condition"]] self.current_meta = {'tag': tag, 'condition': condition} elif line.startswith("#ifdef") or line.startswith("#ifndef"): if self.current_itp: self.current_itp.append(line) elif self.current_meta is None: condition, tag = line.split() self.current_meta = {'tag': tag, 'condition': condition.replace("#", "")} elif self.current_meta is not None: raise IOError("Your {} section is orderd incorrectly." "At line {} I read {} but there is still" "an open #ifdef/#ifndef section from" "before.".format(self.current_meta['tag'], lineno, line.split()[0])) elif line.split()[0] in self.pragma_actions: action = self.pragma_actions[line.split()[0]] action(line) else: raise IOError("Don't know how to parse pargma {} at" "line {}.".format(line, lineno)) def parse_header(self, line, lineno=0): result = super().parse_header(line, lineno) action = self.header_actions.get(tuple(self.section)) if action: action() if self.current_itp is not None: self.current_itp.append(line) return result
Apache License 2.0
saleweaver/python-amazon-sp-api
sp_api/api/services/services.py
Services.add_appointment_for_service_job_by_service_job_id
python
def add_appointment_for_service_job_by_service_job_id(self, serviceJobId, **kwargs) -> ApiResponse: return self._request(fill_query_params(kwargs.pop('path'), serviceJobId), data=kwargs)
add_appointment_for_service_job_by_service_job_id(self, serviceJobId, **kwargs) -> ApiResponse Adds an appointment to the service job indicated by the service job identifier you specify. **Usage Plan:** | Rate (requests per second) | Burst | | ---- | ---- | | 5 | 20 | For more information, see "Usage Plans and Rate Limits" in the Selling Partner API documentation. Args: serviceJobId:string | * REQUIRED An Amazon defined service job identifier. body: | * REQUIRED {'description': 'Input for add appointment operation.', 'properties': {'appointmentTime': {'$ref': '#/definitions/AppointmentTimeInput', 'description': 'Input appointment time details.'}}, 'required': ['appointmentTime'], 'type': 'object'} Returns: ApiResponse:
https://github.com/saleweaver/python-amazon-sp-api/blob/63534b7b8f8477e5c4ecfe9c77a2e35018afc79c/sp_api/api/services/services.py#L150-L175
import urllib.parse from sp_api.base import Client, sp_endpoint, fill_query_params, ApiResponse class Services(Client): @sp_endpoint('/service/v1/serviceJobs/{}', method='GET') def get_service_job_by_service_job_id(self, serviceJobId, **kwargs) -> ApiResponse: return self._request(fill_query_params(kwargs.pop('path'), serviceJobId), params=kwargs) @sp_endpoint('/service/v1/serviceJobs/{}/cancellations', method='PUT') def cancel_service_job_by_service_job_id(self, serviceJobId, **kwargs) -> ApiResponse: return self._request(fill_query_params(kwargs.pop('path'), serviceJobId), data=kwargs) @sp_endpoint('/service/v1/serviceJobs/{}/completions', method='PUT') def complete_service_job_by_service_job_id(self, serviceJobId, **kwargs) -> ApiResponse: return self._request(fill_query_params(kwargs.pop('path'), serviceJobId), data=kwargs) @sp_endpoint('/service/v1/serviceJobs', method='GET') def get_service_jobs(self, **kwargs) -> ApiResponse: return self._request(kwargs.pop('path'), params=kwargs) @sp_endpoint('/service/v1/serviceJobs/{}/appointments', method='POST')
MIT License
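An illustrative call for the endpoint above; because the wrapper forwards **kwargs as the request body, passing appointmentTime builds the required payload. The job id, timestamp values and credential setup are assumptions, not taken from the source:
client = Services()   # assumes credentials are supplied the way the library normally expects
res = client.add_appointment_for_service_job_by_service_job_id(
    'AMAZON-SERVICE-JOB-ID',
    appointmentTime={'startTime': '2023-06-01T09:00:00Z', 'durationInMinutes': 60},
)
print(res.payload if hasattr(res, 'payload') else res)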
testproject-io/python-sdk
src/testproject/rest/messages/drivercommandreport.py
DriverCommandReport.to_json
python
def to_json(self): payload = { "commandName": self.command, "commandParameters": self.command_params, "result": self.result, "passed": self.passed, "message": self.message, "screenshot": self.screenshot, "type": ReportItemType.Command.value, } return payload
Creates a JSON representation of the current DriverCommandReport instance Returns: dict: JSON representation of the current instance
https://github.com/testproject-io/python-sdk/blob/efae0de04936bacc5358e3cdc3b86fe6c14434fb/src/testproject/rest/messages/drivercommandreport.py#L95-L111
from src.testproject.rest.messages.reportitemtype import ReportItemType class DriverCommandReport: def __init__( self, command: str, command_params: dict, result: dict, passed: bool, screenshot: str = None, message: str = None, ): self._command = command self._command_params = command_params self._result = result self._passed = passed self._screenshot = screenshot self._message = message @property def command(self) -> str: return self._command @property def command_params(self) -> dict: return self._command_params @property def result(self) -> dict: return self._result @property def passed(self) -> bool: return self._passed @property def screenshot(self) -> str: return self._screenshot @screenshot.setter def screenshot(self, value: str): self._screenshot = value @property def message(self) -> str: return self._message @message.setter def message(self, value: str): self._message = value
Apache License 2.0
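A small usage sketch for DriverCommandReport.to_json; the command name and parameters are illustrative:
report = DriverCommandReport(command='clickElement',
                             command_params={'id': 'submit-button'},
                             result={'status': 0},
                             passed=True)
payload = report.to_json()
# payload contains commandName, commandParameters, result, passed, message (None here),
# screenshot (None here) and 'type' set to ReportItemType.Command.value
print(payload)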
pyansys/pydpf-core
ansys/dpf/core/elements.py
Elements.element_by_id
python
def element_by_id(self, id) -> Element: return self.__get_element(elementid=id)
Retrieve an element by element ID. Parameters ---------- id : int Number (ID) of the element. Returns ------- Element Element object.
https://github.com/pyansys/pydpf-core/blob/014cb8b784ad511028058a2adfd198c767c1bac8/ansys/dpf/core/elements.py#L242-L256
from enum import Enum import numpy as np from ansys.dpf.core import field, nodes, property_field, scoping from ansys.dpf.core.common import __write_enum_doc__, locations from ansys.dpf.core.element_descriptor import ElementDescriptor from ansys.dpf.core.errors import protect_grpc from ansys.grpc.dpf import meshed_region_pb2 class Element: def __init__(self, mesh, elementid, index, nodes): self._id = elementid self._index = index self._nodes = nodes self._mesh = mesh @property def node_ids(self): return [node.id for node in self._nodes] @property def id(self) -> int: return self._id @property def index(self) -> int: return self._index @property def nodes(self): return self._nodes @property def n_nodes(self) -> int: return len(self._nodes) def __str__(self): txt = "DPF Element %d\n" % self.id txt += "\tIndex: %7d\n" % self.index txt += "\tNodes: %7d\n" % self.n_nodes txt += f"\tType: {self.type}\n" txt += "\tShape: %7s\n" % self.shape.capitalize() return txt @property def type(self) -> int: return self._get_type() @protect_grpc def _get_type(self): request = meshed_region_pb2.ElementalPropertyRequest() request.mesh.CopyFrom(self._mesh._message) request.index = self.index request.property = meshed_region_pb2.ELEMENT_TYPE return element_types(self._mesh._stub.GetElementalProperty(request).prop) @property def shape(self) -> str: return self._get_shape() @protect_grpc def _get_shape(self): request = meshed_region_pb2.ElementalPropertyRequest() request.mesh.CopyFrom(self._mesh._message) request.index = self.index request.property = meshed_region_pb2.ELEMENT_SHAPE prop = self._mesh._stub.GetElementalProperty(request).prop return meshed_region_pb2.ElementShape.Name(prop).lower() @property def connectivity(self): list = [] for node in self._nodes: list.append(node.index) return list class Elements: def __init__(self, mesh): self._mesh = mesh self._mapping_id_to_index = None def __str__(self): return "DPF Elements object with %d elements" % len(self) def __getitem__(self, index): return self.element_by_index(index) def __len__(self): return self.n_elements def __iter__(self): for i in range(len(self)): yield self[i]
MIT License
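A hedged usage sketch for Elements.element_by_id; mesh is assumed to be a DPF MeshedRegion obtained elsewhere (e.g. from a Model's metadata), and the element id is hypothetical:
elements = mesh.elements           # Elements collection of the meshed region
elem = elements.element_by_id(7)   # look up the element with id 7
print(elem.id, elem.n_nodes, elem.shape)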
hackatbrown/2015.hackatbrown.org
hack-at-brown-2015/cssutils/helper.py
path2url
python
def path2url(path): return u'file:' + urllib.pathname2url(os.path.abspath(path))
Return file URL of `path`
https://github.com/hackatbrown/2015.hackatbrown.org/blob/6e6e10b010421228deb562909a1c8bb4272b759f/hack-at-brown-2015/cssutils/helper.py#L54-L56
__docformat__ = 'restructuredtext' __version__ = '$Id: errorhandler.py 1234 2008-05-22 20:26:12Z cthedot $' import os import re import sys import urllib class Deprecated(object): def __init__(self, msg): self.msg = msg def __call__(self, func): def newFunc(*args, **kwargs): import warnings warnings.warn("Call to deprecated method %r. %s" % (func.__name__, self.msg), category=DeprecationWarning, stacklevel=2) return func(*args, **kwargs) newFunc.__name__ = func.__name__ newFunc.__doc__ = func.__doc__ newFunc.__dict__.update(func.__dict__) return newFunc _simpleescapes = re.compile(ur'(\\[^0-9a-fA-F])').sub def normalize(x): if x: def removeescape(matchobj): return matchobj.group(0)[1:] x = _simpleescapes(removeescape, x) return x.lower() else: return x
MIT License
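A usage sketch for path2url (the exact result depends on the current directory and the platform's quoting rules):
url = path2url('some/dir/style.css')
# e.g. 'file:/home/user/project/some/dir/style.css' on a POSIX system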
apache/allura
Allura/allura/model/repository.py
RepositoryImplementation.log
python
def log(self, revs=None, path=None, exclude=None, id_only=True, **kw): raise NotImplementedError('log')
Returns a generator that yields information about commits reachable by revs. revs can be None or a list or tuple of identifiers, each of which can be anything parsable by self.commit(). If revs is None, the default branch head will be used. If path is not None, only commits which modify files under path will be included. exclude can be None or a list or tuple of identifiers, each of which can be anything parsable by self.commit(); if not None, any revisions reachable from any of the revisions in exclude will not be included. If id_only is True, only the commit ID is returned (which can be faster); otherwise detailed information about each commit is returned.
https://github.com/apache/allura/blob/04f14f15a9a9364e18c61f68acdaa241a470186b/Allura/allura/model/repository.py#L150-L170
from __future__ import unicode_literals from __future__ import absolute_import import json import os import stat from operator import itemgetter import mimetypes import logging import string import re from subprocess import Popen, PIPE from hashlib import sha1 from datetime import datetime, timedelta from time import time from collections import defaultdict, OrderedDict from six.moves.urllib.parse import urljoin from threading import Thread from six.moves.queue import Queue from itertools import chain, islice from difflib import SequenceMatcher import typing import tg from paste.deploy.converters import asint, asbool from tg import tmpl_context as c from tg import app_globals as g import pymongo import pymongo.errors import bson import six from ming import schema as S from ming import Field, collection, Index from ming.utils import LazyProperty from ming.odm import FieldProperty, session, Mapper, mapper, MappedClass from ming.base import Object from allura.lib import helpers as h from allura.lib import utils from allura.lib.security import has_access from .artifact import Artifact, VersionedArtifact from .auth import User from .timeline import ActivityObject from .monq_model import MonQTask from .project import AppConfig from .session import main_doc_session from .session import repository_orm_session from io import open from six.moves import range if typing.TYPE_CHECKING: from ming.odm.mapper import Query log = logging.getLogger(__name__) config = utils.ConfigProxy( common_suffix='forgemail.domain', ) README_RE = re.compile('^README(\.[^.]*)?$', re.IGNORECASE) VIEWABLE_EXTENSIONS = frozenset([ '.php', '.py', '.js', '.java', '.html', '.htm', '.yaml', '.sh', '.rb', '.phtml', '.txt', '.bat', '.ps1', '.xhtml', '.css', '.cfm', '.jsp', '.jspx', '.pl', '.php4', '.php3', '.rhtml', '.svg', '.markdown', '.json', '.ini', '.tcl', '.vbs', '.xsl']) SUser = dict(name=str, email=str, date=datetime) SObjType = S.OneOf('blob', 'tree', 'submodule') QSIZE = 100 BINARY_EXTENSIONS = frozenset([ ".3ds", ".3g2", ".3gp", ".7z", ".a", ".aac", ".adp", ".ai", ".aif", ".apk", ".ar", ".asf", ".au", ".avi", ".bak", ".bin", ".bk", ".bmp", ".btif", ".bz2", ".cab", ".caf", ".cgm", ".cmx", ".cpio", ".cr2", ".dat", ".deb", ".djvu", ".dll", ".dmg", ".dng", ".doc", ".docx", ".dra", ".DS_Store", ".dsk", ".dts", ".dtshd", ".dvb", ".dwg", ".dxf", ".ecelp4800", ".ecelp7470", ".ecelp9600", ".egg", ".eol", ".eot", ".epub", ".exe", ".f4v", ".fbs", ".fh", ".fla", ".flac", ".fli", ".flv", ".fpx", ".fst", ".fvt", ".g3", ".gif", ".gz", ".h261", ".h263", ".h264", ".ico", ".ief", ".img", ".ipa", ".iso", ".jar", ".jpeg", ".jpg", ".jpgv", ".jpm", ".jxr", ".ktx", ".lvp", ".lz", ".lzma", ".lzo", ".m3u", ".m4a", ".m4v", ".mar", ".mdi", ".mid", ".mj2", ".mka", ".mkv", ".mmr", ".mng", ".mov", ".movie", ".mp3", ".mp4", ".mp4a", ".mpeg", ".mpg", ".mpga", ".mxu", ".nef", ".npx", ".o", ".oga", ".ogg", ".ogv", ".otf", ".pbm", ".pcx", ".pdf", ".pea", ".pgm", ".pic", ".png", ".pnm", ".ppm", ".psd", ".pya", ".pyc", ".pyo", ".pyv", ".qt", ".rar", ".ras", ".raw", ".rgb", ".rip", ".rlc", ".rz", ".s3m", ".s7z", ".scpt", ".sgi", ".shar", ".sil", ".smv", ".so", ".sub", ".swf", ".tar", ".tbz2", ".tga", ".tgz", ".tif", ".tiff", ".tlz", ".ttf", ".uvh", ".uvi", ".uvm", ".uvp", ".uvs", ".uvu", ".viv", ".vob", ".war", ".wav", ".wax", ".wbmp", ".wdp", ".weba", ".webm", ".webp", ".whl", ".wm", ".wma", ".wmv", ".wmx", ".woff", ".woff2", ".wvx", ".xbm", ".xif", ".xm", ".xpi", ".xpm", ".xwd", ".xz", ".z", ".zip", ".zipx" ]) PYPELINE_EXTENSIONS = 
frozenset(utils.MARKDOWN_EXTENSIONS + ['.rst', '.textile', '.creole']) DIFF_SIMILARITY_THRESHOLD = .5 class RepositoryImplementation(object): def init(self): raise NotImplementedError('init') def clone_from(self, source_url): raise NotImplementedError('clone_from') def commit(self, revision): raise NotImplementedError('commit') def all_commit_ids(self): raise NotImplementedError('all_commit_ids') def new_commits(self, all_commits=False): raise NotImplementedError('new_commits') def commit_parents(self, commit): raise NotImplementedError('commit_parents') def refresh_commit_info(self, oid, lazy=True): raise NotImplementedError('refresh_commit_info') def _setup_hooks(self, source_path=None): raise NotImplementedError('_setup_hooks')
Apache License 2.0
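log itself is abstract here (each SCM tool implements it), but a hedged caller-side sketch of the intended interface looks like this; repo_impl stands for a concrete implementation, and the revision and path values are hypothetical:
for oid in repo_impl.log(revs=['master'], path='README.md', id_only=True):
    print(oid)      # commit ids of commits on master that touch README.md
for info in repo_impl.log(revs=['master'], exclude=['stable'], id_only=False):
    print(info)     # detailed info for commits reachable from master but not from stable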
shreybatra/django-mongo-boilerplate
boilerplate/middlewares/request_validation.py
validate_json_body
python
def validate_json_body(body, schema): errors = [] validator = Draft4Validator(schema) for error in sorted(validator.iter_errors(body)): errors.append(error.message) return errors if errors else None
Validate a JSON request body against the defined schema. Args: body: JSON-like dictionary request body schema: JSON-like dictionary schema for the sent request body Returns: List of errors against each key in the JSON request body Example: [ { 3 is not of type 'string', {} is not of type 'string' } ]
https://github.com/shreybatra/django-mongo-boilerplate/blob/7f522aca9b0fd605dc1f330fc06d1cce8c154d87/boilerplate/middlewares/request_validation.py#L371-L396
import json import re from datetime import datetime from jsonschema import Draft4Validator from bson.objectid import ObjectId from commons.utils.http_error import BadRequest from django.urls import resolve from .helpers import RequestValidationConfig class RequestValidationMiddleware: def __init__(self, get_response): self.get_response = get_response def __call__(self, request): request_parameters = resolve(request.path_info) request_info = { 'route_name': request_parameters.url_name, 'url_parameters': request_parameters.kwargs, 'query_parameters': dict(request.GET), 'request_body': request.body if request.body else {}, 'method': request.method } request_validator(request_info) response = self.get_response(request) return response class ValidateParamType(object): def __init__(self, document, value): self.__document = document self.__value = value self.__validation_types = { 'STRING': self.__validate_string_params, 'INTEGER': self.__validate_integer_params, 'OBJECT_ID': self.__validate_object_id_params, 'FLOAT': self.__validate_float_params, 'DATE': self.__validate_date_params } def validate(self): if not self.__validation_types.get(self.__document.get('dataType')): return [{ "message": self.__document.get('name') + " has unknown data type: " + self.__document.get('dataType') }] return self.__validation_types.get(self.__document.get('dataType'))() def __validate_integer_params(self): errors = [] if str(self.__value).isdigit(): if self.__document.get('action'): action_errors = self.__validate_param_constraint( self.__document, int(self.__value) ) if action_errors: errors += action_errors else: errors.append({ "message": self.__document.get('name') + " must be of integer type" }) return errors def __validate_float_params(self): errors = [] if re.match("^\d+?\.\d+?$", str(self.__value)) is None: error_obj = { "message": self.__document.get('name') + " must be of float type" } errors.append(error_obj) else: if self.__document.get('action'): action_errors = self.__validate_param_constraint(self.__document, float(self.__value)) if action_errors: errors += action_errors return errors def __validate_object_id_params(self): errors = [] if ObjectId.is_valid(str(self.__value)): if self.__document.get('action'): action_errors = self.__validate_param_constraint(self.__document, self.__value) if action_errors: errors += action_errors else: error_obj = { "message": self.__document.get('name') + " must be of type ObjectId" } errors.append(error_obj) return errors def __validate_string_params(self): errors = [] if isinstance(self.__value, str): if self.__document.get('regex') and re.match(self.__document.get('regex'), self.__value) is None: error_obj = { "message": self.__document.get('name') + " must follow regex " + self.__document.get('regex') } errors.append(error_obj) else: if self.__document.get('action'): action_errors = self.__validate_param_constraint(self.__document, self.__value) if action_errors: errors += action_errors else: errors.append({ "message": self.__document.get('name') + " must be of string type" }) return errors def __validate_date_params(self): errors = [] try: date = datetime.strptime(str(self.__value), str(self.__document.get('format'))) except ValueError: errors.append({ "message": (self.__document.get('name') + " must be of date type with format " + self.__document.get('format')) }) return errors if self.__document.get('action'): action_errors = self.__validate_param_constraint(self.__document, date) if action_errors: errors += action_errors return errors def 
__validate_param_constraint(self, param_info, param_value): error = [] action_type = self.__document['action'].get('actionType') values = self.__document['action'].get('value') if action_type == 'BETWEEN': if not(param_value >= values.get('min') and param_value <= values.get('max')): error.append({ "message": param_info.get('name') + " out of range", "expectedRange": { "min": str(values.get('min')), "max": str(values.get('max')) } }) elif action_type == 'EQUALS': if param_value != values: error.append({ "message": param_info.get('name') + " incorrect value", "expectedValue": values }) elif action_type == 'IN': if param_value not in values: error.append({ "message": param_info.get('name') + " incorrect value", "expectedValues": values }) elif action_type == 'GREATER_THAN': if param_value <= values: error.append({ "message": param_info.get('name') + " should be greater than" + values, }) elif action_type == 'LESS_THAN': if param_value >= values: error.append({ "message": param_info.get('name') + " should be less than" + values, }) return error def validate_params(param_schema, request_info, param_type): response = [] for doc in param_schema: value = None if param_type == "urlParams": value = request_info['url_parameters'].get(str(doc.get('name'))) elif param_type == "queryParams": value = request_info['query_parameters'].get(str(doc.get('name')), [''])[0] if not value and doc.get('isRequired'): response.append({ "message": doc.get('name') + " param is manadatory", "type": doc.get('dataType') }) if value: validation_status = ValidateParamType(doc, value).validate() if validation_status: response += validation_status return response if response else None
MIT License
rgtjf/semantic-texual-similarity-toolkits
stst/utils.py
euclidean_distance
python
def euclidean_distance(v1, v2, norm=False):
    v1, v2 = check_pairwise_vector(v1, v2, norm)
    diff = v1 - v2
    K = np.sqrt((diff ** 2).sum())
    return K
return ||v1 - v2||_2
https://github.com/rgtjf/semantic-texual-similarity-toolkits/blob/7ef271e4e4ca55330b31bce06368274c2ddbe3a9/stst/utils.py#L617-L624
from __future__ import print_function import datetime import io import time import csv, math import codecs import logging import configparser from functools import wraps from collections import Counter import numpy as np import os import pickle import six import array import pyprind from stst.libs.kernel import vector_kernel as vk logger = logging.getLogger(__name__) def fn_timer(function): @wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result = function(*args, **kwargs) t1 = time.time() print("total time running %s: %s seconds" % (function.__name__, str(t1 - t0))) return result return function_timer def singleton(cls): instances = {} def _singleton(*args, **kwargs): if (cls, args) not in instances: instances[(cls, args)] = cls(*args, **kwargs) return instances[(cls, args)] return _singleton @fn_timer def Test(): pass @singleton class SingletonTest(object): pass def get_time_name(prefix): time_str = datetime.datetime.now().strftime('_%m%d_%H_%M') return prefix + time_str def get_logger(file_name): logger = logging.getLogger() logger.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%m-%d %H:%M") fh = logging.FileHandler(file_name) fh.setFormatter(formatter) logger.addHandler(fh) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) return logger def get_config(config_file): config = configparser.ConfigParser(allow_no_value=True, interpolation=configparser.ExtendedInterpolation()) config.read(config_file) return config class DictVocab(object): @staticmethod def load_from_file(file_path, sep='\t'): vocab = {} with create_read_file(file_path) as f: for idx, line in enumerate(f): items = line.strip().split(sep) if len(items) == 1: vocab[items[0]] = idx elif len(items) == 2: vocab[items[0]] = items[1] else: raise NotImplementedError print('load from FILE {}'.format(file_path)) return vocab @staticmethod def dump_to_file(vocab, file_path, sep='\t', sort_by_key=True, reverse=False): with create_write_file(file_path) as fw: items = vocab.items() if sort_by_key: keys = sorted(items, cmp=lambda x: x[0], reverse=reverse) else: keys = sorted(items, cmp=lambda x: x[1], reverse=reverse) for key in keys: print("{}\t{}".format(key, vocab[key]), file=fw) print('dump to FILE {}'.format(file_path)) def split_abbreviation(word): res = [] char = '' for ch in word: if char != '' and char[-1].islower() and ch.isupper(): res.append(char) char = '' char += ch if char != '': res.append(char) return res def word2char(word_list): if type(word_list) is six.text_type: word_list = word_list.split() char_list = [] word_string = ''.join(word_list) char = '' for ch in word_string: if ord(ch) < 128: char += ch else: if char != '': char_list += split_abbreviation(char) char = '' char_list.append(ch) if char != '': char_list += split_abbreviation(char) return char_list def word2index(word_list): if type(word_list) is list: vocab = {word:i for i, word in enumerate(word_list)} elif type(word_list) is dict: vocab = {word:i for i, word in enumerate(word_list.keys())} else: raise NotImplementedError return vocab def pos2tag(pos): if pos in ['NN', 'NNS', 'NNP', 'NNPS']: pos = 'n' elif pos in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']: pos = 'v' elif pos in ['JJ', 'JJR', 'JJS']: pos = 'a' elif pos in ['RB', 'RBR', 'RBS']: pos = 'r' else: pos = '#' return pos def idf_calculator(sentence_list, min_cnt=1): doc_num = 0 word_list = [] for sequence in sentence_list: word_list += sequence doc_num += 1 word_count = Counter() for word in word_list: 
word_count[word] += 1 idf_dict = {} good_keys = [v for v in word_count.keys() if word_count[v] >= min_cnt] for key in good_keys: idf_dict[key] = word_count[key] for key in idf_dict.keys(): idf_dict[key] = math.log(float(doc_num) / float(idf_dict[key])) / math.log(10) return idf_dict def vectorize(sentence, idf_weight, vocab, convey='idf'): vec = np.zeros(len(vocab), dtype=np.float32) for word in sentence: if word not in vocab: continue if convey == 'idf': vec[vocab[word]] += idf_weight[word] elif convey == 'count': vec[vocab[word]] += 1 else: raise NotImplementedError return vec def vector_similarity(vec1, vec2, normlize=True): if normlize: vec1 = vk.normalize(vec1) vec2 = vk.normalize(vec2) return vk.get_all_kernel(vec1, vec2) def sentence_match_features(seq1, seq2): features, infos = [], [] features.append(jaccrad_coeff(seq1, seq2)) features.append(dice_coeff(seq1, seq2)) features.append(overlap_coeff(seq1, seq2)) features.append(overlap_coeff(seq1, seq2)) features.append(overlap_f1(seq1, seq2)) infos += ['jaccrad_coeff', 'dice_coeff', 'overlap_coeff', 'overlap_coeff', 'overlap_f1'] return features, infos def sentence_sequence_features(sa, sb): features, infos = [], [] features.append(longest_common_prefix(sa, sb)) features.append(longest_common_suffix(sa, sb)) features.append(longest_common_substring(sa, sb)) features.append(longest_common_sequence(sa, sb)) features.append(levenshtein_disttance(sa, sb)) infos += ['prefix', 'suffix', 'longest_common_substring', 'longest_common_sequence', 'levenshtein_disttance'] return features, infos def sentence_vectorize_features(sa, sb, idf_weight, vocab=None, convey='idf'): if not vocab: vocab = word2index(idf_weight) vec1 = vectorize(sa, idf_weight, vocab, convey) vec2 = vectorize(sb, idf_weight, vocab, convey) features, info = vector_similarity(vec1, vec2) return features, info class FileManager(object): @classmethod def get_file(cls, path): path, file = os.path.split(path) return file @classmethod def get_filename(cls, path): path, file = os.path.split(path) filename = os.path.splitext(file)[0] return filename def write_dict_to_csv(contents_dict, to_file): fieldnames = [] contents = [] with open(to_file, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() writer.writerows(contents) def create_write_file(file_name, mode='w', encoding='utf8'): path = os.path.split(file_name)[0] if not os.path.exists(path): os.makedirs(path) return codecs.open(file_name, mode, encoding=encoding) def create_read_file(file_name, mode='r', encoding='utf8'): return codecs.open(file_name, mode, encoding=encoding) def check_file_exist(file_path): path = os.path.split(file_path)[0] if not os.path.exists(path): print('===> create path: {}'.format(path)) os.makedirs(path) def check_dir_exist(dir_path): path = dir_path if not os.path.exists(path): print('===> create path: {}'.format(path)) os.makedirs(path) def load_word_embedding(vocab, emb_file, pad_word='__PAD__', unk_word='__UNK__'): if pad_word not in vocab: vocab = {k: v+2 for k, v in vocab.items()} vocab[pad_word] = 0 vocab[unk_word] = 1 pre_trained = {} n_words = len(vocab) embeddings = None itos, vectors, dim = [], array.array(str('d')), None binary_lines = False try: with io.open(emb_file, encoding="utf8") as f: lines = [line for line in f] except: logger.warning("Could not read {} as UTF8 file, " "reading file as bytes and skipping " "words with malformed UTF8.".format(emb_file)) with open(emb_file, 'rb') as f: lines = [line for line in f] binary_lines = True 
logger.info("Loading vectors from {}".format(emb_file)) process_bar = pyprind.ProgPercent(len(lines)) for line in lines: process_bar.update() entries = line.rstrip().split(b" " if binary_lines else " ") word, entries = entries[0], entries[1:] if dim is None and len(entries) > 1: dim = len(entries) embeddings = np.random.uniform(-0.25, 0.25, (n_words, dim)) embeddings[0,] = np.zeros(dim) elif len(entries) == 1: logger.warning("Skipping token {} with 1-dimensional " "vector {}; likely a header".format(word, entries)) continue elif dim != len(entries): raise RuntimeError( "Vector for token {} has {} dimensions, but previously " "read vectors have {} dimensions. All vectors must have " "the same number of dimensions.".format(word, len(entries), dim)) if binary_lines: try: if isinstance(word, six.binary_type): word = word.decode('utf-8') except: logger.info("Skipping non-UTF8 token {}".format(repr(word))) continue if word in vocab and word not in pre_trained: embeddings[vocab[word]] = [float(x) for x in entries] pre_trained[word] = 1 pre_trained_len = len(pre_trained) print('Pre-trained: {}/{} {:.2f}'.format(pre_trained_len, n_words, pre_trained_len * 100.0 / n_words)) oov_word_list = [w for w in vocab if w not in pre_trained] print('oov word list example (30): ', oov_word_list[:30]) pickle.dump(oov_word_list, open('./oov.p', 'wb')) embeddings = np.array(embeddings, dtype=np.float32) return vocab, embeddings def load_embedding_from_text(emb_file, pad_word='__PAD__', unk_word='__UNK__'): itos, vectors, dim = [], array.array(str('d')), None binary_lines = False try: with io.open(emb_file, encoding="utf8") as f: lines = [line for line in f] except: logger.warning("Could not read {} as UTF8 file, " "reading file as bytes and skipping " "words with malformed UTF8.".format(emb_file)) with open(emb_file, 'rb') as f: lines = [line for line in f] binary_lines = True logger.info("Loading vectors from {}".format(emb_file)) process_bar = pyprind.ProgPercent(len(lines)) for line in lines: process_bar.update() entries = line.rstrip().split(b" " if binary_lines else " ") word, entries = entries[0], entries[1:] if dim is None and len(entries) > 1: dim = len(entries) itos.append(pad_word) vectors.extend(np.zeros(dim, )) itos.append(unk_word) vectors.extend(np.random.uniform(-0.25, 0.25, (dim, ))) elif len(entries) == 1: logger.warning("Skipping token {} with 1-dimensional " "vector {}; likely a header".format(word, entries)) continue elif dim != len(entries): raise RuntimeError( "Vector for token {} has {} dimensions, but previously " "read vectors have {} dimensions. All vectors must have " "the same number of dimensions.".format(word, len(entries), dim)) if binary_lines: try: if isinstance(word, six.binary_type): word = word.decode('utf-8') except: logger.info("Skipping non-UTF8 token {}".format(repr(word))) continue vectors.extend(float(x) for x in entries) itos.append(word) stoi = {word: i for i, word in enumerate(itos)} vectors = np.array(vectors, dtype=np.float32).reshape((-1, dim)) return stoi, vectors def normalize(v): norm = np.linalg.norm(v) if norm == 0: return v return v / norm def check_pairwise_vector(v1, v2, norm): if isinstance(v1, list): v1 = np.array(v1) if isinstance(v2, list): v2 = np.array(v2) if v1.shape != v2.shape: raise ValueError("v1 and v2 should be of same shape. They were " "respectively %r and %r long." 
% (v1.shape, v2.shape)) if norm: v1 = normalize(v1) v2 = normalize(v2) return v1, v2 def cosine_distance(v1, v2, norm=True): v1, v2 = check_pairwise_vector(v1, v2, norm) cosine = (v1 * v2).sum() if np.isnan(cosine): cosine = 1. return 1. - cosine def manhattan_distance(v1, v2, norm=False): v1, v2 = check_pairwise_vector(v1, v2, norm) diff = v1 - v2 K = np.abs(diff).sum() return K
MIT License
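A minimal usage sketch for the distance helpers in this record; the two vectors are made-up illustration values, and the import path is assumed from the repository layout (stst/utils.py).

import numpy as np
from stst.utils import euclidean_distance, cosine_distance, manhattan_distance

v1 = [1.0, 2.0, 3.0]
v2 = np.array([2.0, 2.0, 4.0])

print(euclidean_distance(v1, v2))             # raw L2 distance, sqrt(2)
print(euclidean_distance(v1, v2, norm=True))  # distance after L2-normalizing both vectors
print(cosine_distance(v1, v2))                # 1 - cosine similarity
print(manhattan_distance(v1, v2))             # sum of absolute differences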
pythainlp/pythainlp
pythainlp/tag/thainer.py
ThaiNameTagger.__init__
python
def __init__(self, version: str = "1.5") -> None:
    self.crf = CRFTagger()
    if version == "1.4":
        self.crf.open(get_corpus_path("thainer-1.4", version="1.4"))
        self.pos_tag_name = "orchid_ud"
    else:
        self.crf.open(get_corpus_path(_CORPUS_NAME, version="1.5"))
        self.pos_tag_name = "lst20"
Thai named-entity recognizer.

:param str version: Thai NER version. It supports Thai NER 1.4 & 1.5.
    The default value is `1.5`.
https://github.com/pythainlp/pythainlp/blob/2ae3f7ad77d96136690823f33bb61f0b8610900b/pythainlp/tag/thainer.py#L105-L120
__all__ = ["ThaiNameTagger"] from typing import Dict, List, Tuple, Union import warnings from pycrfsuite import Tagger as CRFTagger from pythainlp.corpus import get_corpus_path, thai_stopwords from pythainlp.tag import pos_tag from pythainlp.tokenize import word_tokenize from pythainlp.util import isthai _CORPUS_NAME = "thainer" _TOKENIZER_ENGINE = "newmm" warnings.warn( """ ThaiNameTagger class is change from pythainlp.tag.named_entity.ThaiNameTagger to pythainlp.tag.thainer.ThaiNameTagger. This old class will be deprecated in PyThaiNLP version 2.5. """, PendingDeprecationWarning ) def _is_stopword(word: str) -> bool: return word in thai_stopwords() def _doc2features(doc, i) -> Dict: word = doc[i][0] postag = doc[i][1] features = { "word.word": word, "word.stopword": _is_stopword(word), "word.isthai": isthai(word), "word.isspace": word.isspace(), "postag": postag, "word.isdigit": word.isdigit(), } if word.isdigit() and len(word) == 5: features["word.islen5"] = True if i > 0: prevword = doc[i - 1][0] prevpostag = doc[i - 1][1] prev_features = { "word.prevword": prevword, "word.previsspace": prevword.isspace(), "word.previsthai": isthai(prevword), "word.prevstopword": _is_stopword(prevword), "word.prevpostag": prevpostag, "word.prevwordisdigit": prevword.isdigit(), } features.update(prev_features) else: features["BOS"] = True if i < len(doc) - 1: nextword = doc[i + 1][0] nextpostag = doc[i + 1][1] next_features = { "word.nextword": nextword, "word.nextisspace": nextword.isspace(), "word.nextpostag": nextpostag, "word.nextisthai": isthai(nextword), "word.nextstopword": _is_stopword(nextword), "word.nextwordisdigit": nextword.isdigit(), } features.update(next_features) else: features["EOS"] = True return features class ThaiNameTagger:
Apache License 2.0
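An illustrative construction of the tagger for each supported corpus version; the tagging call at the end is an assumption (the method is not shown in this record) and is left commented out.

from pythainlp.tag.thainer import ThaiNameTagger

ner15 = ThaiNameTagger()               # Thai NER 1.5 corpus, LST20 POS tags
ner14 = ThaiNameTagger(version="1.4")  # Thai NER 1.4 corpus, Orchid-UD POS tags
# tags = ner15.get_ner("some Thai text")  # assumed API, not part of this record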
nusiq/mcblend
mcblend/panel.py
MCBLEND_PT_UVGroupPanel.draw_mask_properties
python
def draw_mask_properties(
        self, mask, index: int, col: bpy.types.UILayout, *,
        colors=False, interpolate=False, normalize=False, p1p2=False,
        stripes=False, relative_boundaries=False, expotent=False,
        strength=False, hard_edge=False, horizontal=False,
        seed=False, color=False, children=False, mode=False):
    if colors:
        self.draw_colors(mask, index, col)
    if interpolate:
        col.prop(mask, "interpolate")
    if normalize:
        col.prop(mask, "normalize")
    if p1p2:
        row = col.row()
        if mask.relative_boundaries:
            row.prop(mask, "p1_relative")
            row = col.row()
            row.prop(mask, "p2_relative")
        else:
            row.prop(mask, "p1")
            row = col.row()
            row.prop(mask, "p2")
    if relative_boundaries:
        col.prop(mask, "relative_boundaries")
    if stripes:
        self.draw_stripes(mask, index, col)
    if expotent:
        col.prop(mask, "expotent")
    if strength:
        col.row().prop(mask, "strength")
    if hard_edge:
        col.prop(mask, "hard_edge")
    if horizontal:
        col.prop(mask, "horizontal")
    if seed:
        row = col.row()
        row.prop(mask, "use_seed")
        if mask.use_seed:
            row.prop(mask, "seed")
    if color:
        col.prop(mask.color, "color")
    if mode:
        col.prop(mask, "mode")
    if children:
        col.prop(mask, "children")
Draws properties of UV-mask.
https://github.com/nusiq/mcblend/blob/3b2bbfa72b7f24058ae897e74f91a299db9e623e/mcblend/panel.py#L140-L186
from typing import List, Optional from dataclasses import dataclass import bpy from .object_data import EffectTypes from .operator_func.common import MeshType from .operator_func.texture_generator import UvMaskTypes class MCBLEND_UL_UVGroupList(bpy.types.UIList): def draw_item( self, context, layout, data, item, icon, active_data, active_propname): if self.layout_type in {'DEFAULT', 'COMPACT', 'CENTER'}: layout.prop(item, "name", text="", emboss=False) @dataclass class _UIStackItem(): ui: Optional[bpy.types.UILayout] depth: int class MCBLEND_PT_UVGroupPanel(bpy.types.Panel): bl_space_type = 'PROPERTIES' bl_region_type = 'WINDOW' bl_context = 'scene' bl_label = "Mcblend: UV-groups" def draw_colors(self, mask, mask_index: int, col: bpy.types.UILayout): box = col.box() row = box.row() row.label(text='Colors') op_props = row.operator( "mcblend.add_uv_mask_color", text="", icon='ADD') op_props.mask_index = mask_index colors_len = len(mask.colors) for color_index, color in enumerate(mask.colors): row = box.row() row.prop(color, "color", text="") up_down_row = row.row(align=True) if color_index - 1 >= 0: op_props = up_down_row.operator( "mcblend.move_uv_mask_color", icon='TRIA_UP', text='') op_props.mask_index = mask_index op_props.move_from = color_index op_props.move_to = color_index - 1 if color_index + 1 < colors_len: op_props = up_down_row.operator( "mcblend.move_uv_mask_color", icon='TRIA_DOWN', text='') op_props.mask_index = mask_index op_props.move_from = color_index op_props.move_to = color_index + 1 op_props = row.operator( "mcblend.remove_uv_mask_color", icon='X', text='') op_props.mask_index = mask_index op_props.color_index = color_index def draw_stripes(self, mask, mask_index: int, col: bpy.types.UILayout): box = col.box() row = box.row() row.label(text='Stripes') op_props = row.operator( "mcblend.add_uv_mask_stripe", text="", icon='ADD') op_props.mask_index = mask_index stripes_len = len(mask.stripes) for stripe_index, stripe in enumerate(mask.stripes): row = box.row() if ( mask.relative_boundaries and mask.mask_type != UvMaskTypes.GRADIENT_MASK.value): row.prop(stripe, "width_relative") else: row.prop(stripe, "width") row.prop(stripe, "strength") up_down_row = row.row(align=True) if stripe_index - 1 >= 0: op_props = up_down_row.operator( "mcblend.move_uv_mask_stripe", icon='TRIA_UP', text='') op_props.mask_index = mask_index op_props.move_from = stripe_index op_props.move_to = stripe_index - 1 if stripe_index + 1 < stripes_len: op_props = up_down_row.operator( "mcblend.move_uv_mask_stripe", icon='TRIA_DOWN', text='') op_props.mask_index = mask_index op_props.move_from = stripe_index op_props.move_to = stripe_index + 1 op_props = row.operator( "mcblend.remove_uv_mask_stripe", icon='X', text='') op_props.mask_index = mask_index op_props.stripe_index = stripe_index
MIT License
baguasys/bagua
bagua/torch_api/bucket.py
BaguaBucket.check_flatten
python
def check_flatten(self) -> bool:
    return check_contiguous(self._all_tensors)
Returns: True if the bucket's tensors are contiguous in memory.
https://github.com/baguasys/bagua/blob/6059ebf962858e07d9325afc39a50f5be9fbb31e/bagua/torch_api/bucket.py#L131-L136
from __future__ import annotations from bagua.torch_api.communication import get_backend, _get_default_group from typing import List, Callable, Optional import bagua_core as B import torch from bagua.torch_api.tensor import BaguaTensor from bagua.torch_api.utils import check_contiguous from bagua.torch_api.communication import ( broadcast, BaguaProcessGroup, _bagua_backend_comm, _rank_not_in_comm, ) class BaguaBucket: def __init__( self, tensors: List[BaguaTensor], name: str, flatten: bool, alignment: int = 1 ) -> None: self.tensors = tensors self.bagua_module_name = tensors[0].bagua_module_name for tensor in self.tensors: assert ( self.bagua_module_name == tensor.bagua_module_name ), "every tensor in the same bucket should have the same model name" self._bagua_backend = get_backend(self.bagua_module_name) self.name = name self.padding_tensor = None if alignment > 1: padding = sum(tensor.numel() for tensor in self.tensors) % alignment if padding > 0: padding = alignment - padding self.padding_tensor = torch.zeros( padding, dtype=self.tensors[0].dtype, device=self.tensors[0].device ).to_bagua_tensor("bagua_padding_tensor_bucket_" + name) self._all_tensors = ( self.tensors + [self.padding_tensor] if self.padding_tensor is not None else self.tensors ) self.backend_tensor = None self.flatten = flatten if self.flatten: self._flatten_() torch.cuda.empty_cache() self.backend_bucket = B.BaguaBucketPy( name, [tensor._bagua_backend_tensor for tensor in self._all_tensors] ) for tensor in self._all_tensors: tensor._bagua_bucket = self def flattened_tensor(self) -> BaguaTensor: total_size = 0 for tensor in self._all_tensors: total_size += tensor.numel() flatten_tensor = torch.zeros( total_size, dtype=self._all_tensors[0].dtype, device=self._all_tensors[0].device, ) offset = 0 for tensor in self._all_tensors: flatten_tensor[offset : offset + tensor.numel()] = tensor.data.reshape(-1) offset += tensor.numel() return flatten_tensor def _flatten_(self): if len(self._all_tensors) == 0: return flatten_tensor = self.flattened_tensor() if self.check_flatten(): flatten_tensor.set_(self._all_tensors[0].storage(), 0, flatten_tensor.shape) self.backend_tensor = flatten_tensor return flatten_storage = flatten_tensor.storage() offset = 0 for tensor in self._all_tensors: flatten_tensor[offset : offset + tensor.numel()] = tensor.data.reshape(-1) tensor.bagua_set_storage(flatten_storage, offset) offset += tensor.numel() self.backend_tensor = flatten_tensor assert self.check_flatten()
MIT License
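A rough sketch, not the bagua implementation, of what check_flatten()/check_contiguous() verifies: every tensor in the bucket lives in one shared storage, each starting where the previous one ends.

import torch

def tensors_are_contiguous(tensors):
    ptr = tensors[0].data_ptr()
    for t in tensors:
        if t.data_ptr() != ptr:
            return False
        ptr += t.numel() * t.element_size()
    return True

flat = torch.zeros(6)
views = [flat[0:2], flat[2:6]]                                   # two views into one flattened buffer
print(tensors_are_contiguous(views))                             # True
print(tensors_are_contiguous([torch.zeros(2), torch.zeros(4)]))  # False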
minikimmy/sketchfab_download
bin/py/Lib/ctypes/_aix.py
get_ld_headers
python
def get_ld_headers(file):
    ldr_headers = []
    p = Popen(["/usr/bin/dump", f"-X{AIX_ABI}", "-H", file],
              universal_newlines=True, stdout=PIPE, stderr=DEVNULL)
    while True:
        ld_header = get_ld_header(p)
        if ld_header:
            ldr_headers.append((ld_header, get_ld_header_info(p)))
        else:
            break
    p.stdout.close()
    p.wait()
    return ldr_headers
Parse the header of the loader section of executable and archives.

This function calls /usr/bin/dump -H as a subprocess
and returns a list of (ld_header, ld_header_info) tuples.
https://github.com/minikimmy/sketchfab_download/blob/2ac3b253088af95db323605c450c4336228f0c88/bin/py/Lib/ctypes/_aix.py#L97-L119
__author__ = "Michael Felt <aixtools@felt.demon.nl>" import re from os import environ, path from sys import executable from ctypes import c_void_p, sizeof from subprocess import Popen, PIPE, DEVNULL AIX_ABI = sizeof(c_void_p) * 8 from sys import maxsize def _last_version(libnames, sep): def _num_version(libname): parts = libname.split(sep) nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass return nums or [maxsize] return max(reversed(libnames), key=_num_version) def get_ld_header(p): ld_header = None for line in p.stdout: if line.startswith(('/', './', '../')): ld_header = line elif "INDEX" in line: return ld_header.rstrip('\n') return None def get_ld_header_info(p): info = [] for line in p.stdout: if re.match("[0-9]", line): info.append(line) else: break return info
Apache License 2.0
phurwicz/hover
hover/core/explorer/functionality.py
BokehSoftLabelExplorer.plot
python
def plot(self, **kwargs):
    for _key, _source in self.sources.items():
        preset_kwargs = {
            "color": SOURCE_COLOR_FIELD,
            "fill_alpha": SOURCE_ALPHA_FIELD,
        }
        eff_kwargs = self.glyph_kwargs[_key].copy()
        eff_kwargs.update(preset_kwargs)
        eff_kwargs.update(kwargs)
        self.figure.circle("x", "y", name=_key, source=_source, **eff_kwargs)
        self._good(f"Plotted subset {_key} with {self.dfs[_key].shape[0]} points")
???+ note "Plot all data points, setting color alpha based on the soft score."

    | Param      | Type   | Description                   |
    | :--------- | :----- | :---------------------------- |
    | `**kwargs` |        | forwarded to plotting markers |
https://github.com/phurwicz/hover/blob/aa1c9b4f7a5a3c69190303ea75597aa4d2ae90ee/hover/core/explorer/functionality.py#L361-L379
import numpy as np from bokeh.models import CDSView, IndexFilter from bokeh.palettes import Category20 from bokeh.layouts import row from hover import module_config from hover.utils.misc import current_time from hover.utils.bokeh_helper import bokeh_hover_tooltip from .local_config import SOURCE_COLOR_FIELD, SOURCE_ALPHA_FIELD, SEARCH_SCORE_FIELD from .base import BokehBaseExplorer class BokehDataFinder(BokehBaseExplorer): SUBSET_GLYPH_KWARGS = { _key: { "constant": {"line_alpha": 0.4}, "search": { "size": ("size", 10, 5, 7), "fill_alpha": ("fill_alpha", 0.4, 0.1, 0.2), "color": ("color", "coral", "linen", "gainsboro"), }, } for _key in ["raw", "train", "dev", "test"] } def _setup_widgets(self): from bokeh.models import CheckboxGroup super()._setup_widgets() self.search_filter_box = CheckboxGroup( labels=["use as selection filter"], active=[] ) def activated(): return bool(0 in self.search_filter_box.active) def filter_by_search(indices, subset): search_scores = self.sources[subset].data[SEARCH_SCORE_FIELD] matched = set(np.where(np.array(search_scores) > 0)[0]) return indices.intersection(matched) for _key in self.sources.keys(): self._selection_filters[_key].data.add( lambda indices, subset: filter_by_search(indices, subset) if activated() else indices ) self.search_pos.on_change( "value", lambda attr, old, new: self._trigger_selection_filters() if activated() else None, ) self.search_neg.on_change( "value", lambda attr, old, new: self._trigger_selection_filters() if activated() else None, ) self.search_filter_box.on_change( "active", lambda attr, old, new: self._trigger_selection_filters() ) def plot(self): for _key, _source in self.sources.items(): self.figure.circle( "x", "y", name=_key, source=_source, **self.glyph_kwargs[_key] ) self._good(f"Plotted subset {_key} with {self.dfs[_key].shape[0]} points") class BokehDataAnnotator(BokehBaseExplorer): SUBSET_GLYPH_KWARGS = { _key: { "constant": {"line_alpha": 0.3}, "search": { "size": ("size", 10, 5, 7), "fill_alpha": ("fill_alpha", 0.5, 0.1, 0.4), }, } for _key in ["raw", "train", "dev", "test"] } def _postprocess_sources(self): color_dict = self.auto_color_mapping() for _key, _df in self.dfs.items(): _color = ( _df["label"] .apply(lambda label: color_dict.get(label, "gainsboro")) .tolist() ) self.sources[_key].add(_color, SOURCE_COLOR_FIELD) def _setup_widgets(self): from bokeh.models import TextInput, Button super()._setup_widgets() self.annotator_input = TextInput(title="Label:") self.annotator_apply = Button( label="Apply", button_type="primary", height_policy="fit", width_policy="min", ) def callback_apply(): label = self.annotator_input.value selected_idx = self.sources["raw"].selected.indices if not selected_idx: self._warn( "Attempting annotation: did not select any data points. Eligible subset is 'raw'." 
) return self._info(f"Applying {len(selected_idx)} annotations: {label}") self.dfs["raw"].at[selected_idx, "label"] = label for _idx in selected_idx: _idx = int(_idx) self.sources["raw"].patch({"label": [(_idx, label)]}) self._good(f"Applied {len(selected_idx)} annotations: {label}") color_dict = self.auto_color_mapping() color_list = ( self.dfs["raw"]["label"] .apply(lambda label: color_dict.get(label, "gainsboro")) .tolist() ) self.sources["raw"].patch( {SOURCE_COLOR_FIELD: [(slice(len(color_list)), color_list)]} ) self._good(f"Updated annotator plot at {current_time()}") self._callback_apply = callback_apply self.annotator_apply.on_click(self._callback_apply) self.annotator_apply.on_click(self._callback_subset_display) def plot(self): for _key, _source in self.sources.items(): self.figure.circle( "x", "y", name=_key, color=SOURCE_COLOR_FIELD, source=_source, **self.glyph_kwargs[_key], ) self._good(f"Plotted subset {_key} with {self.dfs[_key].shape[0]} points") class BokehSoftLabelExplorer(BokehBaseExplorer): SUBSET_GLYPH_KWARGS = { _key: {"constant": {"line_alpha": 0.5}, "search": {"size": ("size", 10, 5, 7)}} for _key in ["raw", "train", "dev"] } def __init__(self, df_dict, label_col, score_col, **kwargs): assert label_col != "label", "'label' field is reserved" self.label_col = label_col self.score_col = score_col super().__init__(df_dict, **kwargs) def _build_tooltip(self, extra): standard = bokeh_hover_tooltip( **self.__class__.TOOLTIP_KWARGS, custom={"Soft Label": self.label_col, "Soft Score": self.score_col}, ) return f"{standard}\n{extra}" def _setup_dfs(self, df_dict, **kwargs): super()._setup_dfs(df_dict, **kwargs) for _key, _df in self.dfs.items(): if self.label_col not in _df.columns: _df[self.label_col] = module_config.ABSTAIN_DECODED if self.score_col not in _df.columns: _df[self.score_col] = 0.5 def _postprocess_sources(self): color_dict = self.auto_color_mapping() def get_color(label): return color_dict.get(label, "gainsboro") scores = np.concatenate( [_df[self.score_col].tolist() for _df in self.dfs.values()] ) scores_mean = scores.mean() scores_std = scores.std() + 1e-4 def pseudo_percentile(confidence, lower=0.1, upper=0.9): unit_shift = upper - lower / 4 shift = unit_shift * (confidence - scores_mean) / scores_std percentile = 0.5 + shift return min(upper, max(lower, percentile)) for _key, _df in self.dfs.items(): _color = _df[self.label_col].apply(get_color).tolist() _alpha = _df[self.score_col].apply(pseudo_percentile).tolist() self.sources[_key].add(_color, SOURCE_COLOR_FIELD) self.sources[_key].add(_alpha, SOURCE_ALPHA_FIELD) def _setup_widgets(self): from bokeh.models import RangeSlider, CheckboxGroup super()._setup_widgets() self.score_range = RangeSlider( start=0.0, end=1.0, value=(0.0, 1.0), step=0.01, title="Score range", ) self.score_filter_box = CheckboxGroup( labels=["use as selection filter"], active=[] ) self.score_filter = row(self.score_range, self.score_filter_box) def activated(): return bool(0 in self.score_filter_box.active) def subroutine(df, lower, upper): keep_l = set(np.where(df[self.score_col] >= lower)[0]) keep_u = set(np.where(df[self.score_col] <= upper)[0]) kept = keep_l.intersection(keep_u) return kept def filter_by_score(indices, subset): in_range = subroutine(self.dfs[subset], *self.score_range.value) return indices.intersection(in_range) for _key in self.sources.keys(): self._selection_filters[_key].data.add( lambda indices, subset: filter_by_score(indices, subset) if activated() else indices ) self.score_range.on_change( "value", 
lambda attr, old, new: self._trigger_selection_filters() if activated() else None, ) self.score_filter_box.on_change( "active", lambda attr, old, new: self._trigger_selection_filters() )
MIT License
hellohaptik/multi-task-nlp
data_preparation.py
create_data_multithreaded
python
def create_data_multithreaded(data, wrtPath, tokenizer, taskObj, taskName, maxSeqLen, multithreaded):
    man = mp.Manager()
    tempFilesList = man.list()
    numProcess = 1
    if multithreaded:
        numProcess = mp.cpu_count() - 1

    '''
    Dividing the entire data into chunks which can be sent to different processes.
    Each process will write its chunk into a file.
    After all processes are done writing, we will combine all the files into one
    '''
    taskType = taskObj.taskTypeMap[taskName]
    labelMap = taskObj.labelMap[taskName]
    chunkSize = int(len(data) / (numProcess))
    print('Data Size: ', len(data))
    print('number of threads: ', numProcess)

    processes = []
    for i in range(numProcess):
        dataChunk = data[chunkSize*i : chunkSize*(i+1)]

        if taskType == TaskType.SingleSenClassification:
            p = mp.Process(target = create_data_single_sen_classification,
                           args = (dataChunk, i, tempFilesList, maxSeqLen, tokenizer, labelMap))

        if taskType == TaskType.SentencePairClassification:
            p = mp.Process(target = create_data_sentence_pair_classification,
                           args = (dataChunk, i, tempFilesList, maxSeqLen, tokenizer))

        if taskType == TaskType.NER:
            p = mp.Process(target = create_data_ner,
                           args = (dataChunk, i, tempFilesList, maxSeqLen, tokenizer, labelMap))

        p.start()
        processes.append(p)

    for pr in processes:
        pr.join()

    with open(wrtPath, 'w') as f:
        for file in tempFilesList:
            with open(file, 'r') as r:
                for line in r:
                    sample = json.loads(line)
                    f.write('{}\n'.format(json.dumps(sample)))
            os.remove(file)
This function uses multi-processing to create the data in the required format for base models as per the task.

Utilizing multiple cores helps in processing huge data with speed.
https://github.com/hellohaptik/multi-task-nlp/blob/b8ae9c051437213245b51b9b1a5bea10565c38e8/data_preparation.py#L215-L267
import argparse import os import json import multiprocessing as mp from keras.preprocessing.sequence import pad_sequences from utils.data_utils import TaskType, ModelType, NLP_MODELS from utils.task_utils import TasksParam from tqdm import tqdm from ast import literal_eval def load_data(dataPath, taskType, hasLabels): allData = [] for line in open(dataPath): cols = line.strip("\n").split("\t") if taskType == TaskType.SingleSenClassification: if hasLabels is True: if not len(cols) == 3: print(line) assert len(cols) == 3, "Data is not in Single Sentence Classification format" row = {"uid": cols[0], "label": cols[1], "sentenceA": cols[2]} else: row = {"uid": cols[0], "label": '0', "sentenceA": cols[1]} elif taskType == TaskType.SentencePairClassification: if hasLabels is True: if len(cols) != 4: print('skipping row: {}'.format(cols)) continue assert len(cols) == 4, "Data is not in Sentence Pair Classification format" row = {"uid": cols[0], "label": cols[1],"sentenceA": cols[2], "sentenceB": cols[3]} else: row = {"uid": cols[0], "label": '0', "sentenceA": cols[1], "sentenceB": cols[2]} elif taskType == TaskType.NER: if hasLabels is True: assert len(cols) == 3, "Data not in NER format" row = {"uid":cols[0], "label":literal_eval(cols[1]), "sentence":literal_eval(cols[2])} assert type(row['label'])==list, "Label should be in list of token labels format in data" else: row = {"uid":cols[0], "label": ["O"]*len(literal_eval(cols[1])), "sentence":literal_eval(cols[1])} assert type(row['sentence'])==list, "Sentence should be in list of token labels format in data" else: raise ValueError(taskType) allData.append(row) return allData def standard_data_converter(maxSeqLen, tokenizer, senA, senB = None): typeIds = None mask = None if senB: out = tokenizer.encode_plus(senA, senB, add_special_tokens = True, truncation_strategy = 'only_second', max_length = maxSeqLen, pad_to_max_length = True) else: out = tokenizer.encode_plus(senA, add_special_tokens=True, truncation_strategy ='only_first', max_length = maxSeqLen, pad_to_max_length=True) tokenIds = out['input_ids'] if 'token_type_ids' in out.keys(): typeIds = out['token_type_ids'] if 'attention_mask' in out.keys(): mask = out['attention_mask'] return tokenIds, typeIds, mask def create_data_single_sen_classification(data, chunkNumber, tempList, maxSeqLen, tokenizer, labelMap): name = 'single_sen_{}.json'.format(str(chunkNumber)) with open(name, 'w') as wf: with tqdm(total = len(data), position = chunkNumber) as progress: for idx, sample in enumerate(data): ids = sample['uid'] senA = sample['sentenceA'] label = sample['label'] assert label.isnumeric() or labelMap is not None, "In Sen Classification, either labels \ should be integers or label map should be given in task file" if label.isnumeric(): label = int(label) else: label = labelMap[sample['label']] inputIds, typeIds, inputMask = standard_data_converter(maxSeqLen, tokenizer, senA) features = { 'uid': ids, 'label': label, 'token_id': inputIds, 'type_id': typeIds, 'mask': inputMask} wf.write('{}\n'.format(json.dumps(features))) progress.update(1) tempList.append(name) def create_data_sentence_pair_classification(data, chunkNumber, tempList, maxSeqLen, tokenizer): name = 'sentence_pair_{}.json'.format(str(chunkNumber)) with open(name, 'w') as wf: with tqdm(total = len(data), position = chunkNumber) as progress: for idx, sample in enumerate(data): ids = sample['uid'] senA = sample['sentenceA'] senB = sample['sentenceB'] label = sample['label'] assert label.isnumeric() or labelMap is not None, "In Sen 
Classification, either labels \ should be integers or label map should be given in task file" if label.isnumeric(): label = int(label) else: label = labelMap[sample['label']] inputIds, typeIds, inputMask = standard_data_converter(maxSeqLen, tokenizer, senA, senB) features = { 'uid': ids, 'label': label, 'token_id': inputIds, 'type_id': typeIds, 'mask': inputMask} wf.write('{}\n'.format(json.dumps(features))) progress.update(1) tempList.append(name) def create_data_ner(data, chunkNumber, tempList, maxSeqLen, tokenizer, labelMap): name = 'ner_{}.json'.format(str(chunkNumber)) with open(name, 'w') as wf: with tqdm(total = len(data), position = chunkNumber) as progress: for idx, sample in enumerate(data): ids = sample['uid'] tempTokens = ['[CLS]'] tempLabels = ['[CLS]'] for word, label in zip(sample['sentence'], sample['label']): tokens = tokenizer.tokenize(word) for m, token in enumerate(tokens): tempTokens.append(token) if m==0: tempLabels.append(label) else: tempLabels.append('X') tempTokens.append('[SEP]') tempLabels.append('[SEP]') out = tokenizer.encode_plus(text = tempTokens, add_special_tokens=False, truncation_strategy ='only_first', max_length = maxSeqLen, pad_to_max_length=True) typeIds = None inputMask = None tokenIds = out['input_ids'] if 'token_type_ids' in out.keys(): typeIds = out['token_type_ids'] if 'attention_mask' in out.keys(): inputMask = out['attention_mask'] tempLabelsEnc = pad_sequences([ [labelMap[l] for l in tempLabels] ], maxlen=maxSeqLen, value=labelMap["O"], padding="post", dtype="long", truncating="post").tolist()[0] assert len(tempLabelsEnc) == len(tokenIds), "mismatch between processed tokens and labels" features = { 'uid': ids, 'label': tempLabelsEnc, 'token_id': tokenIds, 'type_id': typeIds, 'mask': inputMask} wf.write('{}\n'.format(json.dumps(features))) progress.update(1) tempList.append(name)
Apache License 2.0
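A stripped-down sketch, not the project's code, of the chunk -> per-process temp file -> merge pattern used by create_data_multithreaded; the helper names here are hypothetical.

import json
import multiprocessing as mp
import os

def _write_chunk(chunk, idx, temp_files):
    name = 'tmp_chunk_{}.json'.format(idx)
    with open(name, 'w') as f:
        for sample in chunk:
            f.write('{}\n'.format(json.dumps(sample)))
    temp_files.append(name)

def write_parallel(data, out_path, num_proc=4):
    man = mp.Manager()
    temp_files = man.list()          # shared list collects per-process file names
    chunk_size = max(1, len(data) // num_proc)
    procs = []
    for i in range(num_proc):
        start = chunk_size * i
        end = None if i == num_proc - 1 else chunk_size * (i + 1)
        p = mp.Process(target=_write_chunk, args=(data[start:end], i, temp_files))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    with open(out_path, 'w') as out:  # merge the temp files and clean up
        for name in temp_files:
            with open(name) as r:
                out.write(r.read())
            os.remove(name)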
upsert/lutron-caseta-pro
custom_components/lutron_caseta_pro/fan.py
CasetaFan.async_set_speed
python
async def async_set_speed(self, speed: str) -> None:
    self._speed = speed
    if speed not in SPEED_MAPPING:
        _LOGGER.debug("Unknown speed %s, setting to %s", speed, SPEED_HIGH)
        self._speed = SPEED_HIGH
    _LOGGER.debug(
        "Writing fan OUTPUT value: %d %d %.2f",
        self._integration,
        Caseta.Action.SET,
        SPEED_MAPPING[self._speed],
    )
    await self._data.caseta.write(
        Caseta.OUTPUT,
        self._integration,
        Caseta.Action.SET,
        SPEED_MAPPING[self._speed],
    )
Set the speed of the fan.
https://github.com/upsert/lutron-caseta-pro/blob/6344931f32705106bf6998df6b3e5ea20c831869/custom_components/lutron_caseta_pro/fan.py#L126-L143
import logging from homeassistant.components.fan import ( DOMAIN, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF, SUPPORT_SET_SPEED, FanEntity, ) from homeassistant.const import CONF_DEVICES, CONF_HOST, CONF_ID, CONF_MAC, CONF_NAME from . import ( ATTR_AREA_NAME, ATTR_INTEGRATION_ID, CONF_AREA_NAME, Caseta, CasetaData, CasetaEntity, ) _LOGGER = logging.getLogger(__name__) SPEED_MEDIUM_HIGH = "medium_high" SPEED_MAPPING = { SPEED_OFF: 0.00, SPEED_LOW: 25.00, SPEED_MEDIUM: 50.00, SPEED_MEDIUM_HIGH: 75.00, SPEED_HIGH: 100.00, } async def async_setup_platform(hass, config, async_add_devices, discovery_info=None): if discovery_info is None: return bridge = Caseta(discovery_info[CONF_HOST]) await bridge.open() data = CasetaData(bridge) devices = [ CasetaFan(fan, data, discovery_info[CONF_MAC]) for fan in discovery_info[CONF_DEVICES] ] data.set_devices(devices) async_add_devices(devices, True) bridge.register(data.read_output) bridge.start(hass) class CasetaFan(CasetaEntity, FanEntity): def __init__(self, fan, data, mac): self._data = data self._name = fan[CONF_NAME] self._area_name = None if CONF_AREA_NAME in fan: self._area_name = fan[CONF_AREA_NAME] self._name = fan[CONF_AREA_NAME] + " " + fan[CONF_NAME] self._integration = int(fan[CONF_ID]) self._is_on = False self._mac = mac self._speed = SPEED_OFF self._platform_domain = DOMAIN async def async_added_to_hass(self): await self.query() async def query(self): await self._data.caseta.query( Caseta.OUTPUT, self._integration, Caseta.Action.SET ) @property def device_state_attributes(self): attr = {ATTR_INTEGRATION_ID: self._integration} if self._area_name: attr[ATTR_AREA_NAME] = self._area_name return attr @property def is_on(self): return self._is_on @property def speed(self) -> str: return self._speed @property def speed_list(self) -> list: return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_MEDIUM_HIGH, SPEED_HIGH] @property def supported_features(self) -> int: return SUPPORT_SET_SPEED async def async_turn_on(self, speed: str = None, **kwargs) -> None: if speed is None: speed = SPEED_HIGH await self.async_set_speed(speed)
Apache License 2.0
artyompal/tpu_models
models/official/mnasnet/mixnet/mixnet_builder.py
MixnetDecoder.encode
python
def encode(self, blocks_args):
    block_strings = []
    for block in blocks_args:
        block_strings.append(self._encode_block_string(block))
    return block_strings
Encodes a list of Mixnet Blocks to a list of strings.

Args:
  blocks_args: A list of namedtuples to represent Mixnet blocks arguments.

Returns:
  a list of strings, each string is a notation of Mixnet block.
https://github.com/artyompal/tpu_models/blob/639306f30e085bb1cdb5b1118a4c96a2dbe14e3e/models/official/mnasnet/mixnet/mixnet_builder.py#L116-L127
from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import tensorflow as tf from mixnet import mixnet_model class MixnetDecoder(object): def _decode_block_string(self, block_string): assert isinstance(block_string, str) ops = block_string.split('_') options = {} for op in ops: splits = re.split(r'(\d.*)', op) if len(splits) >= 2: key, value = splits[:2] options[key] = value if 's' not in options or len(options['s']) != 2: raise ValueError('Strides options should be a pair of integers.') def _parse_ksize(ss): return [int(k) for k in ss.split('.')] return mixnet_model.BlockArgs( expand_ksize=_parse_ksize(options['a']), dw_ksize=_parse_ksize(options['k']), project_ksize=_parse_ksize(options['p']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se']) if 'se' in options else None, strides=[int(options['s'][0]), int(options['s'][1])], swish=('sw' in block_string), dilated=('dilated' in block_string)) def _encode_block_string(self, block): def _encode_ksize(arr): return '.'.join([str(k) for k in arr]) args = [ 'r%d' % block.num_repeat, 'k%s' % _encode_ksize(block.dw_ksize), 'a%s' % _encode_ksize(block.expand_ksize), 'p%s' % _encode_ksize(block.project_ksize), 's%d%d' % (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters ] if (block.se_ratio is not None and block.se_ratio > 0 and block.se_ratio <= 1): args.append('se%s' % block.se_ratio) if block.id_skip is False: args.append('noskip') if block.swish: args.append('sw') if block.dilated: args.append('dilated') return '_'.join(args) def decode(self, string_list): assert isinstance(string_list, list) blocks_args = [] for block_string in string_list: blocks_args.append(self._decode_block_string(block_string)) return blocks_args
Apache License 2.0
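A round-trip sketch of the block-string notation handled by encode()/decode(); the block strings below are illustrative values in the style of the MixNet definitions, not taken from the repository.

decoder = MixnetDecoder()

blocks = decoder.decode([
    'r1_k3_a1_p1_s11_e1_i32_o16',
    'r2_k3.5.7_a1.1_p1.1_s22_e6_i16_o32_se0.25_sw',
])
print(blocks[1].dw_ksize)      # [3, 5, 7] - mixed depthwise kernel sizes
print(decoder.encode(blocks))  # back to the block-string notation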
packtpublishing/learning-path-learn-web-development-with-python
Module 01/LearnPythonProgrammingSecondEdition_Code/Chapter10/ch10/sudoku/algo/solver.py
place
python
def place(grid, square, digit):
    other_vals = grid[square].replace(digit, '')
    if all(eliminate(grid, square, val) for val in other_vals):
        return grid
    return False
Eliminate all the other values (except digit) from grid[square] and propagate.

Return grid, or False if a contradiction is detected.
https://github.com/packtpublishing/learning-path-learn-web-development-with-python/blob/fc4062d19d6041558ef7d50bae1579fc6f351812/Module 01/LearnPythonProgrammingSecondEdition_Code/Chapter10/ch10/sudoku/algo/solver.py#L56-L64
import os from itertools import zip_longest, chain from time import time def cross_product(v1, v2): return [w1 + w2 for w1 in v1 for w2 in v2] def chunk(iterable, n, fillvalue=None): args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue) digits = '123456789' rows = 'ABCDEFGHI' cols = digits squares = cross_product(rows, cols) all_units = ( [cross_product(rows, c) for c in cols] + [cross_product(r, cols) for r in rows] + [cross_product(rs, cs) for rs in chunk(rows, 3) for cs in chunk(cols, 3)] ) units = dict( (square, [unit for unit in all_units if square in unit]) for square in squares ) peers = dict( (square, set(chain(*units[square])) - set([square])) for square in squares ) def parse_puzzle(puzzle): assert set(puzzle) <= set('.0123456789') assert len(puzzle) == 81 grid = dict((square, digits) for square in squares) for square, digit in zip(squares, puzzle): if digit in digits and not place(grid, square, digit): return False return grid
MIT License
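A small constraint-propagation sketch: parse_puzzle() builds the candidate grid, calling place() for every given digit, and eliminate(), defined elsewhere in the module, propagates each assignment to the square's peers. The values below are illustrative.

grid = parse_puzzle('.' * 81)   # empty puzzle: every square keeps all nine candidates
print(grid['A1'])               # '123456789'

grid = place(grid, 'A1', '5')   # assign 5 to the top-left square
print(grid['A1'])               # '5'
print('5' in grid['A2'])        # False - peers of A1 can no longer hold 5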
cue/scales
src/greplin/scales/aggregation.py
Sorted.addValue
python
def addValue(self, source, data):
    self.__result.append((source, self._dataFormat.getValue(data)))
Adds a value from the given source.
https://github.com/cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/aggregation.py#L250-L252
from collections import defaultdict import datetime import json import os import re import six class DefaultFormat(object): def getCount(self, data): return data['count'] def getValue(self, data): return data['average'] class DirectFormat(object): def getCount(self, _): return 1 def getValue(self, data): return data class TimerFormat(object): def getCount(self, data): assert data['type'] == "timer" return data['rate']['count'] def getValue(self, data): assert data['type'] == "timer" return data['duration']['median'] class TimerMeanFormat(object): def getCount(self, data): assert data['type'] == "timer" return data['rate']['count'] def getValue(self, data): assert data['type'] == "timer" return data['duration']['mean'] class CounterFormat(object): def getCount(self, data): assert data['type'] == "counter" return data['count'] def getValue(self, data): assert data['type'] == "counter" return data['count'] class MeterFormat(object): def getCount(self, data): assert data['type'] == "meter" return data['count'] def getValue(self, data): assert data['type'] == "meter" return data['mean'] class GaugeFormat(object): def getValue(self, data): assert data['type'] == 'gauge' return data['value'] class DataFormats(object): DEFAULT = DefaultFormat() DIRECT = DirectFormat() TIMER = TimerFormat() TIMER_MEAN = TimerMeanFormat() COUNTER = CounterFormat() METER = MeterFormat() GAUGE = GaugeFormat() class Aggregator(object): def __init__(self, name = None, dataFormat = DataFormats.DEFAULT): self.name = name or self.DEFAULT_NAME self._dataFormat = dataFormat def clone(self): return type(self)(name = self.name, dataFormat = self._dataFormat) class Average(Aggregator): DEFAULT_NAME = "average" _count = 0 _total = 0 def addValue(self, _, value): if value is not None: try: self._count += self._dataFormat.getCount(value) self._total += self._dataFormat.getValue(value) * self._dataFormat.getCount(value) except TypeError: self._count += 1 self._total += value def result(self): return { "count": self._count, "total": self._total, "average": float(self._total) / self._count if self._count else 0 } class Sum(Aggregator): DEFAULT_NAME = "sum" total = 0 def addValue(self, _, value): self.total += self._dataFormat.getValue(value) def result(self): return self.total def _humanSortKey(s): if isinstance(s, str): return [w.isdigit() and int(w) or w for w in re.split(r'(\d+)', s)] else: return s class InverseMap(Aggregator): DEFAULT_NAME = "inverse" def __init__(self, *args, **kw): Aggregator.__init__(self, *args, **kw) self.__result = defaultdict(list) def addValue(self, source, data): self.__result[self._dataFormat.getValue(data)].append(source) def result(self): for value in six.itervalues(self.__result): value.sort(key = _humanSortKey) return self.__result class Sorted(Aggregator): DEFAULT_NAME = "sorted" def __init__(self, cmp=None, key=None, reverse=False, *args, **kw): Aggregator.__init__(self, *args, **kw) self.__result = [] self.__cmp = cmp self.__key = key self.__reverse = reverse
Apache License 2.0
kelvinguu/lang2program
strongsup/example.py
Example.logical_form
python
def logical_form(self):
    return self._logical_form
The correct logical form for the example. A list of Predicates.

Raises:
    AttributeError, if no logical form present

Returns:
    list[Predicate]
https://github.com/kelvinguu/lang2program/blob/dd4eb8439d29f0f72dd057946287551ed0f046a3/strongsup/example.py#L46-L57
from collections import Sequence from itertools import izip import sys from gtd.io import JSONPicklable from gtd.utils import cached_property, UnicodeMixin from strongsup.predicate import Predicate from strongsup.utils import PredicateList from strongsup.value import Value from strongsup.world import World class Example(JSONPicklable): def __init__(self, context, answer=None, logical_form=None): assert isinstance(context, BaseContext) self._context = context if answer: assert all(isinstance(x, Value) for x in answer) self._answer = answer if logical_form: assert all(isinstance(x, Predicate) for x in logical_form) self._logical_form = logical_form @property def context(self): return self._context @property def answer(self): return self._answer @property
Apache License 2.0
nastools/homeassistant
homeassistant/components/ffmpeg.py
get_binary
python
def get_binary():
    return FFMPEG_CONFIG.get(CONF_FFMPEG_BIN)
Return ffmpeg binary from config. Async friendly.
https://github.com/nastools/homeassistant/blob/7ca1180bd42713f2d77bbc3f0b27b231ba8784aa/homeassistant/components/ffmpeg.py#L51-L56
import asyncio import logging import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.util.async import run_coroutine_threadsafe DOMAIN = 'ffmpeg' REQUIREMENTS = ["ha-ffmpeg==0.15"] _LOGGER = logging.getLogger(__name__) CONF_INPUT = 'input' CONF_FFMPEG_BIN = 'ffmpeg_bin' CONF_EXTRA_ARGUMENTS = 'extra_arguments' CONF_OUTPUT = 'output' CONF_RUN_TEST = 'run_test' DEFAULT_BINARY = 'ffmpeg' DEFAULT_RUN_TEST = True CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Optional(CONF_FFMPEG_BIN, default=DEFAULT_BINARY): cv.string, vol.Optional(CONF_RUN_TEST, default=DEFAULT_RUN_TEST): cv.boolean, }), }, extra=vol.ALLOW_EXTRA) FFMPEG_CONFIG = { CONF_FFMPEG_BIN: DEFAULT_BINARY, CONF_RUN_TEST: DEFAULT_RUN_TEST, } FFMPEG_TEST_CACHE = {} def setup(hass, config): if DOMAIN in config: FFMPEG_CONFIG.update(config.get(DOMAIN)) return True
MIT License
christopher-dg/gpymusic
gpymusic/client.py
Client.queue
python
def queue(self, arg=None):
    if arg is None:
        if not common.q:
            common.w.error_msg('The queue is empty')
        else:
            if common.w.curses:
                limit = common.w.ylimit - 2
            else:
                limit = -1
            common.v.replace(common.q.collect(limit))
        return

    if arg in ('c', 'C'):
        del common.q[:]
        common.w.outbar_msg('Cleared queue.')
        return

    if common.v.is_empty():
        common.w.error_msg('Wrong context for queue')
        return

    try:
        num = int(arg)
    except ValueError:
        try:
            nums = [int(i) for i in arg.split()]
        except ValueError:
            common.w.error_msg('Invalid argument to queue')
        else:
            common.w.outbar_msg('Adding items to the queue...')
            items = [self.get_option(num) for num in nums]
            count = common.q.extend(
                [item for item in items if item is not None]
            )
            common.w.outbar_msg(
                'Added %d song%s to the queue.'
                % (count, '' if count is 1 else 's')
            )
    else:
        if (
                num > len(common.v['songs'])
                and num <= len(common.v['songs']) + len(common.v['artists'])
        ):
            common.w.error_msg(
                'Can only add songs or albums to the queue.'
            )
        else:
            item = self.get_option(num)
            if item is not None:
                count = common.q.append(item)
                common.w.outbar_msg(
                    'Added %d song%s to the queue.'
                    % (count, '' if count is 1 else 's')
                )
Display the current queue, or add an item to the queue.

Keyword arguments:
arg=None: Index of the MusicObject in the main window to add
  to the queue, 'c' to clear the queue, None to display
  the queue, or a space-delimited list of indices to add
  to the queue, i.e. '1 2 3'.
https://github.com/christopher-dg/gpymusic/blob/e16ee3122bfd15dd5558824dd9781aee2edf79e1/gpymusic/client.py#L142-L209
from . import common from . import music_objects import json import zipfile from os.path import exists, isfile, join from random import shuffle from gmusicapi import Musicmanager class Client: def transition(self, input=""): commands = { 'h': self.help, 'help': self.help, 'e': self.expand, 'expand': self.expand, 'radio': self.radio, 's': self.search, 'search': self.search, 'p': self.play, 'play': self.play, 'q': self.queue, 'queue': self.queue, 'w': self.write, 'write': self.write, 'r': self.restore, 'restore': self.restore, } arg = None if common.v.is_empty(): common.w.addstr( common.w.infobar, 'Enter \'h\' or \'help\' if you need help.' ) else: common.w.now_playing() user_input = common.w.get_input() if not input else input try: command, arg = (s.strip() for s in user_input.split(maxsplit=1)) except ValueError: command = user_input.strip() if command in commands: commands[command](arg) common.w.display() else: common.w.error_msg('Nonexistent command') def help(self, arg=0): common.v.clear() if not common.w.curses: return common.w.main.erase() common.w.main.addstr( """ Commands: s/search search-term: Search for search-term e/expand 123: Expand item number 123 radio 123: Create radio station around item number 123 p/play: Play the current queue p/play s: Shuffle and play the current queue p/play 123: Play item number 123 q/queue: Show the current queue q/queue 123: Add item number 123 to the queue q/queue 1 2 3: Add items 1, 2, and 3 to the queue q/queue c: Clear the current queue w/write playlist-name: Write current queue to playlist playlist-name r/restore playlist-name: Replace the current queue with a playlist h/help: Show this help message Ctrl-C: Exit gpymusic """ ) common.w.main.refresh() def write(self, fn=None): path = join(common.DATA_DIR, 'playlists') if not common.q: common.w.error_msg('Queue is empty') elif fn is None: common.w.error_msg('Missing argument to write') elif not exists(path): common.w.error_msg('Path to playlists does not exist') elif exists(join(path, fn)): common.w.error_msg('Playist %s already exists' % fn) else: with open(join(path, fn), 'w') as f: json.dump(common.q, f) common.w.outbar_msg('Wrote queue to %s.' % fn) def restore(self, fn=None): path = join(common.DATA_DIR, 'playlists') if fn is None: common.w.error_msg('Missing argument to restore') elif not isfile(join(path, fn)): common.w.error_msg('Playlist %s does not exist' % fn) else: common.w.outbar_msg('Restoring queue from %s...' % fn) try: with open(join(path, fn)) as f: json_songs = json.load(f) except json.decoder.JSONDecodeError: common.w.error_msg('%s is not a valid playlist file' % fn) else: common.q.restore(json_songs)
MIT License
anydesk/rest-api
anydesk/api.py
API.query_sessions
python
def query_sessions(self, **params):
    param_strs = []
    for key in params:
        if key == "start":
            param_strs.append("from=" + str(params[key]))
        elif key == "end":
            param_strs.append("to=" + str(params[key]))
        else:
            param_strs.append(key + "=" + str(params[key]))

    resource = "sessions?" + "&".join(param_strs)
    if len(param_strs) == 0:
        resource = "sessions"

    data = json.loads(self.request(resource).decode('utf-8'))

    sessions = []
    for session in data["list"]:
        sessions.append(self._session_from_data(session))

    return sessions
List sessions according to given parameters.

direction - in/out/inout - List only incoming or outgoing sessions
start - Only list sessions after the given Unix-Timestamp
end - Only list sessions before the given Unix-Timestamp
sort - from.cid/to.cid/start-time/end-time/duration - Sort list by given parameter
offset - Index of the first item listed
limit - Maximum number of items listed
https://github.com/anydesk/rest-api/blob/97615eb77bb5cfc6c954d9c748ca981365ab3319/anydesk/api.py#L137-L164
import urllib.request import hashlib, hmac, base64 import time, json import configparser from .session import * from .addressbook import * from .client import * from .sysinfo import * class API: def __init__(self, license="", key="", url="https://v1.api.anydesk.com:8081/", path=""): self.url = url if path != "": config = configparser.ConfigParser() config.read(path) self.license = config["Auth"]["license"] self.key = config["Auth"]["key"] else: self.license = license self.key = key def auth(self, resource, content="", method="GET"): sha1 = hashlib.sha1() sha1.update(content.encode("utf8")) content_hash = str(base64.b64encode(sha1.digest()), "UTF-8") timestamp = str(int(time.time())) request_string = method + "\n" + resource + "\n" + timestamp + "\n" + content_hash token = str(base64.b64encode(hmac.new( self.key.encode("utf-8"), request_string.encode("utf-8"), hashlib.sha1 ).digest()), "UTF-8" ) return "AD " + self.license + ":" + timestamp + ":" + token def request(self, resource): req = urllib.request.Request(self.url + resource, headers={ "Authorization": self.auth("/" + resource) }) res = urllib.request.urlopen(req) return res.read() def put(self, resource, body): req = urllib.request.Request(self.url + resource, body.encode("utf-8"), headers={ "Authorization": self.auth("/" + resource, content=body, method="PUT"), "Content-Type": "application/json" }) req.get_method = lambda: "PUT" res = urllib.request.urlopen(req) return res.read() def delete(self, resource, body): req = urllib.request.Request(self.url + resource, body.encode("utf-8"), headers={ "Authorization": self.auth("/" + resource, content=body, method="DELETE"), "Content-Type": "application/json" }) req.get_method = lambda: "DELETE" res = urllib.request.urlopen(req) return res.read() def patch(self, resource, body): req = urllib.request.Request(self.url + resource, body.encode("utf-8"), headers={ "Authorization": self.auth("/" + resource, content=body, method="PATCH"), "Content-Type": "application/json" }) req.get_method = lambda: "PATCH" res = urllib.request.urlopen(req) return res.read() def post(self, resource, body): req = urllib.request.Request(self.url + resource, body.encode("utf-8"), headers={ "Authorization": self.auth("/" + resource, content=body, method="POST"), "Content-Type": "application/json" }) res = urllib.request.urlopen(req) return res.read() def _client_from_data(self, client): comment = None if "comment" in client: comment = client["comment"] return Client( self, client["cid"], client["alias"], client["client-version"], client["online"], client["online-time"], comment ) def all_clients(self): data = json.loads(self.request("clients").decode('utf-8')) clients = [] for client in data["list"]: clients.append(self._client_from_data(client)) return clients def _session_from_data(self, session): return Session( self, session["sid"], ClientId( self, session["from"]["cid"], session["from"]["alias"] ), ClientId( self, session["to"]["cid"], session["to"]["alias"] ), session["active"], session["start-time"], session["end-time"], session["duration"], session["comment"] ) def all_sessions(self, client=None): if client != None: data = json.loads(self.request("sessions?cid=" + str(client.id)).decode('utf-8')) else: data = json.loads(self.request("sessions").decode('utf-8')) sessions = [] for session in data["list"]: sessions.append(self._session_from_data(session)) print(str(session)) return sessions
MIT License
frank-xwang/cld-unsupervisedlearning
lib/utils.py
DistributedShufle.get_shuffle_ids
python
def get_shuffle_ids(bsz, epoch):
    torch.manual_seed(epoch)
    forward_inds = torch.randperm(bsz).long().cuda()
    backward_inds = torch.zeros(forward_inds.shape[0]).long().cuda()
    value = torch.arange(bsz).long().cuda()
    backward_inds.index_copy_(0, forward_inds, value)
    return forward_inds, backward_inds
generate shuffle ids for ShuffleBN
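A minimal sketch of how the returned index pair is typically used for ShuffleBN; it assumes a CUDA device, since the indices are created on the GPU.

import torch

bsz, epoch = 8, 0
x = torch.randn(bsz, 4).cuda()
forward_inds, backward_inds = DistributedShufle.get_shuffle_ids(bsz, epoch)
shuffled = x[forward_inds]           # shuffle samples before the BN forward pass
restored = shuffled[backward_inds]   # backward_inds is the inverse permutation
assert torch.equal(restored, x)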
https://github.com/frank-xwang/cld-unsupervisedlearning/blob/5c5a5ac87b848a8f2b826455a5af04add7ad4cf9/lib/utils.py#L112-L123
import torch import argparse import torch.distributed as dist import numpy as np class AverageMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def dist_collect(x): x = x.contiguous() out_list = [torch.zeros_like(x, device=x.device, dtype=x.dtype) for _ in range(dist.get_world_size())] dist.all_gather(out_list, x) return torch.cat(out_list, dim=0) def reduce_tensor(tensor): rt = tensor.clone() dist.all_reduce(rt, op=dist.ReduceOp.SUM) rt /= dist.get_world_size() return rt class DistributedShufle: @staticmethod def forward_shuffle(x, epoch): x_all = dist_collect(x) forward_inds, backward_inds = DistributedShufle.get_shuffle_ids(x_all.shape[0], epoch) forward_inds_local = DistributedShufle.get_local_id(forward_inds) return x_all[forward_inds_local], backward_inds @staticmethod def backward_shuffle(x, backward_inds, return_local=True, branch_two=None): x_all = dist_collect(x) if return_local: backward_inds_local = DistributedShufle.get_local_id(backward_inds) if branch_two is not None: branch_two_all = dist_collect(branch_two) return x_all[backward_inds], x_all[backward_inds_local], branch_two_all[backward_inds_local] else: return x_all[backward_inds], x_all[backward_inds_local] else: return x_all[backward_inds] @staticmethod def get_local_id(ids): return ids.chunk(dist.get_world_size())[dist.get_rank()] @staticmethod
MIT License
holoviz/panel
panel/auth.py
decode_response_body
python
def decode_response_body(response):
    try:
        body = codecs.decode(response.body, 'ascii')
    except Exception:
        body = codecs.decode(response.body, 'utf-8')
    body = re.sub('"', '\"', body)
    body = re.sub("'", '"', body)
    body = json.loads(body)
    return body
Decodes the JSON-format response body

Arguments
---------
response: tornado.httpclient.HTTPResponse

Returns
-------
Decoded response content
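A minimal illustration using a stand-in object with a .body attribute in place of a real tornado HTTPResponse; the token payload is made up.

import types

fake_response = types.SimpleNamespace(
    body=b"{'access_token': 'abc123', 'token_type': 'bearer'}"
)
# single-quoted payloads are rewritten to double quotes before json.loads
decode_response_body(fake_response)
# -> {'access_token': 'abc123', 'token_type': 'bearer'}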
https://github.com/holoviz/panel/blob/c35200e885942a68ff152f7b3180e0aea72eafc4/panel/auth.py#L27-L47
import codecs
import json
import logging
import os
import pkg_resources
import re
import uuid

from urllib.parse import urlencode

import tornado

from bokeh.server.auth_provider import AuthProvider
from tornado.auth import OAuth2Mixin
from tornado.httpclient import HTTPRequest, HTTPError
from tornado.httputil import url_concat

from .config import config
from .io import state
from .util import base64url_encode, base64url_decode

log = logging.getLogger(__name__)

STATE_COOKIE_NAME = 'panel-oauth-state'
BSD 3-Clause New or Revised License
vertexproject/synapse
synapse/lib/view.py
View.fork
python
async def fork(self, ldef=None, vdef=None):
    if ldef is None:
        ldef = {}
    if vdef is None:
        vdef = {}

    ldef = await self.core.addLayer(ldef)
    layriden = ldef.get('iden')

    vdef['parent'] = self.iden
    vdef['layers'] = [layriden] + [lyr.iden for lyr in self.layers]

    return await self.core.addView(vdef)
Make a new view inheriting from this view with the same layers and a new write layer on top

Args:
    ldef: layer parameter dict
    vdef: view parameter dict

Passed through to cortex.addLayer

Returns:
    new view object, with an iden the same as the new write layer iden
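A hedged sketch of forking the default view; it assumes an already-running Cortex instance `core` and is not runnable standalone.

async def fork_default_view(core):
    view = core.getView()                              # parent view
    new = await view.fork(vdef={'name': 'scratch fork'})
    # the fork keeps the parent's layers and adds a fresh write layer on top
    return new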
https://github.com/vertexproject/synapse/blob/a9d62ffacd9cc236ac52f92a734deef55c66ecf3/synapse/lib/view.py#L453-L477
import asyncio import logging import itertools import collections import synapse.exc as s_exc import synapse.common as s_common import synapse.lib.cell as s_cell import synapse.lib.coro as s_coro import synapse.lib.snap as s_snap import synapse.lib.nexus as s_nexus import synapse.lib.config as s_config import synapse.lib.spooled as s_spooled import synapse.lib.trigger as s_trigger import synapse.lib.stormctrl as s_stormctrl import synapse.lib.stormtypes as s_stormtypes logger = logging.getLogger(__name__) reqValidVdef = s_config.getJsValidator({ 'type': 'object', 'properties': { 'iden': {'type': 'string', 'pattern': s_config.re_iden}, 'name': {'type': 'string'}, 'parent': {'type': ['string', 'null'], 'pattern': s_config.re_iden}, 'creator': {'type': 'string', 'pattern': s_config.re_iden}, 'layers': { 'type': 'array', 'items': {'type': 'string', 'pattern': s_config.re_iden} }, }, 'additionalProperties': True, 'required': ['iden', 'parent', 'creator', 'layers'], }) class ViewApi(s_cell.CellApi): async def __anit__(self, core, link, user, view): await s_cell.CellApi.__anit__(self, core, link, user) self.view = view layriden = view.layers[0].iden self.allowedits = user.allowed(('layer', 'write'), gateiden=layriden) async def storNodeEdits(self, edits, meta): if not self.allowedits: mesg = 'storNodeEdits() not allowed without layer.write on layer.' raise s_exc.AuthDeny(mesg=mesg) if meta is None: meta = {} meta['time'] = s_common.now() meta['user'] = self.user.iden return await self.view.storNodeEdits(edits, meta) async def getCellIden(self): return self.view.iden class View(s_nexus.Pusher): snapctor = s_snap.Snap.anit async def __anit__(self, core, node): self.node = node self.iden = node.name() self.info = await node.dict() self.core = core trignode = await node.open(('triggers',)) self.trigdict = await trignode.dict() self.triggers = s_trigger.Triggers(self) for _, tdef in self.trigdict.items(): try: await self.triggers.load(tdef) except asyncio.CancelledError: raise except Exception: logger.exception(f'Failed to load trigger {tdef!r}') await s_nexus.Pusher.__anit__(self, iden=self.iden, nexsroot=core.nexsroot) self.layers = [] self.invalid = None self.parent = None self.permCheck = { 'node:add': self._nodeAddConfirm, 'prop:set': self._propSetConfirm, 'tag:add': self._tagAddConfirm, 'tag:prop:set': self._tagPropSetConfirm, } await self._initViewLayers() async def getStorNodes(self, buid): return await self.core._getStorNodes(buid, self.layers) def init2(self): parent = self.info.get('parent') if parent is not None: self.parent = self.core.getView(parent) def isafork(self): return self.parent is not None async def pack(self): d = {'iden': self.iden} d.update(self.info.pack()) layrinfo = [await lyr.pack() for lyr in self.layers] d['layers'] = layrinfo triginfo = [t.pack() for _, t in self.triggers.list()] d['triggers'] = triginfo return d async def getFormCounts(self): counts = collections.defaultdict(int) for layr in self.layers: for name, valu in (await layr.getFormCounts()).items(): counts[name] += valu return counts async def getEdgeVerbs(self): async with await s_spooled.Set.anit(dirn=self.core.dirn) as vset: for layr in self.layers: async for verb in layr.getEdgeVerbs(): await asyncio.sleep(0) if verb in vset: continue await vset.add(verb) yield verb async def getEdges(self, verb=None): async with await s_spooled.Set.anit(dirn=self.core.dirn) as eset: for layr in self.layers: async for edge in layr.getEdges(verb=verb): await asyncio.sleep(0) if edge in eset: continue await eset.add(edge) 
yield edge async def _initViewLayers(self): for iden in self.info.get('layers'): layr = self.core.layers.get(iden) if layr is None: self.invalid = iden logger.warning('view %r has missing layer %r' % (self.iden, iden)) continue self.layers.append(layr) async def eval(self, text, opts=None): opts = self.core._initStormOpts(opts) user = self.core._userFromOpts(opts) self.core._logStormQuery(text, user) info = {'query': text, 'opts': opts} await self.core.boss.promote('storm', user=user, info=info) async with await self.snap(user=user) as snap: async for node in snap.eval(text, opts=opts, user=user): yield node async def callStorm(self, text, opts=None): user = self.core._userFromOpts(opts) try: async for item in self.eval(text, opts=opts): await asyncio.sleep(0) except s_stormctrl.StormReturn as e: return await s_stormtypes.toprim(e.item) except asyncio.CancelledError: logger.warning(f'callStorm cancelled', extra={'synapse': {'text': text, 'username': user.name, 'user': user.iden}}) raise except Exception: logger.exception(f'Error during callStorm execution for {{ {text} }}', extra={'synapse': {'text': text, 'username': user.name, 'user': user.iden}}) raise async def nodes(self, text, opts=None): return [n async for n in self.eval(text, opts=opts)] async def storm(self, text, opts=None): opts = self.core._initStormOpts(opts) user = self.core._userFromOpts(opts) MSG_QUEUE_SIZE = 1000 chan = asyncio.Queue(MSG_QUEUE_SIZE) synt = await self.core.boss.promote('storm', user=user, info={'query': text}) show = opts.get('show', set()) mode = opts.get('mode', 'storm') editformat = opts.get('editformat', 'nodeedits') if editformat not in ('nodeedits', 'splices', 'count', 'none'): raise s_exc.BadConfValu(mesg='editformat') async def runStorm(): cancelled = False tick = s_common.now() count = 0 try: await chan.put(('init', {'tick': tick, 'text': text, 'task': synt.iden})) await self.core.getStormQuery(text, mode=mode) shownode = (not show or 'node' in show) async with await self.snap(user=user) as snap: if not show: snap.link(chan.put) else: [snap.on(n, chan.put) for n in show] if shownode: async for pode in snap.iterStormPodes(text, opts=opts, user=user): await chan.put(('node', pode)) count += 1 else: async for item in snap.storm(text, opts=opts, user=user): count += 1 except s_stormctrl.StormExit: pass except asyncio.CancelledError: logger.warning('Storm runtime cancelled.', extra={'synapse': {'text': text, 'username': user.name, 'user': user.iden}}) cancelled = True raise except Exception as e: logger.exception(f'Error during storm execution for {{ {text} }}', extra={'synapse': {'text': text, 'username': user.name, 'user': user.iden}}) enfo = s_common.err(e) enfo[1].pop('esrc', None) enfo[1].pop('ename', None) await chan.put(('err', enfo)) finally: if not cancelled: tock = s_common.now() took = tock - tick await chan.put(('fini', {'tock': tock, 'took': took, 'count': count})) await synt.worker(runStorm()) editformat = opts.get('editformat', 'nodeedits') while True: mesg = await chan.get() kind = mesg[0] if kind == 'node': yield mesg continue if kind == 'node:edits': if editformat == 'nodeedits': nodeedits = s_common.jsonsafe_nodeedits(mesg[1]['edits']) mesg[1]['edits'] = nodeedits yield mesg continue if editformat == 'none': continue if editformat == 'count': count = sum(len(edit[2]) for edit in mesg[1].get('edits', ())) mesg = ('node:edits:count', {'count': count}) yield mesg continue assert editformat == 'splices' nodeedits = mesg[1].get('edits', [()]) async for _, splice in 
self.layers[0].makeSplices(0, nodeedits, None): if not show or splice[0] in show: yield splice continue if kind == 'fini': yield mesg break yield mesg async def iterStormPodes(self, text, opts=None): opts = self.core._initStormOpts(opts) user = self.core._userFromOpts(opts) await self.core.boss.promote('storm', user=user, info={'query': text}) async with await self.snap(user=user) as snap: async for pode in snap.iterStormPodes(text, opts=opts, user=user): yield pode async def snap(self, user): if self.invalid is not None: raise s_exc.NoSuchLayer(iden=self.invalid) return await self.snapctor(self, user) @s_nexus.Pusher.onPushAuto('view:set') async def setViewInfo(self, name, valu): if name not in ('name', 'desc',): mesg = f'{name} is not a valid view info key' raise s_exc.BadOptValu(mesg=mesg) await self.info.set(name, valu) return valu async def addLayer(self, layriden, indx=None): if any(layriden == layr.iden for layr in self.layers): raise s_exc.DupIden(mesg='May not have the same layer in a view twice') return await self._push('view:addlayer', layriden, indx) @s_nexus.Pusher.onPush('view:addlayer') async def _addLayer(self, layriden, indx=None): for view in self.core.views.values(): if view.parent is self: raise s_exc.ReadOnlyLayer(mesg='May not change layers that have been forked from') if self.parent is not None: raise s_exc.ReadOnlyLayer(mesg='May not change layers of forked view') layr = self.core.layers.get(layriden) if layr is None: raise s_exc.NoSuchLayer(iden=layriden) if layr in self.layers: return if indx is None: self.layers.append(layr) else: self.layers.insert(indx, layr) await self.info.set('layers', [lyr.iden for lyr in self.layers]) @s_nexus.Pusher.onPushAuto('view:setlayers') async def setLayers(self, layers): for view in self.core.views.values(): if view.parent is self: raise s_exc.ReadOnlyLayer(mesg='May not change layers that have been forked from') if self.parent is not None: raise s_exc.ReadOnlyLayer(mesg='May not change layers of forked view') layrs = [] for iden in layers: layr = self.core.layers.get(iden) if layr is None: raise s_exc.NoSuchLayer(iden=iden) if not layrs and layr.readonly: raise s_exc.ReadOnlyLayer(mesg=f'First layer {layr.iden} must not be read-only') layrs.append(layr) self.invalid = None self.layers = layrs await self.info.set('layers', layers)
Apache License 2.0
srittau/python-htmlgen
htmlgen/form.py
DateInput.date
python
def date(self, date):
    self.value = date.strftime("%Y-%m-%d") if date else ""
Set the element's value to the given date. If date is None, clear the value.
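For illustration, setting and clearing the date on a DateInput (the constructor and setter are shown in the module below; the dates are placeholders):

import datetime

start = DateInput(name="start")
start.date = datetime.date(2024, 3, 15)   # renders value="2024-03-15"
start.date = None                         # clears the value attribute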
https://github.com/srittau/python-htmlgen/blob/6d29896caed54a4a6ef1dbab1dbb2ad9d2861f01/htmlgen/form.py#L203-L206
import datetime import re from enum import Enum from htmlgen.attribute import ( html_attribute, boolean_html_attribute, int_html_attribute, float_html_attribute, time_html_attribute, list_html_attribute, enum_attribute, ) from htmlgen.block import Division from htmlgen.element import Element, VoidElement, is_element from htmlgen.timeutil import parse_rfc3339_partial_time _ENC_TYPE_URL_ENCODED = "application/x-www-form-urlencoded" _ENC_TYPE_MULTI_PART = "multipart/form-data" class Autocomplete(Enum): OFF = "off" ON = "on" class Form(Element): def __init__(self, method="GET", url=""): super().__init__("form") self.method = method self.url = url method = html_attribute("method", default="GET") url = html_attribute("action", default="") target = html_attribute("target", "_self") encryption_type = html_attribute("enctype", _ENC_TYPE_URL_ENCODED) autocomplete = enum_attribute("autocomplete", Autocomplete) def set_blank_target(self): self.target = "_blank" @property def multipart(self): return self.encryption_type == _ENC_TYPE_MULTI_PART @multipart.setter def multipart(self, multipart): if multipart: self.encryption_type = _ENC_TYPE_MULTI_PART else: self.encryption_type = _ENC_TYPE_URL_ENCODED class Input(VoidElement): def __init__(self, type_="text", name=""): super().__init__("input") self.type = type_ self.name = name name = html_attribute("name", default="") value = html_attribute("value", default="") readonly = boolean_html_attribute("readonly") disabled = boolean_html_attribute("disabled") type = html_attribute("type") autocomplete = html_attribute("autocomplete") placeholder = html_attribute("placeholder") size = int_html_attribute("size") focus = boolean_html_attribute("autofocus") class TextInput(Input): def __init__(self, name="", value=""): super().__init__("text", name) self.value = value class SearchInput(Input): def __init__(self, name=""): super().__init__("search", name) class PasswordInput(Input): def __init__(self, name=""): super().__init__("password", name) class NumberInput(Input): def __init__(self, name="", number=None): super().__init__("number", name) if number is not None: self.number = number number = float_html_attribute("value") minimum = float_html_attribute("min") maximum = float_html_attribute("max") step = float_html_attribute("step") class DateInput(Input): def __init__(self, name="", date=None): super().__init__("date", name) self.date = date @property def date(self): return self._parse_date(self.value) @date.setter
MIT License
planetfederal/geogig-py
src/geogigpy/repo.py
Repository.commit
python
def commit(self, message, paths=[]):
    self.connector.commit(message, paths)
    self.cleancache()
Creates a new commit with the changes in the specified paths.
If no paths are passed, it will commit all staged features.

Raises an UnconfiguredUserException if there is no user configured and it cannot commit.
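A hedged usage sketch built from the methods visible in this module; the repository path and commit messages are placeholders.

repo = Repository('/data/geogig/parcels', init=True)
repo.add()                                       # stage all changes
repo.commit("Initial import of parcels layer")

# or stage and commit selected paths in one call
repo.addandcommit("Fix geometry in parcel 42", paths=["parcels/42"])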
https://github.com/planetfederal/geogig-py/blob/7542cf136837ef080924ba4ddbfd3b9baf21bee7/src/geogigpy/repo.py#L371-L379
__author__ = 'Victor Olaya' __date__ = 'November 2013' __copyright__ = '(C) 2013-2016 Boundless, http://boundlessgeo.com' __revision__ = '$Format:%H$' import re from commitish import Commitish from tag import Tag import geogig from geogigexception import GeoGigException from feature import Feature from tree import Tree from utils import mkdir from py4jconnector import Py4JCLIConnector from geogigserverconnector import GeoGigServerConnector import tempfile import datetime def _resolveref(ref): if ref is None: return None if isinstance(ref, Commitish): return ref.ref elif isinstance(ref, basestring): return ref else: return str(ref) SHA_MATCHER = re.compile(r"\b([a-f0-9]{40})\b") class Repository(object): _logcache = None def __init__(self, url, connector=None, init=False, initParams=None): self.url = url self.connector = Py4JCLIConnector() if connector is None else connector if init: try: mkdir(url) except Exception, e: raise GeoGigException("Cannot create repository folder.\nCheck that path is correct and you have permission") self.connector.setRepository(self) try: self.connector.checkisrepo() isAlreadyRepo = True except GeoGigException, e: isAlreadyRepo = False if init: if isAlreadyRepo: raise GeoGigException("Cannot init, the folder is already a geogig repository") else: self.init(initParams) self.connector.checkisrepo() self.cleancache() @staticmethod def newrepofromclone(url, path, connector=None, username=None, password=None): connector = Py4JCLIConnector() if connector is None else connector connector.clone(url, path, username, password) return Repository(path, connector) def createdat(self): return self.connector.createdat() def cleancache(self): self._logcache = None def description(self): return '' def revparse(self, rev): if SHA_MATCHER.match(rev) is not None: return rev else: return self.connector.revparse(rev) @property def head(self): return self.connector.head() @property def index(self): return Commitish(self, geogig.STAGE_HEAD) @property def workingtree(self): return Commitish(self, geogig.WORK_HEAD) @property def master(self): return Commitish(self, geogig.MASTER) def isdetached(self): return self.head.id == self.head.ref def synced(self, branch=geogig.HEAD, credentials=None): if (branch == geogig.HEAD and self.isdetached()): raise GeoGigException("Cannot use current branch. 
The repository has a detached HEAD") remotes = self.remotes if remotes: if "origin" in remotes: remote = remotes["origin"] remotename = "origin" else: remotename = remotes.keys()[0] remote = remotes.values()[0] else: raise GeoGigException("No remotes defined") if isremoteurl(remote): repo = Repository(remote, GeoGigServerConnector(credentials)) else: conn = self.connector.__class__() repo = Repository(remote[len("file:/"):], conn) localtip = self.revparse(branch) remotetip = repo.revparse(branch) if remotetip == localtip: return 0, 0 if remotetip == geogig.NULL_ID: log = self.log(branch) push = len(log) pull = 0 else: trackedbranchhead = self.revparse("refs/remotes/" + remotename + "/" + branch) log = self.log(branch, trackedbranchhead) push = len(log) log = repo.log(branch, trackedbranchhead) pull = len(log) return push, pull def mergemessage(self): return self.connector.mergemessage() def log(self, tip=None, sincecommit=None, until=None, since=None, path=None, n=None): tip = tip or geogig.HEAD if path is not None or tip != geogig.HEAD or n is not None or since is not None or until is not None or sincecommit is not None: return self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n) if self._logcache is None: self._logcache = self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n) return self._logcache def commitatdate(self, t): epoch = datetime.datetime.utcfromtimestamp(0) delta = t - epoch milisecs = int(delta.total_seconds()) * 1000 log = self.connector.log(geogig.HEAD, until=str(milisecs), n=1) if log: return log[0] else: raise GeoGigException("Invalid date for this repository") @property def trees(self): return self._trees() def _trees(self, ref=geogig.HEAD, path=None, recursive=False): return [e for e in self.children(ref, path, recursive) if isinstance(e, Tree)] def features(self, ref=geogig.HEAD, path=None, recursive=False): return [e for e in self.children(ref, path, recursive) if isinstance(e, Feature)] def children(self, ref=geogig.HEAD, path=None, recursive=False): return self.connector.children(_resolveref(ref), path, recursive) @property def branches(self): return self.connector.branches() @property def tags(self): tags = self.connector.tags() tags = {k: Tag(self, v, k) for k, v in tags.iteritems()} return tags def clone(self, path): url = self.url.replace('\\', '/') self.connector.clone(url, path) return Repository(path, self.connector.__class__(), False) def createbranch(self, ref, name, force=False, checkout=False): if checkout: self.cleancache() return self.connector.createbranch(_resolveref(ref), name, force, checkout) def deletebranch(self, name, remote=False): self.connector.deletebranch(name, remote) def createtag(self, ref, name, message): self.connector.createtag(_resolveref(ref), name, message) def deletetag(self, name): self.connector.deletetag(name) def diff(self, refa=geogig.HEAD, refb=geogig.WORK_HEAD, path=None): return self.connector.diff(_resolveref(refa), _resolveref(refb), path) def difftreestats(self, refa=geogig.HEAD, refb=geogig.WORK_HEAD): return self.connector.difftreestats(_resolveref(refa), _resolveref(refb)) def treediff(self, path, refa=geogig.HEAD, refb=geogig.WORK_HEAD): return self.connector.treediff(path, _resolveref(refa), _resolveref(refb)) def unstaged(self): return self.diff(geogig.STAGE_HEAD, geogig.WORK_HEAD) def staged(self): return self.diff(geogig.HEAD, geogig.STAGE_HEAD) def notindatabase(self): return 
self.diff(geogig.HEAD, geogig.WORK_HEAD) def conflicts(self): conflicts = {} _conflicts = self.connector.conflicts() for path, c in _conflicts.iteritems(): c = tuple(Feature(self, ref, path) for ref in c) conflicts[path] = c return conflicts def checkout(self, ref, paths=None, force=False): self.connector.checkout(_resolveref(ref), paths, force) self.cleancache() def updatepathtoref(self, ref, paths): ref = _resolveref(ref) for path in paths: self.connector.reset(ref, path=path) return self.connector.checkout(ref, paths) def solveconflict(self, path, attributes): self.reset(geogig.HEAD, path=path) self.insertfeature(path, attributes) self.add([path]) def solveconflicts(self, paths, version=geogig.OURS): self.connector.solveconflicts(paths, version) def add(self, paths=[]): self.connector.add(paths) def addandcommit(self, message, paths=[]): self.add(paths) return self.commit(message, paths)
BSD 3-Clause New or Revised License
justdoit0823/pywxclient
examples/thread_client.py
build_contact
python
def build_contact(client):
    contacts = client.get_contact()
    for user in contacts:
        _client_contacts[user['UserName']] = user
    _client_contacts[client.user['UserName']] = client.user
Get user WeChat contact.
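A hedged sketch: it assumes `client` is an already logged-in SyncClient (the login flow from the surrounding example script is omitted here).

build_contact(client)
# contacts are cached by UserName; 'filehelper' is WeChat's built-in file helper account
file_helper = _client_contacts.get('filehelper')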
https://github.com/justdoit0823/pywxclient/blob/9a61c4c0c26d6566e6121641ab37c35b176d8e20/examples/thread_client.py#L54-L60
import click import queue import sys import threading import time from logging import config, getLogger from pywxclient.core import Session, SyncClient, TextMessage, parse_message from pywxclient.core.exception import ( WaitScanQRCode, RequestError, APIResponseError, SessionExpiredError, UnsupportedMessage) LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': ( '[%(levelname)1.1s %(asctime)s %(process)d %(module)s:' '%(lineno)d] %(message)s') }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { }, 'handlers': { 'console_log': { 'level': 'DEBUG', 'filters': [], 'class': 'logging.FileHandler', 'filename': 'wechat_client.log', 'formatter': 'verbose' } }, 'loggers': { 'client': { 'handlers': ['console_log'], 'level': 'DEBUG' } } } _client_contacts = {}
Apache License 2.0
tdda/tdda
tdda/rexpy/rexpy.py
Categories.build_cat_map
python
def build_cat_map(self):
    self.code2cat = {}
    for k in self.__dict__:
        cat = self.__dict__[k]
        code = getattr(cat, 'code', None)
        if code:
            self.code2cat[code] = cat
Lazily builds (on first use) mapping from single-character category
codes to Category Objects, stores in self.code2cat, which is used
by __getitem__.

e.g. 'N' --> self.ALPHANUMERIC
     'X' --> self.Hex
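For example, once the map is built the single-character codes resolve to the Category objects defined in __init__ (the 'D' code maps to the Digit category shown in the module below):

cats = Categories()
cats.build_cat_map()
digit = cats.code2cat['D']            # the Digit category
print(digit.name, digit.re_string)    # Digit \d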
https://github.com/tdda/tdda/blob/2148de2b042cc34ad33a66df3349c6d5e4a1500b/tdda/rexpy/rexpy.py#L264-L278
from __future__ import division from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals import random import re import string import sys from collections import Counter, defaultdict, namedtuple, OrderedDict from pprint import pprint from tdda import __version__ isPython2 = sys.version_info[0] < 3 str_type = unicode if isPython2 else str bytes_type = str if isPython2 else bytes INT_ARRAY = b'i' if sys.version_info[0] < 3 else 'i' UNESCAPES = '''!"%',/:;<=>@_` ''' USAGE = re.sub(r'^(.*)Python API.*$', '', __doc__.replace('Usage::', 'Usage:')) MIN_MERGE_SIMILARITY = 0.5 TERMINATE = True N_ALIGNMENT_LEVELS = 1 MAX_GROUPS = 99 MAX_VRLE_RANGE = 2 VARIABLE_LENGTH_FRAGS = False VERBOSITY = 0 MAX_PATTERNS = None MIN_DIFF_STRINGS_PER_PATTERN = 1 MIN_STRINGS_PER_PATTERN = 1 USE_SAMPLING = False RE_FLAGS = re.UNICODE | re.DOTALL DIALECTS = ['perl'] class SIZE(object): if USE_SAMPLING: DO_ALL = 100 else: DO_ALL = 100000000 DO_ALL_EXCEPTIONS = 4000 N_PER_LENGTH = 64 MAX_SAMPLED_ATTEMPTS = 2 MAX_PUNC_IN_GROUP = 5 MAX_STRINGS_IN_GROUP = 10 nCalls = 0 memo = {} def cre(rex): global nCalls, memo nCalls += 1 c = memo.get(rex) if c: return c else: memo[rex] = c = re.compile(rex, RE_FLAGS) return c def terminated_cre(expr): return cre('^%s$' % expr) def terminated_re(expr): return '^%s$' % expr if TERMINATE: poss_term_cre = terminated_cre poss_term_re = terminated_re else: def poss_term_re(expr): return expr def poss_term_cre(expr): return cre(expr) class CODE(object): ANY = '?' PUNC = '.' class Category(object): def __init__(self, name, code, re_string): self.name = name self.code = code self.re_string = re_string self.re_single = poss_term_cre(re_string) self.re_multiple = poss_term_cre(re_string + '+') UNICHRS = True UNIC = 'Ḉ' COARSEST_ALPHANUMERIC_CODE = UNIC if UNICHRS else 'C' class Categories(object): escapableCodes = '.*?' 
def __init__(self, extra_letters=None, full_escape=False, dialect=None): if extra_letters: assert all(L in '_.-' for L in extra_letters) extra_letters = ''.join(e for e in '_.-' if e in extra_letters) el_re = extra_letters el_re_exc = '' if '_' in extra_letters else '_' else: el_re = '' el_re_exc = '_' el_re_inc = (extra_letters or '').replace('_', '') punctuation = self.PunctuationChars(el_re) self.dialect = dialect self.extra_letters = extra_letters or '' self.full_escape = full_escape self.LETTER = Category('LETTER', 'A', '[A-Z]') self.letter = Category('letter', 'a', '[a-z]') self.Letter = Category('Letter', 'L', '[A-Za-z]') self.ULetter = Category('ULetter', 'Ḹ', r'[^\W0-9_]') if extra_letters: self.LETTER_ = Category('LETTER_', 'B', '[A-Z%s]' % el_re) self.letter_ = Category('letter_', 'b', '[a-z%s]' % el_re) self.Letter_ = Category('Letter_', 'M', '[A-Za-z%s]' % el_re) if extra_letters == '_': self.ULetter_ = Category('ULetter_', 'Ṃ', r'[^\W0-9]') else: p = u_alpha_numeric_re(el_re_inc, el_re_exc, digits=False, dialect=dialect) self.ULetter_ = Category('ULetter_', 'Ṃ', p) ExtraLetterGroups = ['LETTER_', 'letter_', 'Letter_'] + ( ['ULetter_'] if UNICHRS else [] ) else: self.ULetter_ = Category('ULetter_', 'Ṃ', r'[^\W0-9_]') ExtraLetterGroups = [] self.Digit = Category('Digit', 'D', r'\d') self.hex = Category('hex', 'h', '[0-9a-f]') self.HEX = Category('HEX', 'H', '[0-9A-F]') self.Hex = Category('Hex', 'X', '[0-9a-fA-F]') self.ALPHANUMERIC = Category('ALPHANUMERIC', 'N', '[A-Z0-9%s]' % el_re) self.alphanumeric = Category('alphanumeric', 'n', '[a-z0-9%s]' % el_re) self.AlphaNumeric = Category('AlphaNumeric', 'C', '[A-Za-z0-9%s]' % el_re) self.UAlphaNumeric = Category('UAlphaNumeric', 'Ḉ', u_alpha_numeric_re(el_re_inc, el_re_exc, dialect=dialect)) self.Whitespace = Category('Whitespace', ' ', r'\s') self.Punctuation = Category('Punctuation', CODE.PUNC, escaped_bracket(punctuation, dialect=dialect)) self.Other = Category('Other', '*', r'[^!-~\s]') self.Any = Category('Any', CODE.ANY, '.') self.SpecificCoarseCats = [self.UAlphaNumeric if UNICHRS else self.AlphaNumeric, self.Whitespace, self.Punctuation] self.AllCoarseCats = self.SpecificCoarseCats + [self.Other] self.IncreasinglyGeneralAlphanumerics = [ 'Digit', 'LETTER', 'letter', 'Letter', ] + ( ['ULetter'] if UNICHRS else [] ) + ExtraLetterGroups + [ 'HEX', 'hex', 'Hex', 'ALPHANUMERIC', 'alphanumeric', 'AlphaNumeric', ] + ( ['UAlphaNumeric'] if UNICHRS else [] ) def PunctuationChars(self, el_re): specials = re.compile(r'[A-Za-z0-9\s%s]' % el_re, RE_FLAGS) return [chr(c) for c in range(32, 127) if not re.match(specials, chr(c))]
MIT License
hetida/hetida-designer
runtime/hetdesrun/runtime/engine/plain/workflow.py
Workflow.add_constant_providing_node
python
def add_constant_providing_node(
    self,
    values: List[NamedDataTypedValue],
    add_new_provider_node_to_workflow: bool = True,
    id_suffix: str = "",
) -> None:
    try:
        parsed_values = parse_dynamically_from_datatypes(values).dict()
    except ValidationError as e:
        raise WorkflowInputDataValidationError(
            "The provided data or some constant values could not be parsed into the "
            "respective workflow input datatypes"
        ) from e

    Const_Node = ComputationNode(
        func=lambda: parsed_values,
        inputs={},
        operator_name="constant_provider",
        operator_hierarchical_id=self.operator_hierarchical_id
        + ":constant_provider"
        + "_"
        + id_suffix,
    )
    if add_new_provider_node_to_workflow:
        self.sub_nodes.append(Const_Node)

    self.add_inputs({key: (Const_Node, key) for key in parsed_values.keys()})
Add a node with no inputs providing workflow input data
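A hedged sketch; it assumes an existing Workflow instance, and the exact field layout of the NamedDataTypedValue entries (name/type/value) is assumed here rather than taken from the hetdesrun datatypes module.

constant_values = [
    {"name": "threshold", "type": "FLOAT", "value": 0.75},     # hypothetical constants
    {"name": "window_size", "type": "INT", "value": 10},
]
workflow.add_constant_providing_node(constant_values, id_suffix="defaults")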
https://github.com/hetida/hetida-designer/blob/4a306ea855fb6f009f5180cf8befe09365c71fd8/runtime/hetdesrun/runtime/engine/plain/workflow.py#L336-L362
from typing import ( Protocol, Dict, Tuple, Any, List, Callable, Coroutine, Optional, Union, ) import logging from inspect import signature, Parameter from cached_property import cached_property from pydantic import ValidationError from hetdesrun.datatypes import NamedDataTypedValue, parse_dynamically_from_datatypes from hetdesrun.runtime import runtime_component_logger from hetdesrun.runtime.logging import execution_context_filter from hetdesrun.runtime.engine.plain.execution import run_func_or_coroutine from hetdesrun.runtime.exceptions import ( RuntimeExecutionError, CircularDependency, MissingOutputException, MissingInputSource, WorkflowInputDataValidationError, ) logger = logging.getLogger(__name__) logger.addFilter(execution_context_filter) runtime_component_logger.addFilter(execution_context_filter) class Node(Protocol): _in_computation: bool = False operator_hierarchical_id: str = "UNKNOWN" operator_name: str = "UNKNOWN" @cached_property async def result(self) -> Dict[str, Any]: ... def add_inputs(self, new_inputs: Dict[str, Tuple["Node", str]]) -> None: ... class ComputationNode: def __init__( self, func: Union[Coroutine, Callable], inputs: Optional[Dict[str, Tuple[Node, str]]] = None, operator_hierarchical_id: str = "UNKNOWN", component_id: str = "UNKNOWN", operator_name: str = "UNKNOWN", ) -> None: self.inputs: Dict[str, Tuple[Node, str]] = {} if inputs is not None: self.add_inputs(inputs) self.func = func self.required_params = self._infer_required_params() self._in_computation = False self.operator_hierarchical_id = operator_hierarchical_id self.operator_name = operator_name self.component_id = component_id self._in_computation = False def add_inputs(self, new_inputs: Dict[str, Tuple[Node, str]]) -> None: self.inputs.update(new_inputs) def _infer_required_params(self) -> List[str]: kwargable_params = [ param for param in signature(self.func).parameters.values() if ( param.kind == Parameter.POSITIONAL_OR_KEYWORD or param.kind == Parameter.KEYWORD_ONLY ) ] return [ param.name for param in kwargable_params if param.default is Parameter.empty ] def all_required_inputs_set(self) -> bool: return set(self.required_params).issubset(set(self.inputs.keys())) def _check_inputs(self) -> None: if not self.all_required_inputs_set(): logger.info("Computation node execution failed due to missing input source") raise MissingInputSource( f"Inputs of computation node operator {self.operator_hierarchical_id} are missing" ).set_context( node_instance_id=self.operator_hierarchical_id, component_uuid=self.component_id, ) async def _gather_data_from_inputs(self) -> Dict[str, Any]: input_value_dict: Dict[str, Any] = {} for (input_name, (another_node, output_name)) in self.inputs.items(): if another_node._in_computation: msg = ( f"Circular Dependency detected at operator {self.operator_hierarchical_id}" f" whith input '{input_name}' pointing to output '{output_name}'" f" of operator {another_node.operator_hierarchical_id}" ) logger.info(msg) raise CircularDependency(msg).set_context( node_instance_id=self.operator_hierarchical_id, component_uuid=self.component_id, ) try: input_value_dict[input_name] = (await another_node.result)[output_name] except KeyError as e: logger.info( "Execution failed due to missing output of a node", exc_info=True, ) raise MissingOutputException( "Could not obtain output result from another node while preparing to " "run operator" ).set_context( node_instance_id=self.operator_hierarchical_id, component_uuid=self.component_id, ) from e return input_value_dict async def 
_run_comp_func(self, input_values: Dict[str, Any]) -> Dict[str, Any]: try: function_result: Dict[str, Any] = await run_func_or_coroutine( self.func, input_values ) function_result = function_result if function_result is not None else {} except RuntimeExecutionError as e: e.set_context(self.operator_hierarchical_id, self.component_id) logger.info( ( "User raised Runtime execution exception during component execution" " of component operator %s with UUID %s with component UUID %s" ), self.operator_name, self.operator_hierarchical_id, self.component_id, exc_info=True, ) raise except Exception as e: logger.info( "Exception during Component execution of component instance %s", self.operator_hierarchical_id, exc_info=True, ) raise RuntimeExecutionError( f"Exception during Component execution of component instance" f" {self.operator_hierarchical_id} from component {self.component_id}: {str(e)}" ).set_context(self.operator_hierarchical_id, self.component_id) from e if not isinstance( function_result, dict ): msg = ( f"Component function of component instance {self.operator_hierarchical_id} from " f"component {self.component_id} did not return an output dict!" ) logger.info(msg) raise RuntimeExecutionError(msg).set_context( self.operator_hierarchical_id, self.component_id ) return function_result async def _compute_result(self) -> Dict[str, Any]: execution_context_filter.bind_context( currently_executed_instance_id=self.operator_hierarchical_id, currently_executed_component_id=self.component_id, currently_executed_component_node_name=self.operator_name, ) logger.info( "Starting computation for operator %s of type component with operator id %s", self.operator_name, self.operator_hierarchical_id, ) self._in_computation = True self._check_inputs() input_values = await self._gather_data_from_inputs() function_result = await self._run_comp_func(input_values) self._in_computation = False execution_context_filter.clear_context() return function_result @cached_property async def result(self) -> Dict[str, Any]: return await self._compute_result() class Workflow: def __init__( self, sub_nodes: List[Node], input_mappings: Dict[str, Tuple[Node, str]], output_mappings: Dict[ str, Tuple[Node, str] ], inputs: Optional[Dict[str, Tuple[Node, str]]] = None, operator_hierarchical_id: str = "UNKNOWN", operator_name: str = "UNKNOWN", ): self.sub_nodes = sub_nodes self.input_mappings = ( input_mappings ) self.output_mappings = ( output_mappings ) self.inputs: Dict[str, Tuple[Node, str]] = {} if inputs is not None: self.add_inputs(inputs) self._in_computation: bool = False self.operator_hierarchical_id = operator_hierarchical_id self.operator_name = operator_name def add_inputs(self, new_inputs: Dict[str, Tuple[Node, str]]) -> None: self.inputs.update(new_inputs) for key, (another_node, output_name) in new_inputs.items(): sub_node, sub_node_input_name = self.input_mappings[key] sub_node.add_inputs({sub_node_input_name: (another_node, output_name)})
MIT License
stevezheng23/sequence_labeling_tf
sequence_labeling/model/seq_crf.py
CharFeat.__init__
python
def __init__(self, vocab_size, embed_dim, unit_dim, window_size, activation,
             pooling_type, dropout, num_gpus=1, default_gpu_id=0, regularizer=None,
             random_seed=0, trainable=True, scope="char_feat"):
    self.vocab_size = vocab_size
    self.embed_dim = embed_dim
    self.unit_dim = unit_dim
    self.window_size = window_size
    self.activation = activation
    self.pooling_type = pooling_type
    self.dropout = dropout
    self.num_gpus = num_gpus
    self.default_gpu_id = default_gpu_id
    self.regularizer = regularizer
    self.random_seed = random_seed
    self.trainable = trainable
    self.scope = scope

    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim,
            None, False, self.num_gpus, self.default_gpu_id, None, self.random_seed,
            self.trainable)

        self.conv_layer = create_convolution_layer("stacked_multi_1d", 1, self.embed_dim,
            self.unit_dim, self.window_size, 1, "SAME", self.activation, [0.0], None,
            False, False, True, self.num_gpus, self.default_gpu_id, self.regularizer,
            self.random_seed, self.trainable)

        self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus,
            self.default_gpu_id, self.random_seed)

        self.pooling_layer = create_pooling_layer(self.pooling_type, -1, 1,
            self.num_gpus, self.default_gpu_id)
initialize char-level featurization layer
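An illustrative construction of the char-level featurization layer; the parameter values are placeholders, and text_char / text_char_mask stand in for the data pipeline tensors wired up in _build_representation_layer in the module below.

char_feat_layer = CharFeat(
    vocab_size=128,
    embed_dim=16,
    unit_dim=64,
    window_size=[3, 5],
    activation="relu",
    pooling_type="max",
    dropout=0.1,
)
text_char_feat, text_char_feat_mask = char_feat_layer(text_char, text_char_mask)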
https://github.com/stevezheng23/sequence_labeling_tf/blob/05fcbec15e359e3db86af6c3798c13be8a6c58ee/sequence_labeling/model/seq_crf.py#L485-L524
import collections import functools import os.path import operator import time import numpy as np import tensorflow as tf from functools import reduce from util.default_util import * from util.sequence_labeling_util import * from util.layer_util import * from model.base_model import * __all__ = ["SequenceCRF"] class SequenceCRF(BaseModel): def __init__(self, logger, hyperparams, data_pipeline, external_data, mode="train", scope="seq_crf"): super(SequenceCRF, self).__init__(logger=logger, hyperparams=hyperparams, data_pipeline=data_pipeline, external_data=external_data, mode=mode, scope=scope) with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): text_word = self.data_pipeline.input_text_word text_word_mask = self.data_pipeline.input_text_word_mask text_char = self.data_pipeline.input_text_char text_char_mask = self.data_pipeline.input_text_char_mask text_ext = None text_ext_mask = None label_inverted_index = self.data_pipeline.label_inverted_index self.word_vocab_size = self.data_pipeline.word_vocab_size self.char_vocab_size = self.data_pipeline.char_vocab_size self.sequence_length = tf.cast(tf.reduce_sum(text_word_mask, axis=[-1, -2]), dtype=tf.int32) """build graph for sequence crf model""" self.logger.log_print("# build graph") predict, predict_mask, transition_matrix = self._build_graph(text_word, text_word_mask, text_char, text_char_mask, text_ext, text_ext_mask) masked_predict = predict * predict_mask self.index_predict, _ = tf.contrib.crf.crf_decode(masked_predict, transition_matrix, self.sequence_length) self.text_predict = label_inverted_index.lookup(tf.cast(self.index_predict, dtype=tf.int64)) self.variable_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) self.variable_lookup = {v.op.name: v for v in self.variable_list} self.transferable_list = tf.get_collection(TRANSFERABLE_VARIABLES) self.transferable_lookup = {v.op.name: v for v in self.transferable_list} if self.hyperparams.train_ema_enable == True: self.ema = self._get_exponential_moving_average(self.global_step) self.variable_lookup = {self.ema.average_name(v): v for v in self.variable_list} self.transferable_lookup = {self.ema.average_name(v): v for v in self.transferable_list} if self.mode == "train": self.global_step = tf.get_variable("global_step", shape=[], dtype=tf.int32, initializer=tf.zeros_initializer, trainable=False) label = tf.squeeze(self.data_pipeline.input_label, axis=-1) label_mask = tf.squeeze(self.data_pipeline.input_label_mask, axis=-1) masked_label = tf.cast(label * label_mask, dtype=tf.int32) """compute optimization loss""" self.logger.log_print("# setup loss computation mechanism") self.train_loss = self._compute_loss(masked_label, masked_predict, self.sequence_length, transition_matrix) if self.hyperparams.train_regularization_enable == True: regularization_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) regularization_loss = tf.contrib.layers.apply_regularization(self.regularizer, regularization_variables) self.train_loss = self.train_loss + regularization_loss """apply learning rate warm-up & decay""" self.logger.log_print("# setup initial learning rate mechanism") self.initial_learning_rate = tf.constant(self.hyperparams.train_optimizer_learning_rate) if self.hyperparams.train_optimizer_warmup_enable == True: self.logger.log_print("# setup learning rate warm-up mechanism") self.warmup_learning_rate = self._apply_learning_rate_warmup(self.initial_learning_rate) else: self.warmup_learning_rate = self.initial_learning_rate if self.hyperparams.train_optimizer_decay_enable == 
True: self.logger.log_print("# setup learning rate decay mechanism") self.decayed_learning_rate = self._apply_learning_rate_decay(self.warmup_learning_rate) else: self.decayed_learning_rate = self.warmup_learning_rate self.learning_rate = self.decayed_learning_rate """initialize optimizer""" self.logger.log_print("# setup training optimizer") self.optimizer = self._initialize_optimizer(self.learning_rate) """minimize optimization loss""" self.logger.log_print("# setup loss minimization mechanism") self.opt_op, self.clipped_gradients, self.gradient_norm = self._minimize_loss(self.train_loss) if self.hyperparams.train_ema_enable == True: with tf.control_dependencies([self.opt_op]): self.update_op = self.ema.apply(self.variable_list) self.variable_lookup = {self.ema.average_name(v): self.ema.average(v) for v in self.variable_list} else: self.update_op = self.opt_op """create train summary""" self.train_summary = self._get_train_summary() if self.mode == "online": if not tf.gfile.Exists(self.hyperparams.train_model_output_dir): tf.gfile.MakeDirs(self.hyperparams.train_model_output_dir) model_version = "{0}.{1}".format(self.hyperparams.train_model_version, time.time()) self.model_dir = os.path.join(self.hyperparams.train_model_output_dir, model_version) self.model_builder = tf.saved_model.builder.SavedModelBuilder(self.model_dir) """create checkpoint saver""" if not tf.gfile.Exists(self.hyperparams.train_ckpt_output_dir): tf.gfile.MakeDirs(self.hyperparams.train_ckpt_output_dir) self.ckpt_debug_dir = os.path.join(self.hyperparams.train_ckpt_output_dir, "debug") self.ckpt_epoch_dir = os.path.join(self.hyperparams.train_ckpt_output_dir, "epoch") self.ckpt_transfer_dir = os.path.join(self.hyperparams.train_ckpt_output_dir, "transfer") if not tf.gfile.Exists(self.ckpt_debug_dir): tf.gfile.MakeDirs(self.ckpt_debug_dir) if not tf.gfile.Exists(self.ckpt_epoch_dir): tf.gfile.MakeDirs(self.ckpt_epoch_dir) if not tf.gfile.Exists(self.ckpt_transfer_dir): tf.gfile.MakeDirs(self.ckpt_transfer_dir) self.ckpt_debug_name = os.path.join(self.ckpt_debug_dir, "model_debug_ckpt") self.ckpt_epoch_name = os.path.join(self.ckpt_epoch_dir, "model_epoch_ckpt") self.ckpt_debug_saver = tf.train.Saver(self.variable_lookup) self.ckpt_epoch_saver = tf.train.Saver(self.variable_lookup, max_to_keep=self.hyperparams.train_num_epoch) self.ckpt_transfer_saver = (tf.train.Saver(self.transferable_lookup) if any(self.transferable_lookup) else tf.train.Saver(self.variable_lookup)) def _build_representation_layer(self, text_word, text_word_mask, text_char, text_char_mask, text_ext, text_ext_mask): word_embed_dim = self.hyperparams.model_word_embed_dim word_dropout = self.hyperparams.model_word_dropout if self.mode == "train" else 0.0 word_embed_pretrained = self.hyperparams.model_word_embed_pretrained word_feat_trainable = self.hyperparams.model_word_feat_trainable word_feat_enable = self.hyperparams.model_word_feat_enable char_embed_dim = self.hyperparams.model_char_embed_dim char_unit_dim = self.hyperparams.model_char_unit_dim char_window_size = self.hyperparams.model_char_window_size char_hidden_activation = self.hyperparams.model_char_hidden_activation char_dropout = self.hyperparams.model_char_dropout if self.mode == "train" else 0.0 char_pooling_type = self.hyperparams.model_char_pooling_type char_feat_trainable = self.hyperparams.model_char_feat_trainable char_feat_enable = self.hyperparams.model_char_feat_enable ext_embed_dim = self.hyperparams.model_ext_embed_dim ext_feat_enable = self.hyperparams.model_ext_feat_enable 
ext_feat_mode = self.hyperparams.model_ext_feat_mode fusion_type = self.hyperparams.model_fusion_type fusion_num_layer = self.hyperparams.model_fusion_num_layer fusion_unit_dim = self.hyperparams.model_fusion_unit_dim fusion_hidden_activation = self.hyperparams.model_fusion_hidden_activation fusion_dropout = self.hyperparams.model_fusion_dropout if self.mode == "train" else 0.0 fusion_trainable = self.hyperparams.model_fusion_trainable with tf.variable_scope("representation", reuse=tf.AUTO_REUSE): text_feat_list = [] text_feat_mask_list = [] if word_feat_enable == True: self.logger.log_print("# build word-level representation layer") word_feat_layer = WordFeat(vocab_size=self.word_vocab_size, embed_dim=word_embed_dim, dropout=word_dropout, pretrained=word_embed_pretrained, embed_data=self.word_embedding, num_gpus=self.num_gpus, default_gpu_id=self.default_gpu_id, regularizer=self.regularizer, random_seed=self.random_seed, trainable=word_feat_trainable) (text_word_feat, text_word_feat_mask) = word_feat_layer(text_word, text_word_mask) text_feat_list.append(text_word_feat) text_feat_mask_list.append(text_word_feat_mask) word_unit_dim = word_embed_dim else: word_unit_dim = 0 if char_feat_enable == True: self.logger.log_print("# build char-level representation layer") char_feat_layer = CharFeat(vocab_size=self.char_vocab_size, embed_dim=char_embed_dim, unit_dim=char_unit_dim, window_size=char_window_size, activation=char_hidden_activation, pooling_type=char_pooling_type, dropout=char_dropout, num_gpus=self.num_gpus, default_gpu_id=self.default_gpu_id, regularizer=self.regularizer, random_seed=self.random_seed, trainable=char_feat_trainable) (text_char_feat, text_char_feat_mask) = char_feat_layer(text_char, text_char_mask) text_feat_list.append(text_char_feat) text_feat_mask_list.append(text_char_feat_mask) else: char_unit_dim = 0 if ext_feat_enable == True and ext_feat_mode == "fusion": self.logger.log_print("# build extended representation layer") text_feat_list.append(text_ext) text_feat_mask_list.append(text_ext_mask) ext_unit_dim = ext_embed_dim else: ext_unit_dim = 0 feat_unit_dim = word_unit_dim + char_unit_dim + ext_unit_dim feat_fusion_layer = FusionModule(input_unit_dim=feat_unit_dim, output_unit_dim=fusion_unit_dim, fusion_type=fusion_type, num_layer=fusion_num_layer, activation=fusion_hidden_activation, dropout=fusion_dropout, num_gpus=self.num_gpus, default_gpu_id=self.default_gpu_id, regularizer=self.regularizer, random_seed=self.random_seed, trainable=fusion_trainable) text_feat, text_feat_mask = feat_fusion_layer(text_feat_list, text_feat_mask_list) return text_feat, text_feat_mask def _build_modeling_layer(self, text_feat, text_feat_mask, text_ext, text_ext_mask): ext_embed_dim = self.hyperparams.model_ext_embed_dim ext_feat_enable = self.hyperparams.model_ext_feat_enable ext_feat_mode = self.hyperparams.model_ext_feat_mode sequence_num_layer = self.hyperparams.model_sequence_num_layer sequence_unit_dim = self.hyperparams.model_sequence_unit_dim sequence_cell_type = self.hyperparams.model_sequence_cell_type sequence_hidden_activation = self.hyperparams.model_sequence_hidden_activation sequence_dropout = self.hyperparams.model_sequence_dropout if self.mode == "train" else 0.0 sequence_forget_bias = self.hyperparams.model_sequence_forget_bias sequence_residual_connect = self.hyperparams.model_sequence_residual_connect sequence_trainable = self.hyperparams.model_sequence_trainable labeling_unit_dim = self.hyperparams.model_labeling_unit_dim labeling_dropout = 
self.hyperparams.model_labeling_dropout labeling_trainable = self.hyperparams.model_labeling_trainable labeling_transferable = self.hyperparams.model_labeling_transferable with tf.variable_scope("modeling", reuse=tf.AUTO_REUSE): self.logger.log_print("# build sequence modeling layer") sequence_modeling_layer = create_recurrent_layer("bi", sequence_num_layer, sequence_unit_dim, sequence_cell_type, sequence_hidden_activation, sequence_dropout, sequence_forget_bias, sequence_residual_connect, None, self.num_gpus, self.default_gpu_id, self.random_seed, sequence_trainable) (text_sequence_modeling, text_sequence_modeling_mask, _, _) = sequence_modeling_layer(text_feat, text_feat_mask) if labeling_transferable == False: pre_labeling_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if ext_feat_enable == True and ext_feat_mode == "direct": text_sequence_modeling = tf.concat([text_sequence_modeling, text_ext], axis=-1) text_sequence_modeling_mask = tf.reduce_max(tf.concat( [text_sequence_modeling_mask, text_ext_mask], axis=-1), axis=-1, keepdims=True) labeling_modeling_layer = create_dense_layer("single", 1, labeling_unit_dim, 1, "", [labeling_dropout], None, False, False, True, self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, labeling_trainable) (text_labeling_modeling, text_labeling_modeling_mask) = labeling_modeling_layer(text_sequence_modeling, text_sequence_modeling_mask) text_modeling = text_labeling_modeling text_modeling_mask = text_labeling_modeling_mask weight_initializer = create_variable_initializer("glorot_uniform", self.random_seed) text_modeling_matrix = tf.get_variable("transition_matrix", shape=[labeling_unit_dim, labeling_unit_dim], initializer=weight_initializer, regularizer=self.regularizer, trainable=labeling_trainable, dtype=tf.float32) if labeling_transferable == False: post_labeling_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) [tf.add_to_collection(TRANSFERABLE_VARIABLES, v) for v in post_labeling_variables if v in pre_labeling_variables] return text_modeling, text_modeling_mask, text_modeling_matrix def _build_graph(self, text_word, text_word_mask, text_char, text_char_mask, text_ext, text_ext_mask): with tf.variable_scope("graph", reuse=tf.AUTO_REUSE): text_feat, text_feat_mask = self._build_representation_layer(text_word, text_word_mask, text_char, text_char_mask, text_ext, text_ext_mask) """build modeling layer for sequence crf model""" (text_modeling, text_modeling_mask, text_modeling_matrix) = self._build_modeling_layer(text_feat, text_feat_mask, text_ext, text_ext_mask) predict = text_modeling predict_mask = text_modeling_mask transition_matrix = text_modeling_matrix return predict, predict_mask, transition_matrix def _compute_loss(self, label, predict, sequence_length, transition_matrix): log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(predict, label, sequence_length, transition_matrix) loss = tf.reduce_mean(-1.0 * log_likelihood) return loss def build(self, sess): external_index_enable = self.hyperparams.data_external_index_enable if external_index_enable == True: input_word = tf.saved_model.utils.build_tensor_info(self.data_pipeline.input_word_placeholder) input_char = tf.saved_model.utils.build_tensor_info(self.data_pipeline.input_char_placeholder) output_predict = tf.saved_model.utils.build_tensor_info(self.index_predict) output_sequence_length = tf.saved_model.utils.build_tensor_info(self.sequence_length) predict_signature = (tf.saved_model.signature_def_utils.build_signature_def( inputs={ 'input_word': 
input_word, 'input_char': input_char, }, outputs={ 'output_predict': output_predict, 'output_sequence_length': output_sequence_length }, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) else: input_text = tf.saved_model.utils.build_tensor_info(self.data_pipeline.input_text_placeholder) output_predict = tf.saved_model.utils.build_tensor_info(self.text_predict) output_sequence_length = tf.saved_model.utils.build_tensor_info(self.sequence_length) predict_signature = (tf.saved_model.signature_def_utils.build_signature_def( inputs={ 'input_text': input_text }, outputs={ 'output_predict': output_predict, 'output_sequence_length': output_sequence_length }, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) self.model_builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: predict_signature }, clear_devices=True, main_op=tf.tables_initializer()) self.model_builder.save(as_text=False) def save(self, sess, global_step, save_mode): if save_mode == "debug": self.ckpt_debug_saver.save(sess, self.ckpt_debug_name, global_step=global_step) elif save_mode == "epoch": self.ckpt_epoch_saver.save(sess, self.ckpt_epoch_name, global_step=global_step) else: raise ValueError("unsupported save mode {0}".format(save_mode)) def restore(self, sess, ckpt_file, ckpt_type): if ckpt_file is None: raise FileNotFoundError("checkpoint file doesn't exist") if ckpt_type == "debug": self.ckpt_debug_saver.restore(sess, ckpt_file) elif ckpt_type == "epoch": self.ckpt_epoch_saver.restore(sess, ckpt_file) elif ckpt_type == "transfer": self.ckpt_transfer_saver.restore(sess, ckpt_file) else: raise ValueError("unsupported checkpoint type {0}".format(ckpt_type)) def get_latest_ckpt(self, ckpt_type): if ckpt_type == "debug": ckpt_file = tf.train.latest_checkpoint(self.ckpt_debug_dir) elif ckpt_type == "epoch": ckpt_file = tf.train.latest_checkpoint(self.ckpt_epoch_dir) elif ckpt_type == "transfer": ckpt_file = tf.train.latest_checkpoint(self.ckpt_transfer_dir) else: raise ValueError("unsupported checkpoint type {0}".format(ckpt_type)) if ckpt_file is None: raise FileNotFoundError("latest checkpoint file doesn't exist") return ckpt_file def get_ckpt_list(self, ckpt_type): if ckpt_type == "debug": ckpt_state = tf.train.get_checkpoint_state(self.ckpt_debug_dir) elif ckpt_type == "epoch": ckpt_state = tf.train.get_checkpoint_state(self.ckpt_epoch_dir) else: raise ValueError("unsupported checkpoint type {0}".format(ckpt_type)) if ckpt_state is None: raise FileNotFoundError("checkpoint files doesn't exist") return ckpt_state.all_model_checkpoint_paths class WordFeat(object): def __init__(self, vocab_size, embed_dim, dropout, pretrained, embed_data=None, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope="word_feat"): self.vocab_size = vocab_size self.embed_dim = embed_dim self.dropout = dropout self.pretrained = pretrained self.embed_data = embed_data self.num_gpus = num_gpus self.default_gpu_id = default_gpu_id self.regularizer = regularizer self.random_seed = random_seed self.trainable = trainable self.scope = scope with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE): self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, self.embed_data, self.pretrained, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable) self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, 
self.random_seed) def __call__(self, input_word, input_word_mask): with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE): input_word_embedding_mask = input_word_mask input_word_embedding = tf.squeeze(self.embedding_layer(input_word), axis=-2) (input_word_dropout, input_word_dropout_mask) = self.dropout_layer(input_word_embedding, input_word_embedding_mask) input_word_feat = input_word_dropout input_word_feat_mask = input_word_dropout_mask return input_word_feat, input_word_feat_mask class CharFeat(object):
Apache License 2.0
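The checkpoint helpers shown in the preceding entry's context (get_latest_ckpt / get_ckpt_list) are thin wrappers over TensorFlow's checkpoint lookup API. A minimal standalone sketch of those lookups follows, assuming TensorFlow 1.x-style training; the directory path is an illustrative placeholder, not a value taken from the record:

import tensorflow as tf

ckpt_dir = "output/checkpoint/debug"               # hypothetical checkpoint directory
latest = tf.train.latest_checkpoint(ckpt_dir)      # newest checkpoint prefix, or None if none exist
state = tf.train.get_checkpoint_state(ckpt_dir)    # CheckpointState proto, or None
if latest is None or state is None:
    raise FileNotFoundError("checkpoint file doesn't exist")
print(latest)
print(list(state.all_model_checkpoint_paths))      # every checkpoint still tracked in the directory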
hkust-knowcomp/aser
aser/client/__init__.py
ASERClient.fetch_related_eventualities
python
def fetch_related_eventualities(self, data): if isinstance(data, str): data = data.encode("utf-8") else: data = data.eid.encode("utf-8") request_id = self._send(ASERCmd.fetch_related_eventualities, data) msg = self._recv(request_id) return [ (Eventuality().decode(e_encoded, encoding=None), Relation().decode(r_encoded, encoding=None)) for e_encoded, r_encoded in msg ]
Fetch all related eventualities of the given eventuality :param data: the given eventuality or eid :type data: Union[str, aser.eventuality.Eventuality] :return: all related eventualities associated with corresponding relations :rtype: List[Tuple[aser.eventuality.Eventuality, aser.relation.Relation]]
https://github.com/hkust-knowcomp/aser/blob/d25552bdc0c36ed045f84edd83fd79b4158230bb/aser/client/__init__.py#L282-L300
import time import uuid import zmq import json from functools import wraps from ..concept import ASERConcept from ..eventuality import Eventuality from ..relation import Relation from ..utils.config import ASERCmd, ASERError class ASERClient(object): def __init__(self, ip="localhost", port=8000, port_out=8001, timeout=-1): self.client_id = str(uuid.uuid4()).encode("utf-8") context = zmq.Context() self.sender = context.socket(zmq.PUSH) self.sender.setsockopt(zmq.LINGER, 0) self.sender.connect("tcp://{}:{}".format(ip, port)) self.receiver = context.socket(zmq.SUB) self.receiver.setsockopt(zmq.LINGER, 0) self.receiver.setsockopt(zmq.SUBSCRIBE, self.client_id) self.receiver.connect("tcp://{}:{}".format(ip, port_out)) self.request_num = 0 self.timeout = timeout time.sleep(1) def close(self): self.sender.close() self.receiver.close() def _timeout(func): @wraps(func) def arg_wrapper(self, *args, **kw): if 'blocking' in kw and not kw['blocking']: self.receiver.setsockopt(zmq.RCVTIMEO, -1) else: self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout) try: return func(self, *args, **kw) except zmq.error.Again as _e: t_e = TimeoutError( 'no response from the server (with "timeout"=%d ms), please check the following:' 'is the server still online? is the network broken? are "port" and "port_out" correct? ' 'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout ) raise t_e finally: self.receiver.setsockopt(zmq.RCVTIMEO, -1) return arg_wrapper def _send(self, cmd, data): request_id = b"%d" % self.request_num self.sender.send_multipart([self.client_id, request_id, cmd, data]) self.request_num += 1 return request_id @_timeout def _recv(self, request_id): try: while True: response = self.receiver.recv_multipart() if len(response) > 1: if response[1] == request_id: msg = json.loads(response[-1].decode(encoding="utf-8")) if isinstance(msg, str) and msg.startswith(ASERError): msg = msg[len(ASERError):-1] start_idx = msg.index("(") if msg[:start_idx] == "ValueError": raise ValueError(msg[start_idx + 1:]) elif msg[:start_idx] == "TimeoutError": raise TimeoutError(msg[start_idx + 1:]) elif msg[:start_idx] == "AttributeError": raise AttributeError(msg[start_idx + 1:]) return msg else: return [] except BaseException as e: raise e def parse_text(self, text): if not isinstance(text, str): raise ValueError("Error: the input of parse_text should be a raw text.") text = text.encode("utf-8") request_id = self._send(ASERCmd.parse_text, text) msg = self._recv(request_id) if not msg: return None return msg def extract_eventualities(self, data): if isinstance(data, str): data = data.encode("utf-8") else: data = json.dumps(data).encode("utf-8") request_id = self._send(ASERCmd.extract_eventualities, data) msg = self._recv(request_id) if not msg: return None ret_data = [] for sent_eventualities in msg: rst = list() for e_encoded in sent_eventualities: eventuality = Eventuality().decode(e_encoded, encoding=None) rst.append(eventuality) ret_data.append(rst) return ret_data def extract_relations(self, data): if isinstance(data, str): data = data.encode("utf-8") else: if len(data) == 2: data = [ data[0], [[e.encode(encoding=None) for e in sent_eventualities] for sent_eventualities in data[1]] ] data = json.dumps(data).encode("utf-8") else: raise ValueError("Error: your message should be text or (para_parsed_result, para_eventualities).") request_id = self._send(ASERCmd.extract_relations, data) msg = self._recv(request_id) if not msg: return None ret_data = [] for sent_relations in msg: rst = 
list() for r_encoded in sent_relations: relation = Relation().decode(r_encoded, encoding=None) rst.append(relation) ret_data.append(rst) return ret_data def extract_eventualities_and_relations(self, data): if isinstance(data, str): data = data.encode("utf-8") else: data = json.dumps(data).encode("utf-8") request_id = self._send(ASERCmd.extract_eventualities_and_relations, data) msg = self._recv(request_id) if not msg: return None ret_eventualities, ret_relations = [], [] for sent_eventualities, sent_relations in msg: rst_eventualities, rst_relations = list(), list() for e_encoded in sent_eventualities: eventuality = Eventuality().decode(e_encoded, encoding=None) rst_eventualities.append(eventuality) ret_eventualities.append(rst_eventualities) for r_encoded in sent_relations: relation = Relation().decode(r_encoded, encoding=None) rst_relations.append(relation) ret_relations.append(rst_relations) return ret_eventualities, ret_relations def conceptualize_eventuality(self, eventuality): request_id = self._send(ASERCmd.conceptualize_eventuality, eventuality.encode("utf-8")) msg = self._recv(request_id) if not msg: return None ret_data = list() for c_encoded, score in msg: concept = ASERConcept().decode(c_encoded, encoding=None) ret_data.append((concept, score)) return ret_data def exact_match_eventuality(self, data): if isinstance(data, str): data = data.encode("utf-8") else: data = data.eid.encode("utf-8") request_id = self._send(ASERCmd.exact_match_eventuality, data) msg = self._recv(request_id) if msg == ASERCmd.none: return None else: return Eventuality().decode(msg, encoding=None) def predict_eventuality_relation(self, eventuality1, eventuality2): if isinstance(eventuality1, str): hid = eventuality1 else: hid = eventuality1.eid if isinstance(eventuality2, str): tid = eventuality2 else: tid = eventuality2.eid rid = Relation.generate_rid(hid, tid).encode("utf-8") request_id = self._send(ASERCmd.exact_match_eventuality_relation, rid) msg = self._recv(request_id) if msg == ASERCmd.none: return None else: return Relation().decode(msg, encoding=None)
MIT License
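A minimal usage sketch for the fetch_related_eventualities client call documented above; it assumes an ASER server is already running on the default ports, and the eid string is a placeholder rather than a real identifier:

from aser.client import ASERClient

client = ASERClient(ip="localhost", port=8000, port_out=8001)
try:
    # The argument may be an Eventuality object or its eid string.
    related = client.fetch_related_eventualities("41b818ba05135d4b5bfb08c1e23ef4a6bfa61994")
    for eventuality, relation in related:
        print(eventuality, relation)
finally:
    client.close()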
morganstanley/testplan
testplan/common/utils/sockets/server.py
Server.bind
python
def bind(self): self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if self._input_port != 0: self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._server.bind((self._input_host, self._input_port)) self._ip, self._port = self._server.getsockname()
Bind to a socket.
https://github.com/morganstanley/testplan/blob/8cb6a0ed0682698b2d6af82382fbb66d8d9e3ff7/testplan/common/utils/sockets/server.py#L65-L71
import time import socket import select import threading from testplan.common.utils.timing import wait class Server(object): def __init__(self, host="localhost", port=0, listen=1): self._input_host = host self._input_port = port self._listen = listen self._ip = None self._port = None self._listening = False self._server = None self._server_thread = None self._lock = threading.Lock() self._connection_by_fd = {} self._fds = {} self.active_connections = 0 self.accepted_connections = 0 @property def host(self): return self._input_host @property def ip(self): return self._ip @property def port(self): return self._port @property def socket(self): return self._server
Apache License 2.0
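A short sketch of using the bind method above; binding with port 0 lets the OS choose a free port, which the ip and port properties then expose:

from testplan.common.utils.sockets.server import Server

server = Server(host="localhost", port=0)   # port 0 -> OS picks a free ephemeral port
server.bind()
print(server.ip, server.port)               # filled in from getsockname() during bind()
server.socket.close()                       # the bound socket is reachable via the `socket` property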
arx-game/arxcode
typeclasses/wearable/wearable.py
Wearable.at_before_move
python
def at_before_move(self, destination, **kwargs): caller = kwargs.get("caller", None) if caller and self.is_worn: caller.msg("%s is currently worn and cannot be moved." % self) return False return super(Wearable, self).at_before_move(destination, **kwargs)
Checks if the object can be moved.
https://github.com/arx-game/arxcode/blob/5299f1f75c4ee5ee19e1a26195aa24832f7ca817/typeclasses/wearable/wearable.py#L53-L59
from typeclasses.objects import Object from time import time from typeclasses.containers.container import Container from world.fashion.mixins import FashionableMixins from typeclasses.exceptions import EquipError from world.crafting.craft_data_handlers import WearableDataHandler class Wearable(FashionableMixins, Object): item_data_class = WearableDataHandler default_desc = "A piece of clothing or armor." baseval_scaling_divisor = 10.0 default_scaling = 0.2 default_currently_worn = False default_worn_time = 0.0 def at_object_creation(self): self.is_worn = False self.at_init() def softdelete(self): if self.is_worn: wearer = self.location self.is_worn = False self.at_post_remove(wearer) modi = self.modusornamenta_set.all() for mo in modi: outfit = mo.fashion_outfit mo.delete() if outfit.pk: outfit.invalidate_outfit_caches() outfit.check_existence() self.invalidate_snapshots_cache() super(Wearable, self).softdelete()
MIT License
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/sensor/uk_transport.py
UkTransportSensor._do_api_request
python
def _do_api_request(self, params): request_params = dict({ 'app_id': self._api_app_id, 'app_key': self._api_app_key, }, **params) response = requests.get(self._url, params=request_params) if response.status_code != 200: _LOGGER.warning('Invalid response from API') elif 'error' in response.json(): if 'exceeded' in response.json()['error']: self._state = 'Usage limits exceeded' if 'invalid' in response.json()['error']: self._state = 'Credentials invalid' else: self._data = response.json()
Perform an API request.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/sensor/uk_transport.py#L123-L139
import logging import re from datetime import datetime, timedelta import requests import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_MODE from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTR_ATCOCODE = 'atcocode' ATTR_LOCALITY = 'locality' ATTR_STOP_NAME = 'stop_name' ATTR_REQUEST_TIME = 'request_time' ATTR_NEXT_BUSES = 'next_buses' ATTR_STATION_CODE = 'station_code' ATTR_CALLING_AT = 'calling_at' ATTR_NEXT_TRAINS = 'next_trains' CONF_API_APP_KEY = 'app_key' CONF_API_APP_ID = 'app_id' CONF_QUERIES = 'queries' CONF_ORIGIN = 'origin' CONF_DESTINATION = 'destination' _QUERY_SCHEME = vol.Schema({ vol.Required(CONF_MODE): vol.All(cv.ensure_list, [vol.In(list(['bus', 'train']))]), vol.Required(CONF_ORIGIN): cv.string, vol.Required(CONF_DESTINATION): cv.string, }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_API_APP_ID): cv.string, vol.Required(CONF_API_APP_KEY): cv.string, vol.Required(CONF_QUERIES): [_QUERY_SCHEME], }) def setup_platform(hass, config, add_devices, discovery_info=None): sensors = [] number_sensors = len(config.get(CONF_QUERIES)) interval = timedelta(seconds=87*number_sensors) for query in config.get(CONF_QUERIES): if 'bus' in query.get(CONF_MODE): stop_atcocode = query.get(CONF_ORIGIN) bus_direction = query.get(CONF_DESTINATION) sensors.append( UkTransportLiveBusTimeSensor( config.get(CONF_API_APP_ID), config.get(CONF_API_APP_KEY), stop_atcocode, bus_direction, interval)) elif 'train' in query.get(CONF_MODE): station_code = query.get(CONF_ORIGIN) calling_at = query.get(CONF_DESTINATION) sensors.append( UkTransportLiveTrainTimeSensor( config.get(CONF_API_APP_ID), config.get(CONF_API_APP_KEY), station_code, calling_at, interval)) add_devices(sensors, True) class UkTransportSensor(Entity): TRANSPORT_API_URL_BASE = "https://transportapi.com/v3/uk/" ICON = 'mdi:train' def __init__(self, name, api_app_id, api_app_key, url): self._data = {} self._api_app_id = api_app_id self._api_app_key = api_app_key self._url = self.TRANSPORT_API_URL_BASE + url self._name = name self._state = None @property def name(self): return self._name @property def state(self): return self._state @property def unit_of_measurement(self): return "min" @property def icon(self): return self.ICON
MIT License
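For reference, a standalone sketch of the kind of request _do_api_request issues: the app_id/app_key credentials are merged into the endpoint-specific parameters and sent to the TransportAPI base URL. The endpoint path, stop code and credentials below are illustrative placeholders, not values from the record:

import requests

API_BASE = "https://transportapi.com/v3/uk/"
endpoint = "bus/stop/490000077E/live.json"              # hypothetical live-departures endpoint

request_params = dict({"app_id": "YOUR_APP_ID", "app_key": "YOUR_APP_KEY"},
                      **{"group": "route"})             # endpoint params merged with credentials
response = requests.get(API_BASE + endpoint, params=request_params)
if response.status_code != 200:
    print("Invalid response from API")
elif "error" in response.json():
    print("API error:", response.json()["error"])       # e.g. usage limits exceeded or invalid credentials
else:
    print(response.json())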
pioreactor/pioreactor
pioreactor/utils/streaming_calculations.py
CultureGrowthEKF._jacobian_observation
python
def _jacobian_observation(self): import numpy as np n = self.n_sensors J = np.zeros((n, 3)) J[:, 0] = 1 return J
measurement model is: m_{1,t} = g1(OD_{t-1}), m_{2,t} = g2(OD_{t-1}), ... The gi are generic functions. Often they are the identity function, but if using a 180deg sensor then it would be the inverse function. One day it could model saturation, too. jac(h) = [[1, 0, 0], [1, 0, 0], ...]
https://github.com/pioreactor/pioreactor/blob/1f856c7f2e472e865460193ea5db15767fa0f3cd/pioreactor/utils/streaming_calculations.py#L302-L325
from json import dumps from typing import Optional from threading import Timer from pioreactor.pubsub import publish class ExponentialMovingAverage: def __init__(self, alpha: float): self.value: Optional[float] = None self.alpha = alpha def update(self, new_value: float) -> float: if self.value is None: self.value = new_value else: self.value = (1 - self.alpha) * new_value + self.alpha * self.value return self.value def __call__(self): return self.value class CultureGrowthEKF: def __init__( self, initial_state, initial_covariance, process_noise_covariance, observation_noise_covariance, ): import numpy as np initial_state = np.asarray(initial_state) assert initial_state.shape[0] == 3 assert ( initial_state.shape[0] == initial_covariance.shape[0] == initial_covariance.shape[1] ), f"Shapes are not correct,{initial_state.shape[0]}, {initial_covariance.shape[0]}, {initial_covariance.shape[1]}" assert process_noise_covariance.shape == initial_covariance.shape assert self._is_positive_definite(process_noise_covariance) assert self._is_positive_definite(initial_covariance) assert self._is_positive_definite(observation_noise_covariance) self.process_noise_covariance = process_noise_covariance self.observation_noise_covariance = observation_noise_covariance self.state_ = initial_state self.covariance_ = initial_covariance self.n_sensors = observation_noise_covariance.shape[0] self.n_states = initial_state.shape[0] self._currently_scaling_covariance = False self._currently_scaling_process_covariance = False self._scale_covariance_timer = None self._covariance_pre_scale = None def predict(self, dt): return ( self._predict_state(self.state_, self.covariance_, dt), self._predict_covariance(self.state_, self.covariance_, dt), ) def update(self, observation, dt): import numpy as np observation = np.asarray(observation) assert observation.shape[0] == self.n_sensors, (observation, self.n_sensors) state_prediction, covariance_prediction = self.predict(dt) residual_state = observation - state_prediction[:-2] H = self._jacobian_observation() residual_covariance = ( H @ covariance_prediction @ H.T + self.state_[0] * self.observation_noise_covariance ) kalman_gain_ = np.linalg.solve( residual_covariance.T, (H @ covariance_prediction.T) ).T self.state_ = state_prediction + kalman_gain_ @ residual_state self.covariance_ = ( np.eye(self.n_states) - kalman_gain_ @ H ) @ covariance_prediction return def scale_OD_variance_for_next_n_seconds(self, factor, seconds): import numpy as np def reverse_scale_covariance(): self._currently_scaling_covariance = False self.covariance_ = self._covariance_pre_scale self._covariance_pre_scale = None def forward_scale_covariance(): if not self._currently_scaling_covariance: self._covariance_pre_scale = self.covariance_.copy() self._currently_scaling_covariance = True self.covariance_ = np.diag(self._covariance_pre_scale.diagonal()) self.covariance_[0] *= factor def forward_scale_process_covariance(): if not self._currently_scaling_process_covariance: self._dummy = self.process_noise_covariance[2, 2] self._currently_scaling_process_covariance = True self.process_noise_covariance[0, 0] = 1e-7 * self.state_[0] self.process_noise_covariance[2, 2] = 0 def reverse_scale_process_covariance(): self._currently_scaling_process_covariance = False self.process_noise_covariance[0, 0] = 0 self.process_noise_covariance[2, 2] = self._dummy if self._currently_scaling_covariance: self._scale_covariance_timer.cancel() if self._currently_scaling_process_covariance: 
self._scale_process_covariance_timer.cancel() self._scale_covariance_timer = Timer(seconds, reverse_scale_covariance) self._scale_covariance_timer.daemon = True self._scale_covariance_timer.start() self._scale_process_covariance_timer = Timer( 2.5 * seconds, reverse_scale_process_covariance ) self._scale_process_covariance_timer.daemon = True self._scale_process_covariance_timer.start() forward_scale_covariance() forward_scale_process_covariance() def _predict_state(self, state, covariance, dt): import numpy as np od, rate, acc = state return np.array([od * np.exp(rate * dt), rate + acc * dt, acc]) def _predict_covariance(self, state, covariance, dt): jacobian = self._jacobian_process(state, dt) return jacobian @ covariance @ jacobian.T + self.process_noise_covariance def _jacobian_process(self, state, dt): import numpy as np J = np.zeros((3, 3)) od, rate, acc = state J[0, 0] = np.exp(rate * dt) J[1, 1] = 1 J[2, 2] = 1 J[0, 1] = od * np.exp(rate * dt) * dt J[1, 2] = dt return J
MIT License
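A minimal sketch of driving the filter whose observation Jacobian is documented above: construct it with a three-element state [OD, growth rate, acceleration] and a single OD sensor, then feed it one reading. The covariance values and readings are illustrative, not tuned values from the project:

import numpy as np
from pioreactor.utils.streaming_calculations import CultureGrowthEKF

ekf = CultureGrowthEKF(
    initial_state=np.array([0.5, 0.0, 0.0]),              # [OD, rate, acceleration]
    initial_covariance=np.diag([1e-3, 1e-4, 1e-5]),
    process_noise_covariance=np.diag([1e-5, 1e-6, 1e-7]),
    observation_noise_covariance=np.array([[1e-3]]),       # one sensor -> 1x1 matrix
)

# One new OD observation, 5 seconds after the previous one; the 1x3 Jacobian
# [[1, 0, 0]] maps the state onto the single sensor reading during the update.
ekf.update([0.52], dt=5.0)
print(ekf.state_)                                           # updated [OD, rate, acceleration] estimate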
google-research/pyreach
pyreach/impl/arm_impl.py
_SetAnalogOut.to_reach_script
python
def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray], ) -> List[types_gen.ReachScriptCommand]: return [ types_gen.ReachScriptCommand( controller_name=self.controller_name, set_analog_out=types_gen.SetAnalogOutArgs( output=self._output, value=self._value)) ]
Convert a SetAnalogOut into some ReachScript commands. Args: arm_type: The type of arm to use. support_vacuum: True if vacuum is supported. support_blowoff: True if blowoff is supported. ik_lib: An optional inverse kinematics object. ik_hints: The ik hints. state: The arm state. arm_origin: The origin of the arm. tip_adjust_transform: The transform of the adjusted tip. Returns: A list of ReachScript commands to perform the translation.
https://github.com/google-research/pyreach/blob/83cac8e235ba1392dcdc6b8d19202c3eff3ad9a6/pyreach/impl/arm_impl.py#L677-L708
import enum import json import logging import threading from typing import Callable, Dict, List, Optional, Set, Tuple, Union, cast import numpy as np from pyreach import arm from pyreach import calibration from pyreach import constraints from pyreach import core from pyreach import internal from pyreach.common.base import transform_util from pyreach.common.python import types_gen from pyreach.ik_pybullet import ik_pybullet from pyreach.ikfast import ikfast from pyreach.impl import actions_impl from pyreach.impl import calibration_impl from pyreach.impl import constraints_impl from pyreach.impl import device_base from pyreach.impl import requester from pyreach.impl import thread_util from pyreach.impl import utils from pyreach.common.proto_gen import workcell_io_pb2 as workcell_io class ActionVacuumState(enum.Enum): OFF = 0 VACUUM = 1 BLOWOFF = 2 class ArmTypeImpl(arm.ArmType): _urdf_file: str _joint_count: int def __init__(self, urdf_file: str, joint_count: int): self._urdf_file = urdf_file self._joint_count = joint_count @property def urdf_file(self) -> str: return self._urdf_file @property def joint_count(self) -> int: return self._joint_count @classmethod def from_urdf_file(cls, urdf_file: str) -> arm.ArmType: if urdf_file in { "ur5.urdf", "ur5e.urdf", "ur5e.urdf", "ur10e.urdf", "lrmate200ic.urdf", "lrmate200id.urdf", "FanucCR7ia.urdf", "FanucLrmate200id7l.urdf", "FanucR2000ia165f.urdf", "XArm6.urdf" }: return ArmTypeImpl(urdf_file, 6) raise ValueError("invalid URDF file: " + urdf_file) class _Command: def __init__(self, controller_name: str) -> None: self._controller_name = controller_name @property def controller_name(self) -> str: return self._controller_name def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray], ) -> List[types_gen.ReachScriptCommand]: raise NotImplementedError class _MoveJoints(_Command): _joints: List[float] _velocity: float _acceleration: float _servo: bool _servo_time_seconds: float _servo_lookahead_time_seconds: float _servo_gain: float def __init__(self, controller_name: str, joints: List[float], velocity: float = 0.0, acceleration: float = 0.0, servo: bool = False, servo_time_seconds: float = 0.0, servo_lookahead_time_seconds: float = 0.0, servo_gain: float = 0.0) -> None: super().__init__(controller_name) self._joints = joints self._velocity = velocity self._acceleration = acceleration self._servo = servo self._servo_time_seconds = servo_time_seconds self._servo_lookahead_time_seconds = servo_lookahead_time_seconds self._servo_gain = servo_gain def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray], ) -> List[types_gen.ReachScriptCommand]: if arm_type.joint_count != len(self._joints): raise core.PyReachError("Invalid joint count in MoveJoints") return [ types_gen.ReachScriptCommand( controller_name=self.controller_name, move_j_path=types_gen.MoveJPathArgs(waypoints=[ types_gen.MoveJWaypointArgs( rotation=self._joints, velocity=self._velocity, acceleration=self._acceleration, servo=self._servo, servo_t_secs=self._servo_time_seconds, servo_lookahead_time_secs=self ._servo_lookahead_time_seconds, 
servo_gain=self._servo_gain) ])) ] class _MoveLinear(_Command): _joints: List[float] _velocity: float _acceleration: float _servo: bool def __init__(self, controller_name: str, joints: List[float], velocity: float = 0.0, acceleration: float = 0.0, servo: bool = False) -> None: super().__init__(controller_name) self._joints = joints self._velocity = velocity self._acceleration = acceleration self._servo = servo def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray], ) -> List[types_gen.ReachScriptCommand]: if arm_type.joint_count != len(self._joints): raise core.PyReachError("Invalid joint count in MoveLinear") return [ types_gen.ReachScriptCommand( controller_name=self.controller_name, move_l_path=types_gen.MoveLPathArgs(waypoints=[ types_gen.MoveLWaypointArgs( rotation=self._joints, velocity=self._velocity, acceleration=self._acceleration, servo=self._servo) ])) ] class _MovePose(_Command): _translation: types_gen.Vec3d _rotation: types_gen.Vec3d _velocity: float _acceleration: float _use_linear: bool _servo: bool _use_unity_ik: bool _apply_tip_adjust_transform: bool _servo_time_seconds: float _servo_lookahead_time_seconds: float _servo_gain: float def __init__(self, controller_name: str, translation: types_gen.Vec3d, rotation: types_gen.Vec3d, velocity: float = 0.0, acceleration: float = 0.0, use_linear: bool = False, servo: bool = False, use_unity_ik: bool = False, apply_tip_adjust_transform: bool = False, servo_time_seconds: float = 0.0, servo_lookahead_time_seconds: float = 0.0, servo_gain: float = 0.0) -> None: super().__init__(controller_name) self._translation = translation self._rotation = rotation self._velocity = velocity self._acceleration = acceleration self._use_linear = use_linear self._servo = servo self._use_unity_ik = use_unity_ik self._apply_tip_adjust_transform = apply_tip_adjust_transform self._servo_time_seconds = servo_time_seconds self._servo_lookahead_time_seconds = servo_lookahead_time_seconds self._servo_gain = servo_gain def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray] ) -> List[types_gen.ReachScriptCommand]: pose = np.array([ self._translation.x, self._translation.y, self._translation.z, self._rotation.x, self._rotation.y, self._rotation.z ]) if self._apply_tip_adjust_transform: if tip_adjust_transform is not None: if self._use_unity_ik: quaternion_const = transform_util.inverse_quat( np.array([0.0000, 0.7071, -0.7071, 0.0000])) tip_adjust_transform = transform_util.inverse_pose( tip_adjust_transform) tip_adjust_unity = transform_util.unity_pos_quaternion_to_pose( tip_adjust_transform[:3], transform_util.axis_angle_to_quaternion(tip_adjust_transform[3:])) tip_adjust_unity_translation = tip_adjust_unity[:3] tip_adjust_unity_rotation = tip_adjust_unity[3:] tip_adjust_unity_rotation_quat = transform_util.axis_angle_to_quaternion( tip_adjust_unity_rotation) tip_adjust_unity_rotation = transform_util.quaternion_multiply( tip_adjust_unity_rotation_quat, quaternion_const) tip_adjust_pose_unity = transform_util.pos_quaternion_to_pose( tip_adjust_unity_translation, tip_adjust_unity_rotation) 
tip_adjust_pose_unity_matrix = np.linalg.inv( transform_util.pose_to_matrix(tip_adjust_pose_unity)) pose_unity = transform_util.multiply_pose( pose, transform_util.matrix_to_pose(tip_adjust_pose_unity_matrix)) pose = transform_util.unity_pos_quaternion_to_pose( pose_unity[:3], transform_util.axis_angle_to_quaternion(pose_unity[3:])) else: pose = transform_util.multiply_pose(pose, tip_adjust_transform) else: raise core.PyReachError("Calibration was not loaded") if ik_lib is not None: if not ik_hints: raise core.PyReachError("IKhints have not been loaded") if isinstance(ik_lib, ikfast.IKFast): if self._use_unity_ik: joints = ik_lib.unity_ik_solve_search(pose, list(state.joint_angles), ik_hints) else: joints = ik_lib.ik_search(pose, ik_hints) elif isinstance(ik_lib, ik_pybullet.IKPybullet): joints = ik_lib.ik_search(pose, np.array(state.joint_angles)) else: raise core.PyReachError("Unsupported IK library") if joints is None: raise core.PyReachError("IK failed to find solution") if self._use_linear: return _MoveLinear(self.controller_name, joints.tolist(), self._velocity, self._acceleration, self._servo).to_reach_script( arm_type, support_vacuum, support_blowoff, ik_lib, ik_hints, state, arm_origin, tip_adjust_transform) else: return _MoveJoints( self.controller_name, joints.tolist(), self._velocity, self._acceleration, self._servo, servo_time_seconds=self._servo_time_seconds, servo_lookahead_time_seconds=self._servo_lookahead_time_seconds, servo_gain=self._servo_gain).to_reach_script( arm_type, support_vacuum, support_blowoff, ik_lib, ik_hints, state, arm_origin, tip_adjust_transform) else: return [ types_gen.ReachScriptCommand( controller_name=self.controller_name, move_pose_path=types_gen.MovePosePathArgs(waypoints=[ types_gen.MovePoseWaypointArgs( translation=types_gen.Vec3d(pose[0], pose[1], pose[2]), rotation=types_gen.Vec3d(pose[3], pose[4], pose[5]), velocity=self._velocity, acceleration=self._acceleration) ])) ] class _SetVacuumState(_Command): _state: ActionVacuumState def __init__(self, controller_name: str, state: ActionVacuumState) -> None: super().__init__(controller_name) self._state = state def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray], ) -> List[types_gen.ReachScriptCommand]: if not support_vacuum: raise core.PyReachError("Robot does not support vacuum") cmds = [ types_gen.ReachScriptCommand( controller_name=self.controller_name, set_output=types_gen.SetOutput( py_type="vacuum", name="", args=[ types_gen.CapabilityState( pin="", int_value=int(self._state == ActionVacuumState.VACUUM)) ])) ] if support_blowoff: cmds.append( types_gen.ReachScriptCommand( controller_name=self.controller_name, set_output=types_gen.SetOutput( py_type="blowoff", name="", args=[ types_gen.CapabilityState( pin="", int_value=int( self._state == ActionVacuumState.BLOWOFF)) ]))) elif self._state == ActionVacuumState.BLOWOFF: raise core.PyReachError("Robot does not support blowoff") return cmds class _SetDigitalOut(_Command): _output: int _value: bool def __init__(self, controller_name: str, output: int, value: bool) -> None: super().__init__(controller_name) self._output = output self._value = value def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: 
Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray], ) -> List[types_gen.ReachScriptCommand]: return [ types_gen.ReachScriptCommand( controller_name=self.controller_name, set_digital_out=types_gen.SetDigitalOutArgs( output=self._output, value=self._value)) ] class _SetToolDigitalOut(_Command): _output: int _value: bool def __init__(self, controller_name: str, output: int, value: bool) -> None: super().__init__(controller_name) self._output = output self._value = value def to_reach_script( self, arm_type: arm.ArmType, support_vacuum: bool, support_blowoff: bool, ik_lib: Optional[Union[ikfast.IKFast, ik_pybullet.IKPybullet]], ik_hints: Dict[int, List[float]], state: arm.ArmState, arm_origin: Optional[np.ndarray], tip_adjust_transform: Optional[np.ndarray], ) -> List[types_gen.ReachScriptCommand]: return [ types_gen.ReachScriptCommand( controller_name=self.controller_name, set_tool_digital_out=types_gen.SetDigitalOutArgs( output=self._output, value=self._value)) ] class _SetAnalogOut(_Command): _output: int _value: float def __init__(self, controller_name: str, output: int, value: float) -> None: super().__init__(controller_name) self._output = output self._value = value
Apache License 2.0
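A sketch of how the analog-output command above is translated, using the private _SetAnalogOut class directly; since analog output needs no kinematics, calibration or arm state, placeholder arguments are passed for those parameters (the controller name, pin number and value are illustrative):

from pyreach.impl.arm_impl import _SetAnalogOut

cmd = _SetAnalogOut(controller_name="arm0", output=1, value=0.75)
commands = cmd.to_reach_script(
    arm_type=None, support_vacuum=False, support_blowoff=False,
    ik_lib=None, ik_hints={}, state=None,
    arm_origin=None, tip_adjust_transform=None)
# One ReachScriptCommand wrapping SetAnalogOutArgs(output=1, value=0.75) comes back.
print(commands[0].set_analog_out.output, commands[0].set_analog_out.value)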
opsdroid/opsdroid
opsdroid/connector/matrix/create_events.py
MatrixEventCreator.create_room_name
python
async def create_room_name(self, event, roomid): return events.RoomName( name=event["content"]["name"], user=await self.connector.get_nick(roomid, event["sender"]), user_id=event["sender"], target=roomid, connector=self.connector, event_id=event["event_id"], raw_event=event, )
Create a RoomName event.
https://github.com/opsdroid/opsdroid/blob/9a48364869ded7cdd2420b43b0c2c153e846439e/opsdroid/connector/matrix/create_events.py#L164-L174
import logging from collections import defaultdict from opsdroid import events from . import events as matrix_events _LOGGER = logging.getLogger(__name__) __all__ = ["MatrixEventCreator"] def trim_reply_fallback_text(text): if not text.startswith("> ") or "\n" not in text: return text lines = text.split("\n") while len(lines) > 0 and lines[0].startswith("> "): lines.pop(0) return "\n".join(lines).strip() class MatrixEventCreator(events.EventCreator): async def create_event_from_eventid(self, eventid, roomid): room_context = await self.connector.connection.room_context(roomid, eventid, 1) event_json = room_context.event.source return await self.create_event(event_json, roomid) def __init__(self, connector, *args, **kwargs): super().__init__(connector, *args, **kwargs) self.event_types["m.room.message"] = self.create_room_message self.event_types["m.room.topic"] = self.create_room_description self.event_types["m.room.name"] = self.create_room_name self.event_types["m.reaction"] = self.create_reaction self.event_types["m.room.member"] = self.create_join_room self.message_events = defaultdict(lambda: self.skip) self.message_events.update( { "m.text": self.create_message, "m.image": self.create_image, "m.file": self.create_file, } ) async def skip(self, event, roomid): kwargs = dict( content=event["content"], event_type=event["type"], user_id=event["sender"], user=await self.connector.get_nick(roomid, event["sender"]), target=roomid, connector=self.connector, raw_event=event, event_id=event["event_id"], ) event_type = matrix_events.GenericMatrixRoomEvent if "state_key" in event: event_type = matrix_events.MatrixStateEvent kwargs["state_key"] = event["state_key"] try: event = event_type(**kwargs) return event except Exception: _LOGGER.exception( f"Matrix connector failed to parse event {event} as a room event." 
) return None async def create_room_message(self, event, roomid): msgtype = event["content"]["msgtype"] return await self.message_events[msgtype](event, roomid) async def create_message(self, event, roomid): kwargs = dict( text=event["content"]["body"], user_id=event["sender"], user=await self.connector.get_nick(roomid, event["sender"]), target=roomid, connector=self.connector, event_id=event["event_id"], raw_event=event, ) if "m.relates_to" in event["content"]: relates_to = event["content"]["m.relates_to"] if relates_to.get("rel_type", "") == "m.replace": kwargs["text"] = event["content"]["m.new_content"]["body"] kwargs["linked_event"] = await self.create_event_from_eventid( relates_to["event_id"], roomid ) return events.EditedMessage(**kwargs) if relates_to.get("m.in_reply_to"): kwargs["text"] = trim_reply_fallback_text(kwargs["text"]) kwargs["linked_event"] = await self.create_event_from_eventid( relates_to["m.in_reply_to"]["event_id"], roomid ) return events.Reply(**kwargs) return events.Message(**kwargs) async def _file_kwargs(self, event, roomid): if "url" in event["content"]: url = event["content"]["url"] else: url = event["content"]["file"]["url"] url = await self.connector.connection.mxc_to_http(url) user = await self.connector.get_nick(roomid, event["sender"]) return dict( url=url, name=event["content"]["body"], user_id=event["sender"], user=user, target=roomid, connector=self.connector, event_id=event["event_id"], raw_event=event, ) async def create_file(self, event, roomid): kwargs = await self._file_kwargs(event, roomid) return events.File(**kwargs) async def create_image(self, event, roomid): kwargs = await self._file_kwargs(event, roomid) return events.Image(**kwargs) async def create_room_description(self, event, roomid): return events.RoomDescription( description=event["content"]["topic"], user=await self.connector.get_nick(roomid, event["sender"]), user_id=event["sender"], target=roomid, connector=self.connector, event_id=event["event_id"], raw_event=event, )
Apache License 2.0
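For reference, a sketch of the m.room.name state event payload that create_room_name consumes; the identifiers are illustrative. The creator reads content.name, sender and event_id, resolves the sender's display nick through the connector, and wraps the result in an events.RoomName:

room_name_event = {
    "type": "m.room.name",
    "sender": "@alice:example.org",                    # becomes user_id; nick resolved via connector.get_nick
    "event_id": "$143273582443PhrSn:example.org",      # hypothetical Matrix event id
    "content": {"name": "Project chat"},               # becomes RoomName.name
}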
wavefronthq/python-client
wavefront_api_client/models/chart_source_query.py
ChartSourceQuery.source_color
python
def source_color(self, source_color): self._source_color = source_color
Sets the source_color of this ChartSourceQuery. The color used to draw all results from this source (auto if unset). :param source_color: The source_color of this ChartSourceQuery. :type: str
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/chart_source_query.py#L312-L321
import pprint import re import six from wavefront_api_client.configuration import Configuration class ChartSourceQuery(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'disabled': 'bool', 'name': 'str', 'query': 'str', 'query_type': 'str', 'querybuilder_enabled': 'bool', 'querybuilder_serialization': 'str', 'scatter_plot_source': 'str', 'secondary_axis': 'bool', 'source_color': 'str', 'source_description': 'str' } attribute_map = { 'disabled': 'disabled', 'name': 'name', 'query': 'query', 'query_type': 'queryType', 'querybuilder_enabled': 'querybuilderEnabled', 'querybuilder_serialization': 'querybuilderSerialization', 'scatter_plot_source': 'scatterPlotSource', 'secondary_axis': 'secondaryAxis', 'source_color': 'sourceColor', 'source_description': 'sourceDescription' } def __init__(self, disabled=None, name=None, query=None, query_type=None, querybuilder_enabled=None, querybuilder_serialization=None, scatter_plot_source=None, secondary_axis=None, source_color=None, source_description=None, _configuration=None): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._disabled = None self._name = None self._query = None self._query_type = None self._querybuilder_enabled = None self._querybuilder_serialization = None self._scatter_plot_source = None self._secondary_axis = None self._source_color = None self._source_description = None self.discriminator = None if disabled is not None: self.disabled = disabled self.name = name self.query = query if query_type is not None: self.query_type = query_type if querybuilder_enabled is not None: self.querybuilder_enabled = querybuilder_enabled if querybuilder_serialization is not None: self.querybuilder_serialization = querybuilder_serialization if scatter_plot_source is not None: self.scatter_plot_source = scatter_plot_source if secondary_axis is not None: self.secondary_axis = secondary_axis if source_color is not None: self.source_color = source_color if source_description is not None: self.source_description = source_description @property def disabled(self): return self._disabled @disabled.setter def disabled(self, disabled): self._disabled = disabled @property def name(self): return self._name @name.setter def name(self, name): if self._configuration.client_side_validation and name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def query(self): return self._query @query.setter def query(self, query): if self._configuration.client_side_validation and query is None: raise ValueError("Invalid value for `query`, must not be `None`") self._query = query @property def query_type(self): return self._query_type @query_type.setter def query_type(self, query_type): allowed_values = ["WQL", "PROMQL", "HYBRID"] if (self._configuration.client_side_validation and query_type not in allowed_values): raise ValueError( "Invalid value for `query_type` ({0}), must be one of {1}" .format(query_type, allowed_values) ) self._query_type = query_type @property def querybuilder_enabled(self): return self._querybuilder_enabled @querybuilder_enabled.setter def querybuilder_enabled(self, querybuilder_enabled): self._querybuilder_enabled = querybuilder_enabled @property def querybuilder_serialization(self): return self._querybuilder_serialization @querybuilder_serialization.setter def 
querybuilder_serialization(self, querybuilder_serialization): self._querybuilder_serialization = querybuilder_serialization @property def scatter_plot_source(self): return self._scatter_plot_source @scatter_plot_source.setter def scatter_plot_source(self, scatter_plot_source): allowed_values = ["X", "Y"] if (self._configuration.client_side_validation and scatter_plot_source not in allowed_values): raise ValueError( "Invalid value for `scatter_plot_source` ({0}), must be one of {1}" .format(scatter_plot_source, allowed_values) ) self._scatter_plot_source = scatter_plot_source @property def secondary_axis(self): return self._secondary_axis @secondary_axis.setter def secondary_axis(self, secondary_axis): self._secondary_axis = secondary_axis @property def source_color(self): return self._source_color @source_color.setter
Apache License 2.0
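A short usage sketch for the generated model class and its source_color setter; the series name, query and color are illustrative values:

from wavefront_api_client.models.chart_source_query import ChartSourceQuery

source = ChartSourceQuery(name="cpu usage", query='ts("cpu.usage.percent")')
source.source_color = "rgba(31,120,180,1)"   # color used to draw all results from this source
print(source.source_color)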
ecsim/opem
opem/Static/Amphlett.py
CH2_Calc
python
def CH2_Calc(PH2, T): try: result = PH2 / (1.09 * (10 ** 6) * math.exp(77 / T)) return result except (TypeError, ZeroDivisionError, OverflowError, ValueError): print( "[Error] CH2 Calculation Failed (PH2:%s, T:%s)" % (str(PH2), str(T)))
Calculate CH2. :param PH2: partial pressure [atm] :type PH2: float :param T: cell operation temperature [K] :type T: float :return: CH2 [mol/cm^3] as float
https://github.com/ecsim/opem/blob/3b386b14bbd5d2cc5a376a7fc80c7d93dd7523f5/opem/Static/Amphlett.py#L158-L174
import math from opem.Params import Amphlett_InputParams as InputParams from opem.Params import Amphlett_OutputParams as OutputParams from opem.Params import Amphlett_Params_Default as Defaults from opem.Params import xi1, xi3, xi4, HHV, uF, R, F, Amphlett_Description, Overall_Params_Max_Description, Overall_Params_Linear_Description, Eth, Report_Message import opem.Functions import os def B_Calc(T, n=2): try: return (R * T) / (n * F) except (TypeError, ZeroDivisionError): return None def Power_Thermal_Calc(VStack, N, i): try: return i * ((N * Eth) - VStack) except TypeError: return None def Power_Total_Calc(VStack_List, i_step, N): try: Filtered_List = list(filter(lambda x: x is not None, VStack_List)) Filtered_List_Not = list(map(lambda x: (N * Eth) - x, Filtered_List)) Total_Elec_Power = opem.Functions.integrate(Filtered_List, i_step) Total_Thermal_Power = opem.Functions.integrate( Filtered_List_Not, i_step) return [Total_Elec_Power, Total_Thermal_Power] except Exception: return [None, None] def Linear_Aprox_Params_Calc(B0, B1): Wmax = 0 Vcell_Wmax = 0 try: Wmax = (B0**2) / (4 * B1) except Exception: Wmax = None try: Vcell_Wmax = (B0 / 2) except Exception: Vcell_Wmax = None if Wmax is not None: Wmax = abs(Wmax) if Vcell_Wmax is not None: Vcell_Wmax = abs(Vcell_Wmax) return [Wmax, Vcell_Wmax] def Max_Params_Calc(Power_List, EFF_List, VStack_List): Max_Power = max(list(filter(lambda x: x is not None, Power_List))) Max_EFF = EFF_List[Power_List.index(Max_Power)] Max_VStack = VStack_List[Power_List.index(Max_Power)] return { "Max_Power": Max_Power, "Max_EFF": Max_EFF, "Max_VStack": Max_VStack} def R_Calc(V, i): try: return V / i except (TypeError, ZeroDivisionError): print( "[Error] R Total Calculation Failed (V:%s ,i:%s)" % (str(V), str(i))) def Enernst_Calc(T, PH2, PO2): try: result = 1.229 - (8.5 * (10 ** -4)) * (T - 298.15) + (4.308 * (10 ** -5)) * T * (math.log(PH2) + 0.5 * math.log(PO2)) return result except (TypeError, OverflowError, ValueError): print( "[Error] Enernst Calculation Failed (T:%s , PH2:%s, PO2:%s)" % (str(T), str(PH2), str(PO2)))
MIT License
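A quick worked example of the formula above, CH2 = PH2 / (1.09e6 * exp(77 / T)), evaluated at illustrative operating conditions of 1 atm hydrogen partial pressure and a 343.15 K cell temperature:

import math
from opem.Static.Amphlett import CH2_Calc

ch2 = CH2_Calc(PH2=1.0, T=343.15)
# Direct evaluation of the same expression: 1.0 / (1.09e6 * exp(77 / 343.15)) ~= 7.33e-07 mol/cm^3
print(ch2)
print(1.0 / (1.09 * (10 ** 6) * math.exp(77 / 343.15)))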
googleapis/python-logging
google/cloud/logging_v2/services/logging_service_v2/async_client.py
LoggingServiceV2AsyncClient.list_monitored_resource_descriptors
python
async def list_monitored_resource_descriptors( self, request: logging.ListMonitoredResourceDescriptorsRequest = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListMonitoredResourceDescriptorsAsyncPager: request = logging.ListMonitoredResourceDescriptorsRequest(request) rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_monitored_resource_descriptors, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.InternalServerError, core_exceptions.ServiceUnavailable, ), deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = pagers.ListMonitoredResourceDescriptorsAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) return response
r"""Lists the descriptors for monitored resource types used by Logging. Args: request (:class:`google.cloud.logging_v2.types.ListMonitoredResourceDescriptorsRequest`): The request object. The parameters to ListMonitoredResourceDescriptors retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.logging_v2.services.logging_service_v2.pagers.ListMonitoredResourceDescriptorsAsyncPager: Result returned from ListMonitoredResourceDescriptors. Iterating over this object will yield results and resolve additional pages automatically.
https://github.com/googleapis/python-logging/blob/3ab386102d06637c3b0ba100c7a36a30d0ada26e/google/cloud/logging_v2/services/logging_service_v2/async_client.py#L570-L632
from collections import OrderedDict import functools import re from typing import ( Dict, AsyncIterable, Awaitable, AsyncIterator, Sequence, Tuple, Type, Union, ) import pkg_resources import google.api_core.client_options as ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.oauth2 import service_account from google.api import monitored_resource_pb2 from google.cloud.logging_v2.services.logging_service_v2 import pagers from google.cloud.logging_v2.types import log_entry from google.cloud.logging_v2.types import logging from .transports.base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import LoggingServiceV2GrpcAsyncIOTransport from .client import LoggingServiceV2Client class LoggingServiceV2AsyncClient: _client: LoggingServiceV2Client DEFAULT_ENDPOINT = LoggingServiceV2Client.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = LoggingServiceV2Client.DEFAULT_MTLS_ENDPOINT log_path = staticmethod(LoggingServiceV2Client.log_path) parse_log_path = staticmethod(LoggingServiceV2Client.parse_log_path) common_billing_account_path = staticmethod( LoggingServiceV2Client.common_billing_account_path ) parse_common_billing_account_path = staticmethod( LoggingServiceV2Client.parse_common_billing_account_path ) common_folder_path = staticmethod(LoggingServiceV2Client.common_folder_path) parse_common_folder_path = staticmethod( LoggingServiceV2Client.parse_common_folder_path ) common_organization_path = staticmethod( LoggingServiceV2Client.common_organization_path ) parse_common_organization_path = staticmethod( LoggingServiceV2Client.parse_common_organization_path ) common_project_path = staticmethod(LoggingServiceV2Client.common_project_path) parse_common_project_path = staticmethod( LoggingServiceV2Client.parse_common_project_path ) common_location_path = staticmethod(LoggingServiceV2Client.common_location_path) parse_common_location_path = staticmethod( LoggingServiceV2Client.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): return LoggingServiceV2Client.from_service_account_info.__func__(LoggingServiceV2AsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): return LoggingServiceV2Client.from_service_account_file.__func__(LoggingServiceV2AsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> LoggingServiceV2Transport: return self._client.transport get_transport_class = functools.partial( type(LoggingServiceV2Client).get_transport_class, type(LoggingServiceV2Client) ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, LoggingServiceV2Transport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: self._client = LoggingServiceV2Client( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def delete_log( self, request: logging.DeleteLogRequest = None, *, log_name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: has_flattened_params = any([log_name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none 
of " "the individual field arguments should be set." ) request = logging.DeleteLogRequest(request) if log_name is not None: request.log_name = log_name rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_log, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.InternalServerError, core_exceptions.ServiceUnavailable, ), deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, ) metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("log_name", request.log_name),)), ) await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) async def write_log_entries( self, request: logging.WriteLogEntriesRequest = None, *, log_name: str = None, resource: monitored_resource_pb2.MonitoredResource = None, labels: Sequence[logging.WriteLogEntriesRequest.LabelsEntry] = None, entries: Sequence[log_entry.LogEntry] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> logging.WriteLogEntriesResponse: has_flattened_params = any([log_name, resource, labels, entries]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = logging.WriteLogEntriesRequest(request) if log_name is not None: request.log_name = log_name if resource is not None: request.resource = resource if labels: request.labels.update(labels) if entries: request.entries.extend(entries) rpc = gapic_v1.method_async.wrap_method( self._client._transport.write_log_entries, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.InternalServerError, core_exceptions.ServiceUnavailable, ), deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response async def list_log_entries( self, request: logging.ListLogEntriesRequest = None, *, resource_names: Sequence[str] = None, filter: str = None, order_by: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListLogEntriesAsyncPager: has_flattened_params = any([resource_names, filter, order_by]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = logging.ListLogEntriesRequest(request) if filter is not None: request.filter = filter if order_by is not None: request.order_by = order_by if resource_names: request.resource_names.extend(resource_names) rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_log_entries, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.InternalServerError, core_exceptions.ServiceUnavailable, ), deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, ) response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = pagers.ListLogEntriesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) return response
Apache License 2.0
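A minimal async usage sketch for the pager returned above; it assumes Application Default Credentials are available in the environment:

import asyncio
from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2AsyncClient


async def main():
    client = LoggingServiceV2AsyncClient()
    # The async pager fetches additional pages transparently while iterating.
    async for descriptor in await client.list_monitored_resource_descriptors():
        print(descriptor.type)


asyncio.run(main())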
openstack/cinder
cinder/volume/drivers/hitachi/hbsd_rest_fc.py
HBSDRESTFC.terminate_connection
python
def terminate_connection(self, volume, connector): conn_info = super(HBSDRESTFC, self).terminate_connection( volume, connector) if self.conf.hitachi_zoning_request: if conn_info and conn_info['data']['target_wwn']: init_targ_map = utils.build_initiator_target_map( connector, conn_info['data']['target_wwn'], self._lookup_service) if init_targ_map: conn_info['data']['initiator_target_map'] = init_targ_map fczm_utils.remove_fc_zone(conn_info) return conn_info
Terminate connection between the server and the volume.
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/hitachi/hbsd_rest_fc.py#L228-L240
from oslo_log import log as logging from cinder.volume.drivers.hitachi import hbsd_rest as rest from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.zonemanager import utils as fczm_utils _FC_HMO_DISABLE_IO = 91 LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg class HBSDRESTFC(rest.HBSDREST): def __init__(self, conf, storage_protocol, db): super(HBSDRESTFC, self).__init__(conf, storage_protocol, db) self._lookup_service = fczm_utils.create_lookup_service() def connect_storage(self): target_ports = self.conf.hitachi_target_ports compute_target_ports = self.conf.hitachi_compute_target_ports available_ports = [] available_compute_ports = [] super(HBSDRESTFC, self).connect_storage() params = {'portAttributes': 'TAR'} port_list = self.client.get_ports(params=params) for port in set(target_ports + compute_target_ports): if port not in [port_data['portId'] for port_data in port_list]: utils.output_log(MSG.INVALID_PORT, port=port, additional_info='portAttributes: not TAR') for port_data in port_list: port = port_data['portId'] if port not in set(target_ports + compute_target_ports): continue secure_fc_port = True if (port_data['portType'] not in ['FIBRE', 'FCoE'] or not port_data['lunSecuritySetting']): secure_fc_port = False if not secure_fc_port: utils.output_log( MSG.INVALID_PORT, port=port, additional_info='portType: %s, lunSecuritySetting: %s, ' 'fabricMode: %s, portConnection: %s' % (port_data['portType'], port_data.get('lunSecuritySetting'), port_data.get('fabricMode'), port_data.get('portConnection'))) if not secure_fc_port: continue wwn = port_data.get('wwn') if target_ports and port in target_ports: available_ports.append(port) self.storage_info['wwns'][port] = wwn if compute_target_ports and port in compute_target_ports: available_compute_ports.append(port) self.storage_info['wwns'][port] = wwn if target_ports: for port in target_ports: if port in available_ports: self.storage_info['controller_ports'].append(port) if compute_target_ports: for port in compute_target_ports: if port in available_compute_ports: self.storage_info['compute_ports'].append(port) self.check_ports_info() utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list', value=self.storage_info['wwns']) def create_target_to_storage(self, port, connector, hba_ids): wwpns = self.get_hba_ids_from_connector(connector) target_name = '%(prefix)s-%(wwpns)s' % { 'prefix': utils.DRIVER_PREFIX, 'wwpns': min(wwpns), } try: body = {'portId': port, 'hostGroupName': target_name} gid = self.client.add_host_grp(body, no_log=True) except Exception: params = {'portId': port} host_grp_list = self.client.get_host_grps(params) for host_grp_data in host_grp_list: if host_grp_data['hostGroupName'] == target_name: return target_name, host_grp_data['hostGroupNumber'] raise return target_name, gid def set_hba_ids(self, port, gid, hba_ids): registered_wwns = [] for wwn in hba_ids: try: self.client.add_hba_wwn(port, gid, wwn, no_log=True) registered_wwns.append(wwn) except utils.HBSDError: utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid, wwn=wwn) if not registered_wwns: msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port, gid=gid) raise utils.HBSDError(msg) def set_target_mode(self, port, gid): body = {'hostMode': 'LINUX/IRIX', 'hostModeOptions': [_FC_HMO_DISABLE_IO]} self.client.modify_host_grp(port, gid, body, ignore_all_errors=True) def _get_hwwns_in_hostgroup(self, port, gid, wwpns): hwwns_in_hostgroup = [] for hba_wwn in self.client.get_hba_wwns(port, gid): hwwn = hba_wwn['hostWwn'] if 
hwwn in wwpns: hwwns_in_hostgroup.append(hwwn) return hwwns_in_hostgroup def _set_target_info(self, targets, host_grps, wwpns): for host_grp in host_grps: port = host_grp['portId'] gid = host_grp['hostGroupNumber'] hwwns_in_hostgroup = self._get_hwwns_in_hostgroup(port, gid, wwpns) if hwwns_in_hostgroup: targets['info'][port] = True targets['list'].append((port, gid)) LOG.debug( 'Found wwpns in host group. (port: %(port)s, ' 'gid: %(gid)s, wwpns: %(wwpns)s)', {'port': port, 'gid': gid, 'wwpns': hwwns_in_hostgroup}) return True return False def _get_hwwns_in_hostgroup_by_name(self, port, host_group_name, wwpns): hba_wwns = self.client.get_hba_wwns_by_name(port, host_group_name) return [hba_wwn for hba_wwn in hba_wwns if hba_wwn['hostWwn'] in wwpns] def _set_target_info_by_names(self, targets, port, target_names, wwpns): for target_name in target_names: hwwns_in_hostgroup = self._get_hwwns_in_hostgroup_by_name( port, target_name, wwpns) if hwwns_in_hostgroup: gid = hwwns_in_hostgroup[0]['hostGroupNumber'] targets['info'][port] = True targets['list'].append((port, gid)) LOG.debug( 'Found wwpns in host group. (port: %(port)s, ' 'gid: %(gid)s, wwpns: %(wwpns)s)', {'port': port, 'gid': gid, 'wwpns': [hwwn['hostWwn'] for hwwn in hwwns_in_hostgroup]}) return True return False def find_targets_from_storage( self, targets, connector, target_ports): wwpns = self.get_hba_ids_from_connector(connector) target_names = [ '%(prefix)s-%(wwpns)s' % { 'prefix': utils.DRIVER_PREFIX, 'wwpns': min(wwpns), } ] if 'ip' in connector: target_names.append( '%(prefix)s-%(ip)s' % { 'prefix': utils.DRIVER_PREFIX, 'ip': connector['ip'], } ) not_found_count = 0 for port in target_ports: targets['info'][port] = False if self._set_target_info_by_names( targets, port, target_names, wwpns): continue host_grps = self.client.get_host_grps({'portId': port}) if self._set_target_info( targets, [hg for hg in host_grps if hg['hostGroupName'] not in target_names], wwpns): pass else: not_found_count += 1 return not_found_count def initialize_connection(self, volume, connector): conn_info = super(HBSDRESTFC, self).initialize_connection( volume, connector) if self.conf.hitachi_zoning_request: init_targ_map = utils.build_initiator_target_map( connector, conn_info['data']['target_wwn'], self._lookup_service) if init_targ_map: conn_info['data']['initiator_target_map'] = init_targ_map fczm_utils.add_fc_zone(conn_info) return conn_info
Apache License 2.0
simon-v/minipos
bch.py
get_tx_propagation
python
def get_tx_propagation(txid, threshold=100, callback=None, stop_on_double_spend=False, ignore_errors=False):
    sightings = 0
    double_spend = False
    num_servers = len(explorers)
    propagation = 0
    for server in explorers.copy():
        if not ignore_errors and 'errors' in server and server['errors'] > MAX_ERRORS:
            num_servers -= 1
            continue
        try:
            tx = TxInfo(txid, explorer=server['name'], ignore_errors=ignore_errors)
        except TxNotFoundError:
            continue
        except KeyboardInterrupt:
            raise
        except:
            exception = sys.exc_info()[1]
            try:
                error = exception.reason
            except AttributeError:
                error = exception
            logging.error('Could not fetch explorer data: {}'.format(error))
            continue
        if tx.double_spend:
            double_spend = True
        sightings += 1
        propagation = 100 * sightings / num_servers
        if callback is not None:
            callback(propagation, double_spend)
        if propagation >= threshold:
            break
        elif double_spend and stop_on_double_spend:
            break
    return propagation, double_spend
Estimate a transaction's propagation across the Bitcoin Cash network

Returns a tuple consisting of:
* The percentage of explorers that are aware of the txid;
* The transaction's double spend status.

Keyword arguments:
txid                  The txid to query
threshold             A percentage at which the propagation check is considered finished
callback              A function which will be called after every explorer query
                      with the preliminary results
stop_on_double_spend  Abort the check as soon as a double spend is detected
ignore_errors         Also query explorers that have previously exceeded the error threshold
https://github.com/simon-v/minipos/blob/653afb82bdc3ed6a2aa661fa94cc3f2e4ac2b001/bch.py#L549-L595
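A hedged usage sketch for get_tx_propagation based on the docstring above; the report callback, the 75% threshold, and the placeholder txid are illustrative assumptions rather than code from the minipos source.

# Illustrative only: replace the placeholder with a real Bitcoin Cash txid.
def report(propagation, double_spend):
    # Receives the preliminary results after every explorer query.
    print('seen by {:.0f}% of explorers, double spend: {}'.format(propagation, double_spend))

txid = '0' * 64  # placeholder value
propagation, double_spend = get_tx_propagation(
    txid, threshold=75, callback=report, stop_on_double_spend=True)
if double_spend:
    print('Warning: possible double spend detected')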
import urllib.request import json import random import sys import datetime import logging MAX_ERRORS = 10 TIMEOUT = 5 exchanges = [ { 'url': 'https://api.coinmarketcap.com/v2/ticker/1831/?convert={cur}', 'price_key': 'data.quotes.{cur}.price', }, { 'url': 'https://api.coinbase.com/v2/exchange-rates?currency=BCH', 'price_key': 'data.rates.{cur}', }, { 'url': 'https://apiv2.bitcoinaverage.com/indices/global/ticker/short?crypto=BCH&fiat={cur}', 'price_key': 'BCH{cur}.last', }, { 'url': 'https://api.kraken.com/0/public/Ticker?pair=BCH{cur}', 'price_key': 'result.BCH{cur}.c.0', }, ] explorers = [ { 'url': 'https://cashexplorer.bitcoin.com/api/addr/{address}', 'tx_url': 'https://cashexplorer.bitcoin.com/api/tx/{txid}', 'balance_key': None, 'confirmed_key': 'balance', 'unconfirmed_key': 'unconfirmedBalance', 'last_tx_key': 'transactions.0', 'tx_time_key': 'time', 'tx_inputs_key': 'vin', 'tx_in_double_spend_key': 'doubleSpentTxID', 'tx_outputs_key': 'vout', 'tx_out_value_key': 'value', 'tx_out_address_key': 'scriptPubKey.addresses.0', 'tx_double_spend_key': None, 'tx_fee_key': 'fees', 'tx_size_key': 'size', 'tx_confirmations_key': 'confirmations', 'unit_satoshi': False, 'prefixes': '13', }, { 'url': 'https://blockdozer.com/api/addr/{address}', 'tx_url': 'https://blockdozer.com/api/tx/{txid}', 'balance_key': None, 'confirmed_key': 'balance', 'unconfirmed_key': 'unconfirmedBalance', 'last_tx_key': 'transactions.-1', 'tx_time_key': 'time', 'tx_inputs_key': 'vin', 'tx_in_double_spend_key': 'doubleSpentTxID', 'tx_outputs_key': 'vout', 'tx_out_value_key': 'value', 'tx_out_address_key': 'scriptPubKey.addresses.0', 'tx_double_spend_key': None, 'tx_fee_key': 'fees', 'tx_size_key': 'size', 'tx_confirmations_key': 'confirmations', 'unit_satoshi': False, 'prefixes': 'qp13', }, { 'url': 'https://bch-insight.bitpay.com/api/addr/{address}', 'tx_url': 'https://bch-insight.bitpay.com/api/tx/{txid}', 'balance_key': 'balance', 'confirmed_key': None, 'unconfirmed_key': 'unconfirmedBalance', 'last_tx_key': 'transactions.0', 'tx_time_key': 'time', 'tx_inputs_key': 'vin', 'tx_in_double_spend_key': 'doubleSpentTxID', 'tx_outputs_key': 'vout', 'tx_out_value_key': 'value', 'tx_out_address_key': 'scriptPubKey.addresses.0', 'tx_double_spend_key': None, 'tx_fee_key': 'fees', 'tx_size_key': 'size', 'tx_confirmations_key': 'confirmations', 'unit_satoshi': False, 'prefixes': 'qp', }, { 'url': 'https://bch-chain.api.btc.com/v3/address/{address}', 'tx_url': 'https://bch-chain.api.btc.com/v3/tx/{txid}', 'balance_key': 'data.balance', 'confirmed_key': None, 'unconfirmed_key': 'data.unconfirmed_received', 'last_tx_key': 'data.last_tx', 'tx_time_key': 'data.created_at', 'tx_inputs_key': 'data.inputs', 'tx_in_double_spend_key': None, 'tx_outputs_key': 'data.outputs', 'tx_out_value_key': 'value', 'tx_out_address_key': 'addresses.0', 'tx_double_spend_key': 'data.is_double_spend', 'tx_fee_key': 'data.fee', 'tx_size_key': 'data.vsize', 'tx_confirmations_key': 'data.confirmations', 'unit_satoshi': True, 'prefixes': '13', }, { 'url': 'https://bitcoincash.blockexplorer.com/api/addr/{address}', 'tx_url': 'https://bitcoincash.blockexplorer.com/api/tx/{txid}', 'balance_key': None, 'confirmed_key': 'balance', 'unconfirmed_key': 'unconfirmedBalance', 'last_tx_key': 'transactions.0', 'tx_time_key': 'time', 'tx_inputs_key': 'vin', 'tx_in_double_spend_key': 'doubleSpentTxID', 'tx_outputs_key': 'vout', 'tx_out_value_key': 'value', 'tx_out_address_key': 'scriptPubKey.addresses.0', 'tx_double_spend_key': None, 'tx_fee_key': 'fees', 'tx_size_key': 
'size', 'tx_confirmations_key': 'confirmations', 'unit_satoshi': False, 'prefixes': '13', }, { 'url': 'https://rest.bitbox.earth/v1/address/details/{address}', 'tx_url': 'https://rest.bitbox.earth/v1/transaction/details/{txid}', 'balance_key': None, 'confirmed_key': 'balance', 'unconfirmed_key': 'unconfirmedBalance', 'last_tx_key': 'transactions.0', 'tx_time_key': 'time', 'tx_inputs_key': 'vin', 'tx_in_double_spend_key': 'doubleSpentTxID', 'tx_outputs_key': 'vout', 'tx_out_value_key': 'value', 'tx_out_address_key': 'scriptPubKey.addresses.0', 'tx_double_spend_key': None, 'tx_fee_key': 'fees', 'tx_size_key': 'size', 'tx_confirmations_key': 'confirmations', 'unit_satoshi': False, 'prefixes': 'qp13', }, ] random.seed() random.shuffle(explorers) for _server in explorers: _server['name'] = '.'.join(_server['url'].split('/')[2].split('.')[-2:]) for _server in exchanges: _server['name'] = '.'.join(_server['url'].split('/')[2].split('.')[-2:]) def btc(amount): result = ('%.8f' % float(amount)).rstrip('0.') if result == '': return '0' return result def bits(amount): amount = fiat(float(amount) * 1000000) if amount.endswith('.00'): amount = amount[:-3] return amount def fiat(amount): return ('%.2f' % float(amount)) def color(amount): wholes, sats = '{:.8f}'.format(float(amount)).split('.') mils = sats[0:3] bits = sats[3:6] sats = sats[6:8] whole_color = 'gray' if wholes == '0' else 'black' dot_color = 'lightgray' if mils == '000' and bits == '000' and sats == '00' else 'gray' mil_color = 'lightgray' if mils == '000' and bits == '000' and sats == '00' else 'green' bit_color = 'lightgray' if bits == '000' and sats == '00' else 'darkgreen' if sats == '00': sats = '&mdash;' result = '''<span style="color: {}">{}</span><span style="color: {}; padding-left: 0">.</span><span style="color: {}; padding-left: 0">{}</span><span style="color: {}; padding-left: 0.25em">{}</span><span style="color: gray; padding-left: 0.25em">{}</span>'''.format(whole_color, wholes, dot_color, mil_color, mils, bit_color, bits, sats) return result def jsonload(url): request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'}) with urllib.request.urlopen(request, timeout=TIMEOUT) as webpage: data = str(webpage.read(), 'UTF-8') data = json.loads(data) return data def get_value(json_object, key_path): if 'err_no' in json_object: if json_object['err_no'] == 1: raise urllib.error.HTTPError(None, 404, 'Resource Not Found', None, None) elif json_object['err_no'] == 2: raise urllib.error.HTTPError(None, 400, 'Parameter Error', None, None) for k in key_path.split('.'): try: k = int(k) except ValueError: pass try: json_object = json_object[k] except (TypeError, IndexError): return False return json_object def get_price(currency, exchange=exchanges[0]['name']): found = False for server in exchanges: if server['name'] == exchange: found = True break if not found: raise KeyError('{src} is not in list of exchanges'.format(src=exchange)) data = jsonload(server['url'].format(cur=currency.upper(), cur_lower=currency.lower())) rate = float(get_value(data, server['price_key'].format(cur=currency.upper(), cur_lower=currency.lower()))) if rate == 0.0: raise ValueError('Returned exchange rate is zero') return round(rate, 2) def pick_explorer(server_name=None, address_prefix=None, ignore_errors=False): for __ in explorers: server = explorers.pop(0) if server is None: raise StopIteration('Server list depleted') explorers.append(server) if 'errors' not in server: logging.debug('Adding control fields to {} 
definition'.format(server['name'])) server['errors'] = 0 server['last_error'] = None server['last_data'] = None if server_name is not None and server['name'] != server_name: continue if not ignore_errors and server['errors'] > MAX_ERRORS: logging.debug('Skipping {} based on error rates'.format(server['name'])) continue if address_prefix is not None and address_prefix not in server['prefixes']: logging.debug('Skipping {} due to unsupported address prefix'.format(server['name'])) continue return server raise KeyError('No servers match the requirements') class AddressInfo(object): def __init__(self, address, explorer=None, verify=False, ignore_errors=False): if verify and explorer is not None: raise ValueError('The "verify" and "explorer" parameters are incompatible') xpub = None idx = None if type(address) is tuple: xpub, idx = address elif address[0].lower() == 'b': address = address.split(':')[1] if address[0] in 'QP': address = address.lower() if address[0] in 'qp': self.address = address self.legacy_address = None elif address[0] in '13': self.address = None self.legacy_address = address try: if xpub is not None: self.address = generate_address(xpub, idx) self.legacy_address = generate_address(xpub, idx, False) elif self.address is None: self.address = convert_address(self.legacy_address) elif self.legacy_address is None: self.legacy_address = convert_address(self.address) except ImportError: if xpub is not None: raise explorers.append(None) results = [] if self.address is not None and self.legacy_address is not None: prefixes = 'qp13' elif self.legacy_address is None: prefixes = 'qp' else: prefixes = '13' while explorers[0] is not None: if prefixes == 'qp13': server = pick_explorer(explorer, ignore_errors=ignore_errors) else: server = pick_explorer(explorer, address_prefix=prefixes[0], ignore_errors=ignore_errors) try: logging.debug('Querying {}'.format(server['name'])) if 'q' in server['prefixes']: json = jsonload(server['url'].format(address=self.address)) else: json = jsonload(server['url'].format(address=self.legacy_address)) server['last_data'] = self.raw_data = json if server['confirmed_key'] is not None and server['unconfirmed_key'] is not None: confirmed = float(get_value(json, server['confirmed_key'])) unconfirmed = float(get_value(json, server['unconfirmed_key'])) elif server['confirmed_key'] is not None and server['balance_key'] is not None: confirmed = float(get_value(json, server['confirmed_key'])) balance = float(get_value(json, server['balance_key'])) unconfirmed = balance - confirmed elif server['unconfirmed_key'] is not None and server['balance_key'] is not None: balance = float(get_value(json, server['balance_key'])) unconfirmed = float(get_value(json, server['unconfirmed_key'])) confirmed = balance - unconfirmed else: raise RuntimeError('Cannot figure out address balance') try: txid = get_value(server['last_data'], server['last_tx_key']) except (KeyError, IndexError): txid = None if not txid: txid = None except KeyboardInterrupt: explorers.remove(None) raise except: server['errors'] += 1 exception = sys.exc_info()[1] try: server['last_error'] = str(exception.reason) except AttributeError: server['last_error'] = str(exception) if server['errors'] > MAX_ERRORS: logging.error('Excessive errors from {server}, disabling. 
Last error: {error}'.format(server=server['name'], error=server['last_error'])) continue if server['unit_satoshi']: confirmed /= 100000000 unconfirmed /= 100000000 if server['errors'] > 0: server['errors'] -= 1 data = (confirmed, unconfirmed, txid) if verify: if data not in results: results.append(data) continue results.append(data) break explorers.remove(None) if results == []: for server in explorers: if server['errors'] > 0: server['errors'] -= 1 raise ConnectionError('No results from any known block explorer') self.confirmed, self.unconfirmed, self.last_txid = results[-1] def get_balance(address, explorer=None, verify=False, ignore_errors=False): addr = AddressInfo(address, explorer, verify, ignore_errors) return addr.confirmed, addr.unconfirmed def get_last_txid(address, explorer=None, verify=False, ignore_errors=False): addr = AddressInfo(address, explorer, verify, ignore_errors) return addr.last_txid class TxNotFoundError(Exception): class TxInfo(object): def __init__(self, txid, explorer=None, ignore_errors=None): explorers.append(None) while explorers[0] is not None: try: server = pick_explorer(explorer, ignore_errors=ignore_errors) except StopIteration: break try: logging.debug('Querying {}'.format(server['name'])) json = jsonload(server['tx_url'].format(txid=txid)) server['last_data'] = self.raw_data = json if server['tx_double_spend_key'] is not None: self.double_spend = get_value(json, server['tx_double_spend_key']) else: self.double_spend = False for i, __ in enumerate(get_value(json, server['tx_inputs_key'])): try: if get_value(json, '.'.join([server['tx_inputs_key'], str(i), server['tx_in_double_spend_key']])) is not None: self.double_spend = True except KeyError as k: if str(k).strip('\'') != server['tx_in_double_spend_key']: raise self.outputs = {} for i, __ in enumerate(get_value(json, server['tx_outputs_key'])): try: addr = get_value(json, '.'.join([server['tx_outputs_key'], str(i), server['tx_out_address_key']])) except KeyError: continue value = float(get_value(json, '.'.join([server['tx_outputs_key'], str(i), server['tx_out_value_key']]))) if server['unit_satoshi']: value /= 100000000 if addr[0] in 'bB': addr = addr.lower().split(':')[1] self.outputs[addr] = value try: self.outputs[convert_address(addr)] = value except ImportError: pass self.fee = float(get_value(json, server['tx_fee_key'])) self.size = get_value(json, server['tx_size_key']) if server['unit_satoshi']: self.fee /= 100000000 self.fee_per_byte = self.fee / self.size * 100000000 self.time = datetime.datetime.fromtimestamp(get_value(json, server['tx_time_key'])) self.confirmations = get_value(json, server['tx_confirmations_key']) break except KeyboardInterrupt: explorers.remove(None) raise except: exception = sys.exc_info()[1] if isinstance(exception, urllib.error.HTTPError) and exception.code == 404: continue server['errors'] += 1 try: server['last_error'] = str(exception.reason) except AttributeError: server['last_error'] = str(exception) if server['errors'] > MAX_ERRORS: logging.error('Excessive errors from {server}, disabling. Last error: {error}'.format(server=server['name'], error=server['last_error'])) continue try: explorers.remove(None) except ValueError: pass if self.__dict__ == {}: raise TxNotFoundError('No results from any known block explorer')
Apache License 2.0
gaasedelen/prefix
plugin/ida_prefix.py
Hooks.finish_populating_widget_popup
python
def finish_populating_widget_popup(self, widget, popup):
    inject_prefix_actions(widget, popup, idaapi.get_widget_type(widget))
    return 0
A right-click menu is about to be shown. (IDA 7)
https://github.com/gaasedelen/prefix/blob/8ad9dc63b388e947136bfcd08a509512eeffeb27/plugin/ida_prefix.py#L207-L212
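A hedged sketch, not taken from the prefix plugin itself, of how an IDA 7 UI_Hooks subclass can use this callback to attach an already-registered action to the right-click popup; the disassembly-view filter and the reuse of the plugin's "prefix:recursive" action name are assumptions for illustration.

import idaapi

class PopupHooks(idaapi.UI_Hooks):
    def finish_populating_widget_popup(self, widget, popup):
        # Attach the registered action only to disassembly views (illustrative filter).
        if idaapi.get_widget_type(widget) == idaapi.BWN_DISASM:
            idaapi.attach_action_to_popup(widget, popup, "prefix:recursive", None)
        return 0

hooks = PopupHooks()
hooks.hook()  # call hooks.unhook() when the plugin terminates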
import os import idc import idaapi import idautils from PyQt5 import QtGui, QtCore, QtWidgets VERSION = "v1.2" AUTHORS = ['Andrew Marumoto', 'Markus Gaasedelen'] def PLUGIN_ENTRY(): return prefix_t() class prefix_t(idaapi.plugin_t): flags = idaapi.PLUGIN_PROC | idaapi.PLUGIN_HIDE help = "" comment = "A plugin for easy function prefixing" wanted_name = "prefix" wanted_hotkey = "" def init(self): self._init_action_bulk() self._init_action_clear() self._init_action_recursive() self._init_hooks() idaapi.msg("%s %s initialized...\n" % (self.wanted_name, VERSION)) return idaapi.PLUGIN_KEEP def run(self, arg): idaapi.msg("%s cannot be run as a script.\n" % self.wanted_name) def term(self): self._hooks.unhook() self._del_action_bulk() self._del_action_clear() self._del_action_recursive() idaapi.msg("%s terminated...\n" % self.wanted_name) def _init_hooks(self): self._hooks = Hooks() self._hooks.ready_to_run = self._init_hexrays_hooks self._hooks.hook() def _init_hexrays_hooks(self): if idaapi.init_hexrays_plugin(): idaapi.install_hexrays_callback(self._hooks.hxe_callback) ACTION_BULK = "prefix:bulk" ACTION_CLEAR = "prefix:clear" ACTION_RECURSIVE = "prefix:recursive" def _init_action_bulk(self): self._bulk_icon_id = idaapi.load_custom_icon(plugin_resource("bulk.png")) action_desc = idaapi.action_desc_t( self.ACTION_BULK, "Prefix selected functions", IDACtxEntry(bulk_prefix), None, "Assign a user prefix to the selected functions", self._bulk_icon_id ) assert idaapi.register_action(action_desc), "Action registration failed" def _init_action_clear(self): self._clear_icon_id = idaapi.load_custom_icon(plugin_resource("clear.png")) action_desc = idaapi.action_desc_t( self.ACTION_CLEAR, "Clear prefixes", IDACtxEntry(clear_prefix), None, "Clear user prefixes from the selected functions", self._clear_icon_id ) assert idaapi.register_action(action_desc), "Action registration failed" def _init_action_recursive(self): self._recursive_icon_id = idaapi.load_custom_icon(plugin_resource("recursive.png")) action_desc = idaapi.action_desc_t( self.ACTION_RECURSIVE, "Recursive function prefix", IDACtxEntry(recursive_prefix_cursor), None, "Recursively prefix callees of this function", self._recursive_icon_id ) assert idaapi.register_action(action_desc), "Action registration failed" def _del_action_bulk(self): idaapi.unregister_action(self.ACTION_BULK) idaapi.free_custom_icon(self._bulk_icon_id) self._bulk_icon_id = idaapi.BADADDR def _del_action_clear(self): idaapi.unregister_action(self.ACTION_CLEAR) idaapi.free_custom_icon(self._clear_icon_id) self._clear_icon_id = idaapi.BADADDR def _del_action_recursive(self): idaapi.unregister_action(self.ACTION_RECURSIVE) idaapi.free_custom_icon(self._recursive_icon_id) self._recursive_icon_id = idaapi.BADADDR class Hooks(idaapi.UI_Hooks): def ready_to_run(self): pass
MIT License