Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def parse_addr(addr, *, proto=None, host=None):
    port = None
    if isinstance(addr, Address):
        return addr
    elif isinstance(addr, str):
        if addr.startswith('http://'):
            proto, addr = 'http', addr[7:]
        if addr.startswith('udp://'):
            proto, addr = 'udp', addr[6:]
        elif addr.startswith('tcp://'):
            proto, addr = 'tcp', addr[6:]
        elif addr.startswith('unix://'):
            proto, addr = 'unix', addr[7:]
        a, _, b = addr.partition(':')
        host = a or host
        port = b or port
    elif isinstance(addr, (tuple, list)):
        a, b = addr
        host = a or host
        port = b or port
    elif isinstance(addr, int):
        port = addr
    else:
        raise ValueError('bad value')
    if port is not None:
        port = int(port)
    return Address(proto, host, port)
Parses an address.

Returns:
    Address: the parsed address.
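A brief usage sketch (assuming `Address` is a simple namedtuple with `proto`, `host`, and `port` fields; the real class is not shown in this snippet):

from collections import namedtuple

Address = namedtuple('Address', 'proto host port')  # assumed stand-in for the real Address class

parse_addr('tcp://127.0.0.1:8888')  # Address(proto='tcp', host='127.0.0.1', port=8888)
parse_addr(('localhost', 80))       # Address(proto=None, host='localhost', port=80)
parse_addr(9000)                    # Address(proto=None, host=None, port=9000)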
codesearchnet
def register_array_types_from_sources(self, source_files):
    for fname in source_files:
        if is_vhdl(fname):
            self._register_array_types(self.extract_objects(fname))
Add array type definitions from a file list to internal registry Args: source_files (list of str): Files to parse for array definitions
codesearchnet
def heightmap_dig_bezier(hm: np.ndarray,
                         px: Tuple[int, int, int, int],
                         py: Tuple[int, int, int, int],
                         startRadius: float, startDepth: float,
                         endRadius: float, endDepth: float) -> None:
    lib.TCOD_heightmap_dig_bezier(_heightmap_cdata(hm), px, py,
                                  startRadius, startDepth, endRadius, endDepth)
Carve a path along a cubic Bezier curve. Both radius and depth can vary linearly along the path. Args: hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions. px (Sequence[int]): The 4 `x` coordinates of the Bezier curve. py (Sequence[int]): The 4 `y` coordinates of the Bezier curve. startRadius (float): The starting radius size. startDepth (float): The starting depth. endRadius (float): The ending radius size. endDepth (float): The ending depth.
codesearchnet
def _do_refresh_request(self, http): body = self._generate_refresh_request_body() headers = self._generate_refresh_request_headers() logger.info('Refreshing access_token') resp, content = transport.request( http, self.token_uri, method='POST', body=body, headers=headers) content = _helpers._from_bytes(content) if resp.status == http_client.OK: d = json.loads(content) self.token_response = d self.access_token = d['access_token'] self.refresh_token = d.get('refresh_token', self.refresh_token) if 'expires_in' in d: delta = datetime.timedelta(seconds=int(d['expires_in'])) self.token_expiry = delta + _UTCNOW() else: self.token_expiry = None if 'id_token' in d: self.id_token = _extract_id_token(d['id_token']) self.id_token_jwt = d['id_token'] else: self.id_token = None self.id_token_jwt = None self.invalid = False if self.store: self.store.locked_put(self) else: logger.info('Failed to retrieve access token: %s', content) error_msg = 'Invalid response {0}.'.format(resp.status) try: d = json.loads(content) if 'error' in d: error_msg = d['error'] if 'error_description' in d: error_msg += ': ' + d['error_description'] self.invalid = True if self.store is not None: self.store.locked_put(self) except (TypeError, ValueError): pass raise HttpAccessTokenRefreshError(error_msg, status=resp.status)
Refresh the access_token using the refresh_token. Args: http: an object to be used to make HTTP requests. Raises: HttpAccessTokenRefreshError: When the refresh fails.
juraj-google-style
def App(apptype, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
    from parsl.app.python import PythonApp
    from parsl.app.bash import BashApp

    logger.warning("The 'App' decorator will be deprecated in Parsl 0.8. "
                   "Please use 'python_app' or 'bash_app' instead.")

    if apptype == 'python':
        app_class = PythonApp
    elif apptype == 'bash':
        app_class = BashApp
    else:
        raise InvalidAppTypeError(
            "Invalid apptype requested {}; must be 'python' or 'bash'".format(apptype))

    def wrapper(f):
        return app_class(f,
                         data_flow_kernel=data_flow_kernel,
                         walltime=walltime,
                         cache=cache,
                         executors=executors)
    return wrapper
The App decorator function. Args: - apptype (string) : Apptype can be bash|python Kwargs: - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. - walltime (int) : Walltime for app in seconds, default=60 - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'. - cache (Bool) : Enable caching of the app call default=False Returns: A PythonApp or BashApp object, which when called runs the apps through the executor.
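A minimal usage sketch (assuming a Parsl configuration/DataFlowKernel has already been loaded elsewhere, as the docstring requires; the decorator import path follows the Parsl 0.7-era layout referenced above):

@App('python', walltime=120, cache=True)
def double(x):
    return 2 * x

future = double(21)      # runs through the loaded executor(s)
print(future.result())   # 42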
codesearchnet
def _GetParser(self):
    parser = optparse.OptionParser()
    parser.disable_interspersed_args()
    parser.add_option('-m', '--map', action='append', type='string', dest='maps',
                      help='map to operate on, can be supplied multiple times')
    return parser
Initialize the argument parser for this command object. A default parser is initialized which supports common flags. It is expected that Command subclasses extend this and add specific flags as needed. Returns: an optparse.OptionParser instance
github-repos
def extend(self, key, values, *, section=DataStoreDocumentSection.Data):
    key_notation = '.'.join([section, key])
    if not isinstance(values, list):
        return False

    result = self._collection.update_one(
        {'_id': ObjectId(self._workflow_id)},
        {
            '$push': {key_notation: {'$each': self._encode_value(values)}},
            '$currentDate': {'lastModified': True}
        })
    return result.modified_count == 1
Extends a list in the data store with the elements of values. Args: key (str): The key pointing to the value that should be stored/updated. It supports MongoDB's dot notation for nested fields. values (list): A list of the values that should be used to extend the list in the document. section (DataStoreDocumentSection): The section from which the data should be retrieved. Returns: bool: ``True`` if the list in the database could be extended, otherwise ``False``.
codesearchnet
def remove_droplets(self, droplet_ids):
    return self.get_data(
        'load_balancers/%s/droplets/' % self.id,
        type=DELETE,
        params={'droplet_ids': droplet_ids})
Unassign Droplets from the LoadBalancer.

Args:
    droplet_ids (obj:`list` of `int`): A list of Droplet IDs
codesearchnet
def get_and_check_tasks_for(context, task, msg_prefix=''):
    tasks_for = task['extra']['tasks_for']
    if tasks_for not in context.config['valid_tasks_for']:
        raise ValueError('{}Unknown tasks_for: {}'.format(msg_prefix, tasks_for))
    return tasks_for
Given a parent task, return the reason the parent task was spawned. ``.taskcluster.yml`` uses this to know whether to spawn an action, cron, or decision task definition. ``tasks_for`` must be a valid one defined in the context. Args: task (dict): the task definition. msg_prefix (str): the string prefix to use for an exception. Raises: (KeyError, ValueError): on failure to find a valid ``tasks_for``. Returns: str: the ``tasks_for``
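A rough illustration of the expected inputs (the context object and task payload shapes below are assumptions based only on the code above):

class FakeContext:  # assumed minimal stand-in for the real scriptworker context
    config = {'valid_tasks_for': ['action', 'cron', 'decision']}

task = {'extra': {'tasks_for': 'cron'}}
print(get_and_check_tasks_for(FakeContext(), task))  # 'cron'

bad_task = {'extra': {'tasks_for': 'nightly'}}
# get_and_check_tasks_for(FakeContext(), bad_task, 'decision task: ')  # raises ValueError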
juraj-google-style
def _clean_out_of_range_indices(labels, num_classes): def _labels_is_sparse(): return isinstance(labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)) def _clean_out_of_range(values): return array_ops.where_v2(math_ops.greater_equal(values, num_classes), -1 * array_ops.ones_like(values), values) def _clean_labels_out_of_range(): if _labels_is_sparse(): return type(labels)(indices=labels.indices, values=_clean_out_of_range(labels.values), dense_shape=labels.dense_shape) else: return _clean_out_of_range(labels) max_labels = math_ops.reduce_max(labels.values if _labels_is_sparse() else labels) return cond.cond(math_ops.greater_equal(max_labels, num_classes), _clean_labels_out_of_range, lambda: labels)
Replaces large out-of-range labels by small out-of-range labels. Replaces any value in `labels` that is greater or equal to `num_classes` by -1. Do this conditionally for efficiency in case there are no such values. Args: labels: `int64` `Tensor` or `SparseTensor`. num_classes: `int64` scalar `Tensor`. Returns: An `int64` `Tensor` or `SparseTensor` as `labels` with indices greater or equal to num_classes replaced by -1.
github-repos
def furnish(app: web.Application): app_name = app['config']['name'] prefix = ('/' + app_name.lstrip('/')) app.router.add_routes(routes) cors_middleware.enable_cors(app) known_resources = set() for route in list(app.router.routes()): if (route.resource in known_resources): continue known_resources.add(route.resource) route.resource.add_prefix(prefix) aiohttp_swagger.setup_swagger(app, swagger_url=(prefix + '/api/doc'), description='', title=f'Brewblox Service "{app_name}"', api_version='0.0', contact='development@brewpi.com') LOGGER.info(('Service info: ' + getenv('SERVICE_INFO', 'UNKNOWN'))) for route in app.router.routes(): LOGGER.info(f'Endpoint [{route.method}] {route.resource}') for (name, impl) in app.get(features.FEATURES_KEY, {}).items(): LOGGER.info(f'Feature [{name}] {impl}')
Configures Application routes, readying it for running. This function modifies routes and resources that were added by calling code, and must be called immediately prior to `run(app)`. Args: app (web.Application): The Aiohttp Application as created by `create_app()`
codesearchnet
def __update_time__(self, filename, **kwargs): conn = self.__get_conn__(**kwargs) load_time = XsdDatetime(datetime.datetime.utcnow()) conn.update_query(.format(file=filename, ctime=load_time.sparql, graph="kdr:load_times"), **kwargs) self.loaded_times[filename] = load_time
updated the mod time for a file saved to the definition_store Args: filename: the name of the file
juraj-google-style
def detect_encoding(data, encoding=None, fallback='latin1', is_html=False):
    if encoding:
        encoding = normalize_codec_name(encoding)

    bs4_detector = EncodingDetector(
        data,
        override_encodings=(encoding,) if encoding else (),
        is_html=is_html)
    candidates = itertools.chain(bs4_detector.encodings, (fallback,))

    for candidate in candidates:
        if not candidate:
            continue
        candidate = normalize_codec_name(candidate)
        if not candidate:
            continue
        if candidate == 'ascii' and fallback != 'ascii':
            continue
        if try_decoding(data, candidate):
            return candidate

    raise ValueError('Unable to detect encoding.')
Detect the character encoding of the data. Returns: str: The name of the codec Raises: ValueError: The codec could not be detected. This error can only occur if fallback is not a "lossless" codec.
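A hedged example of the call shape (assumes the module-level helpers `normalize_codec_name` and `try_decoding`, plus bs4's `EncodingDetector`, are available as the code above implies):

raw = 'Olé café'.encode('utf-8')
print(detect_encoding(raw, encoding='utf-8'))  # 'utf-8' -- the hint decodes cleanly, so it wins
print(detect_encoding(b'plain ascii text'))    # a non-ascii codec, e.g. 'latin1' via the fallback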
codesearchnet
def patch_f90_compiler(f90_compiler):
    from numpy.distutils.fcompiler import gnu

    if not isinstance(f90_compiler, gnu.Gnu95FCompiler):
        return False

    f90_compiler.compiler_f77[:] = _update_flags(
        f90_compiler.compiler_f77, remove_flags=('-Werror',))
    f90_compiler.compiler_f90[:] = _update_flags(f90_compiler.compiler_f90)
Patch up ``f90_compiler``. For now, only updates the flags for ``gfortran``. In this case, it add any of ``GFORTRAN_SHARED_FLAGS`` that are missing. In debug mode, it also adds any flags in ``GFORTRAN_DEBUG_FLAGS`` and makes sure none of the flags in ``GFORTRAN_OPTIMIZE_FLAGS`` are present. In standard mode ("OPTIMIZE"), makes sure flags in ``GFORTRAN_OPTIMIZE_FLAGS`` are present and flags in ``GFORTRAN_DEBUG_FLAGS`` are not. Args: f90_compiler (numpy.distutils.fcompiler.FCompiler): A Fortran compiler instance.
codesearchnet
def AddMonths(start_date, months):
    current_date = start_date
    i = 0
    while i < months:
        month_days = calendar.monthrange(current_date.year, current_date.month)[1]
        current_date += timedelta(days=month_days)
        i += 1
    return current_date
A simple convenience utility for adding months to a given start date. This increments the months by adding the number of days in the current month to the current month, for each month. Args: start_date: date The date months are being added to. months: int The number of months to add. Returns: A date equal to the start date incremented by the given number of months.
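A quick worked example (the function relies on module-level imports of `calendar` and `timedelta`; note that because whole month-lengths are added as days, results can drift past calendar month boundaries):

from datetime import date

print(AddMonths(date(2019, 1, 15), 1))   # 2019-02-15
print(AddMonths(date(2019, 1, 31), 1))   # 2019-03-03 -- January has 31 days, so February is overshot
print(AddMonths(date(2019, 1, 15), 3))   # 2019-04-15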
codesearchnet
class Concatenate(Merge): def __init__(self, axis=-1, **kwargs): super().__init__(**kwargs) self.axis = axis self.supports_masking = True self._reshape_required = False def build(self, input_shape): if len(input_shape) < 1 or not isinstance(input_shape[0], (tuple, list)): raise ValueError(f'A `Concatenate` layer should be called on a list of at least 1 input. Received: input_shape={input_shape}') if all((shape is None for shape in input_shape)): return reduced_inputs_shapes = [list(shape) for shape in input_shape] reduced_inputs_shapes_copy = copy.copy(reduced_inputs_shapes) shape_set = set() for i in range(len(reduced_inputs_shapes_copy)): concat_axis = self.axis % len(reduced_inputs_shapes_copy[i]) for axis, axis_value in enumerate(reduced_inputs_shapes_copy, start=1): if axis != concat_axis and axis_value == 1: del reduced_inputs_shapes[i][axis] if len(reduced_inputs_shapes[i]) > self.axis: del reduced_inputs_shapes[i][self.axis] shape_set.add(tuple(reduced_inputs_shapes[i])) if len(shape_set) != 1: err_msg = f'A `Concatenate` layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape={input_shape}' ranks = set((len(shape) for shape in shape_set)) if len(ranks) != 1: raise ValueError(err_msg) rank, = ranks for axis in range(rank): unique_dims = set((shape[axis] for shape in shape_set if shape[axis] is not None)) if len(unique_dims) > 1: raise ValueError(err_msg) def _merge_function(self, inputs): return ops.concatenate(inputs, axis=self.axis) def compute_output_shape(self, input_shape): if not isinstance(input_shape, (tuple, list)) or not isinstance(input_shape[0], (tuple, list)): raise ValueError(f'A `Concatenate` layer should be called on a list of inputs. Received: input_shape={input_shape}') input_shapes = input_shape output_shape = list(input_shapes[0]) for shape in input_shapes[1:]: if output_shape[self.axis] is None or shape[self.axis] is None: output_shape[self.axis] = None break output_shape[self.axis] += shape[self.axis] return tuple(output_shape) def compute_mask(self, inputs, mask=None): if mask is None: return None if not isinstance(mask, (tuple, list)): raise ValueError(f'`mask` should be a list. Received mask={mask}') if not isinstance(inputs, (tuple, list)): raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}') if len(mask) != len(inputs): raise ValueError(f'The lists `inputs` and `mask` should have the same length. Received: inputs={inputs} of length {len(inputs)}, and mask={mask} of length {len(mask)}') if all((m is None for m in mask)): return None masks = [] for input_i, mask_i in zip(inputs, mask): if mask_i is None: masks.append(ops.ones_like(input_i, dtype='bool')) elif mask_i.ndim < input_i.ndim: masks.append(ops.broadcast_to(ops.expand_dims(mask_i, axis=-1), ops.shape(input_i))) else: masks.append(mask_i) concatenated = ops.concatenate(masks, axis=self.axis) return ops.any(concatenated, axis=-1, keepdims=False) def get_config(self): config = {'axis': self.axis} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
Concatenates a list of inputs. It takes as input a list of tensors, all of the same shape except for the concatenation axis, and returns a single tensor that is the concatenation of all inputs. Examples: >>> x = np.arange(20).reshape(2, 2, 5) >>> y = np.arange(20, 30).reshape(2, 1, 5) >>> keras.layers.Concatenate(axis=1)([x, y]) Usage in a Keras model: >>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> y = keras.layers.Concatenate()([x1, x2]) Args: axis: Axis along which to concatenate. **kwargs: Standard layer keyword arguments. Returns: A tensor, the concatenation of the inputs alongside axis `axis`.
github-repos
def convert_acquire(self, shift, instruction): meas_level = self._run_config.get('meas_level', 2) command_dict = { 'name': 'acquire', 't0': shift+instruction.start_time, 'duration': instruction.duration, 'qubits': [q.index for q in instruction.acquires], 'memory_slot': [m.index for m in instruction.mem_slots] } if meas_level == 2: if instruction.command.discriminator: command_dict.update({ 'discriminators': [ QobjMeasurementOption( name=instruction.command.discriminator.name, params=instruction.command.discriminator.params) ] }) command_dict.update({ 'register_slot': [regs.index for regs in instruction.reg_slots] }) if meas_level >= 1: if instruction.command.kernel: command_dict.update({ 'kernels': [ QobjMeasurementOption( name=instruction.command.kernel.name, params=instruction.command.kernel.params) ] }) return self._qobj_model(**command_dict)
Return converted `AcquireInstruction`. Args: shift(int): Offset time. instruction (AcquireInstruction): acquire instruction. Returns: dict: Dictionary of required parameters.
juraj-google-style
def getattr_sdk(attr, name): if inspect.isroutine(attr): if hasattr(attr, '_sdkmeta'): return attr raise AttributeError(name)
Filter SDK attributes Args: attr(attribute): Attribute as returned by :func:`getattr`. name(str): Attribute name. Returns: `attr` if passed.
juraj-google-style
def sackin(self, normalize='leaves'): num_nodes_from_root = dict(); sackin = 0; num_leaves = 0 for node in self.traverse_preorder(): num_nodes_from_root[node] = 1 if not node.is_root(): num_nodes_from_root[node] += num_nodes_from_root[node.parent] if node.is_leaf(): num_nodes_from_root[node] -= 1; sackin += num_nodes_from_root[node]; num_leaves += 1 if normalize is None or normalize is False: return sackin elif not isinstance(normalize,str): raise TypeError("normalize must be None or a string") normalize = normalize.lower() if normalize == 'leaves': return float(sackin)/num_leaves elif normalize == 'yule': x = sum(1./i for i in range(2, num_leaves+1)) return (sackin - (2*num_leaves*x)) / num_leaves elif normalize == 'pda': return sackin/(num_leaves**1.5) else: raise RuntimeError("normalize must be None, 'leaves', 'yule', or 'pda'")
Compute the Sackin balance index of this ``Tree`` Args: ``normalize`` (``str``): How to normalize the Sackin index (if at all) * ``None`` to not normalize * ``"leaves"`` to normalize by the number of leaves * ``"yule"`` to normalize to the Yule model * ``"pda"`` to normalize to the Proportional to Distinguishable Arrangements model Returns: ``float``: Sackin index (either normalized or not)
juraj-google-style
def create_elb_dns(self, regionspecific=False): if regionspecific: dns_elb = self.generated.dns()['elb_region'] else: dns_elb = self.generated.dns()['elb'] dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region) zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet) self.log.info('Updating Application URL: %s', dns_elb) dns_kwargs = {'dns_name': dns_elb, 'dns_name_aws': dns_elb_aws, 'dns_ttl': self.dns_ttl} for zone_id in zone_ids: self.log.debug('zone_id: %s', zone_id) update_dns_zone_record(self.env, zone_id, **dns_kwargs) return dns_elb
Create dns entries in route53. Args: regionspecific (bool): The DNS entry should have region on it Returns: str: Auto-generated DNS name for the Elastic Load Balancer.
codesearchnet
def insert(self, lines=None): for (i, (key, line)) in enumerate(lines.items()): n = (key + i) first_half = self._lines[:n] last_half = self._lines[n:] self._lines = ((first_half + [line]) + last_half)
Insert lines into the editor. Note: To insert before the first line, use :func:`~exa.core.editor.Editor.preappend` (or key 0); to insert after the last line use :func:`~exa.core.editor.Editor.append`. Args: lines (dict): Dictionary of lines of form (lineno, string) pairs
codesearchnet
def get_overlay_gateway(self): urn = "urn:brocade.com:mgmt:brocade-tunnels" config = ET.Element("config") ET.SubElement(config, "overlay-gateway", xmlns=urn) output = self._callback(config, handler='get_config') result = {} element = ET.fromstring(str(output)) for overlayGw in element.iter('{%s}overlay-gateway' % urn): result['name'] = overlayGw.find('{%s}name' % urn).text isactivate = overlayGw.find('{%s}activate' % urn) if isactivate is None: result['activate'] = False else: result['activate'] = True gwtype = overlayGw.find('{%s}gw-type' % urn) if gwtype is None: result['gwtype'] = None else: result['gwtype'] = gwtype.text attach = overlayGw.find('{%s}attach' % urn) if attach is not None: rbridgeId = attach.find('{%s}rbridge-id' % urn) if rbridgeId is None: result['attached-rbridgeId'] = None else: result['attached-rbridgeId'] = rbridgeId.find('{%s}rb-add' % urn).text result['attached-vlan'] = None vlans = [] for vlan in attach.iter('{%s}vlan'%urn): vlans.append(vlan.find('{%s}vid' % urn).text) result['attached-vlan'] = vlans return result
Get overlay-gateway name on the switch Args: callback (function): A function executed upon completion of the method. Returns: Dictionary containing details of VXLAN Overlay Gateway. Raises: None
juraj-google-style
def position_at_fraction(self, fraction):
    raise NotImplementedError
Returns the position at the given fraction. Given a fraction within the range [0.0, 1.0) this method will return the position at the given fraction compared to the position range [self.start_position, self.stop_position). ** Thread safety ** Methods of the class ``RangeTracker`` including this method may get invoked by different threads, hence must be made thread-safe, e.g. by using a single lock object. Args: fraction: a float value within the range [0.0, 1.0). Returns: a position within the range [self.start_position, self.stop_position).
github-repos
def to_routing_header(params):
    if sys.version_info[0] < 3:
        return urlencode(params).replace("%2F", "/")
    return urlencode(params, safe="/")
Returns a routing header string for the given request parameters. Args: params (Mapping[str, Any]): A dictionary containing the request parameters used for routing. Returns: str: The routing header string.
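A short example of the header string this produces:

params = {'name': 'projects/my-project/instances/my-instance'}
print(to_routing_header(params))
# name=projects/my-project/instances/my-instance   (slashes kept, per safe="/")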
juraj-google-style
def __init__( self, jumps ): self.jumps = jumps self.p = np.array( [ jump.relative_probability for jump in self.jumps ] )
Initialise a Transitions object. Args: jumps (List(Jump)): List of jumps to be contained in this Transitions object. Returns: None
juraj-google-style
def delete_container_instance_group(access_token, subscription_id, resource_group, container_group_name): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API]) return do_delete(endpoint, access_token)
Delete a container group from a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. Returns: HTTP response.
codesearchnet
def db_wb010(self, value=None): if (value is not None): try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float for field `db_wb010`'.format(value)) self._db_wb010 = value
Corresponds to IDD Field `db_wb010` mean coincident dry-bulb temperature to Wet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_wb010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
class FineGrainedFP8Config(QuantizationConfigMixin): def __init__(self, activation_scheme: str='dynamic', weight_block_size: Tuple[int, int]=(128, 128), modules_to_not_convert: Optional[List]=None, **kwargs): self.quant_method = QuantizationMethod.FP8 self.modules_to_not_convert = modules_to_not_convert self.activation_scheme = activation_scheme self.weight_block_size = weight_block_size self.post_init() def post_init(self): self.activation_scheme = self.activation_scheme.lower() if self.activation_scheme not in ['dynamic']: raise ValueError(f'Activation scheme {self.activation_scheme} not supported') if len(self.weight_block_size) != 2: raise ValueError('weight_block_size must be a tuple of two integers') if self.weight_block_size[0] <= 0 or self.weight_block_size[1] <= 0: raise ValueError('weight_block_size must be a tuple of two positive integers')
FineGrainedFP8Config is a configuration class for fine-grained FP8 quantization used mainly for deepseek models. Args: activation_scheme (`str`, *optional*, defaults to `"dynamic"`): The scheme used for activation, the defaults and only support scheme for now is "dynamic". weight_block_size (`typing.Tuple[int, int]`, *optional*, defaults to `(128, 128)`): The size of the weight blocks for quantization, default is (128, 128). modules_to_not_convert (`list`, *optional*): A list of module names that should not be converted during quantization.
github-repos
def old_collective_correlation( self ): if self.has_run: return self.atoms.collective_dr_squared() / float( self.number_of_jumps ) else: return None
Returns the collective correlation factor, f_I Args: None Returns: (Float): The collective correlation factor, f_I. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jumps distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use self.collective_correlation
juraj-google-style
def validate_all_values_for_key_in_obj(obj, key, validation_fun):
    for vkey, value in obj.items():
        if vkey == key:
            validation_fun(value)
        elif isinstance(value, dict):
            validate_all_values_for_key_in_obj(value, key, validation_fun)
        elif isinstance(value, list):
            validate_all_values_for_key_in_list(value, key, validation_fun)
Validate value for all (nested) occurrence of `key` in `obj` using `validation_fun`. Args: obj (dict): dictionary object. key (str): key whose value is to be validated. validation_fun (function): function used to validate the value of `key`. Raises: ValidationError: `validation_fun` will raise this error on failure
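An illustrative call (the companion `validate_all_values_for_key_in_list` helper is assumed to exist in the same module, since the code above calls it; `ValueError` stands in for the library's ValidationError):

def must_be_positive(value):
    if value <= 0:
        raise ValueError('amount must be positive')  # stand-in for ValidationError

obj = {'amount': 3, 'details': {'amount': 5}, 'items': [{'amount': 7}]}
validate_all_values_for_key_in_obj(obj, 'amount', must_be_positive)  # passes silently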
juraj-google-style
def validate_element(self, value): if not isinstance(value, self.type): if isinstance(value, six.integer_types) and self.type == float: return float(value) if value is None: if self.required: raise ValidationError('Required field is missing') else: try: name = self.name except AttributeError: raise ValidationError('Expected type %s for %s, ' 'found %s (type %s)' % (self.type, self.__class__.__name__, value, type(value))) else: raise ValidationError( 'Expected type %s for field %s, found %s (type %s)' % (self.type, name, value, type(value))) return value
Validate single element of field. This is different from validate in that it is used on individual values of repeated fields. Args: value: Value to validate. Returns: The value casted in the expected type. Raises: ValidationError if value is not expected type.
juraj-google-style
def _validate_device_existence(serials): valid_ad_identifiers = list_adb_devices() + list_adb_devices_by_usb_id() + list_fastboot_devices() for serial in serials: if serial not in valid_ad_identifiers: raise Error(f'Android device serial "{serial}" is specified in config but is not reachable.')
Validate that all the devices specified by the configs can be reached. Args: serials: list of strings, the serials of all the devices that are expected to exist.
github-repos
def merge_call(self, merge_fn, args=(), kwargs=None): require_replica_context(self) if kwargs is None: kwargs = {} merge_fn = autograph.tf_convert(merge_fn, autograph_ctx.control_status_ctx(), convert_by_default=False) return self._merge_call(merge_fn, args, kwargs)
Merge args across replicas and run `merge_fn` in a cross-replica context. This allows communication and coordination when there are multiple calls to the step_fn triggered by a call to `strategy.run(step_fn, ...)`. See `tf.distribute.Strategy.run` for an explanation. If not inside a distributed scope, this is equivalent to: ``` strategy = tf.distribute.get_strategy() with cross-replica-context(strategy): return merge_fn(strategy, *args, **kwargs) ``` Args: merge_fn: Function that joins arguments from threads that are given as PerReplica. It accepts `tf.distribute.Strategy` object as the first argument. args: List or tuple with positional per-thread arguments for `merge_fn`. kwargs: Dict with keyword per-thread arguments for `merge_fn`. Returns: The return value of `merge_fn`, except for `PerReplica` values which are unpacked.
github-repos
def apply_product_config(config):
    cot_product = config['cot_product']
    for key in config:
        if isinstance(config[key], Mapping) and 'by-cot-product' in config[key]:
            try:
                config[key] = config[key]['by-cot-product'][cot_product]
            except KeyError:
                raise ConfigError(
                    "Product {} not specified for key {}".format(cot_product, key))
    return config
Apply config values that are keyed by `cot_product`. This modifies the passed in configuration. Args: config dict: the config to apply cot_product keying too Returns: dict
juraj-google-style
def get_counters(self):
    with self._lock:
        return self.counters.values()
Returns the current set of counters. Returns: An iterable that contains the current set of counters. To make sure that multiple threads can iterate over the set of counters, we return a new iterable here. Note that the actual set of counters may get modified after this method returns hence the returned iterable may be stale.
github-repos
def energy(self, spins, break_aux_symmetry=True): subtheta = self.theta.copy() subtheta.fix_variables(spins) av = next(self._auxvar_counter) auxvars = {v: Symbol('aux{}_{}'.format(av, v), BOOL) for v in subtheta.linear} if (break_aux_symmetry and (av == 0)): self.assertions.update(set(auxvars.values())) trees = self._trees if (not trees): assert ((not subtheta.linear) and (not subtheta.quadratic)) return subtheta.offset energy = Plus(self.message(trees, {}, subtheta, auxvars), subtheta.offset) return energy
A formula for the exact energy of Theta with spins fixed. Args: spins (dict): Spin values for a subset of the variables in Theta. break_aux_symmetry (bool, optional): Default True. If True, break the aux variable symmetry by setting all aux variable to 1 for one of the feasible configurations. If the energy ranges are not symmetric then this can make finding models impossible. Returns: Formula for the exact energy of Theta with spins fixed.
codesearchnet
def __init__(self, tcex, domain, data_type, mapping=None): self.tcex = tcex self.domain = domain self.data_type = data_type self.mapping = mapping or {'dynamic': False} if self.tcex.default_args.tc_token is None: raise RuntimeError( 'The DataModel TcEx Module requires a Token to interact with the ' 'ThreatConnect platform.' ) self._create_index() self._update_mappings()
Initialize class properties. Args: tcex ([type]): [description] domain (str): A value of “system”, “organization”, or “local”. data_type (str): A free form type name for the data. mapping (dict, optional): Defaults to None. Elasticsearch mappings data. Raises: RuntimeError: [description]
juraj-google-style
def has_no_current_path(self, path, **kwargs):
    try:
        return self.assert_no_current_path(path, **kwargs)
    except ExpectationNotMet:
        return False
Checks if the page doesn't have the given path. Args: path (str | RegexObject): The string or regex that the current "path" should match. **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`. Returns: bool: Whether it doesn't match.
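A sketch of how this might read in a test (assumes a capybara-py style `page` object, which is where this method lives):

import re

if page.has_no_current_path('/login'):
    print('not on the login page')

print(page.has_no_current_path(re.compile(r'^/admin')))  # regex paths are also accepted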
juraj-google-style
def add_init_container(self, name, image, security_context, init_environment, volume_mounts ): self.init_containers.append( { 'name': name, 'image': image, 'securityContext': security_context, 'env': init_environment, 'volumeMounts': volume_mounts } )
Adds an init container to the launched pod. useful for pre- Args: name (str): image (str): security_context (dict): init_environment (dict): volume_mounts (dict): Returns:
juraj-google-style
def remove_listener(self, event, listener):
    with contextlib.suppress(ValueError):
        self._listeners[event].remove(listener)
        return True

    with contextlib.suppress(ValueError):
        self._once[event].remove(listener)
        return True

    return False
Remove a listener from the emitter. Args: event (str): The event name on which the listener is bound. listener: A reference to the same object given to add_listener. Returns: bool: True if a listener was removed else False. This method only removes one listener at a time. If a listener is attached multiple times then this method must be called repeatedly. Additionally, this method removes listeners first from the those registered with 'on' or 'add_listener'. If none are found it continue to remove afterwards from those added with 'once'.
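A small usage sketch (assuming the emitter exposes the usual `on`/`emit` companions referenced in the docstring):

def on_data(payload):
    print('got', payload)

emitter.on('data', on_data)                        # registered once
removed = emitter.remove_listener('data', on_data)
print(removed)                                     # True
print(emitter.remove_listener('data', on_data))    # False -- nothing left to remove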
codesearchnet
def project_and_occlude_texture(texture, surface, angle=DEFAULT_ANGLE): projected_surface = project_surface(surface, angle) projected_surface = _remove_hidden_parts(projected_surface) texture_y = map_texture_to_surface(texture, projected_surface) texture_x, _ = texture return texture_x, texture_y
Projects a texture onto a surface with occluded areas removed. Args: texture (texture): the texture to map to the projected surface surface (surface): the surface to project angle (float): the angle to project at, in degrees (0 = overhead, 90 = side view) Returns: layer: A layer.
juraj-google-style
def poke_native(getstate): def poke(service, objname, obj, container, visited=None, _stack=None): service.pokeNative(objname, getstate(obj), container) return poke
Serializer factory for types which state can be natively serialized. Arguments: getstate (callable): takes an object and returns the object's state to be passed to `pokeNative`. Returns: callable: serializer (`poke` routine).
codesearchnet
def event(self, name, owner=None, **kwargs):
    return Event(self.tcex, name, owner=owner, **kwargs)
Create the Event TI object. Args: name: **kwargs: Return:
codesearchnet
def to_gpx(self): gpx_segments = [] for segment in self.segments: gpx_points = [] for point in segment.points: time = '' if point.time: iso_time = point.time.isoformat().split('.')[0] time = ('<time>%s</time>' % iso_time) gpx_points.append((u'<trkpt lat="%f" lon="%f">%s</trkpt>' % (point.lat, point.lon, time))) points = u'\n\t\t\t'.join(gpx_points) gpx_segments.append((u'\t\t<trkseg>\n\t\t\t%s\n\t\t</trkseg>' % points)) segments = u'\t\n'.join(gpx_segments) content = [u'<?xml version="1.0" encoding="UTF-8"?>', u'<gpx xmlns:xsi="http: return u'\n'.join(content)
Converts track to a GPX format Uses GPXPY library as an intermediate format Returns: A string with the GPX/XML track
codesearchnet
def load_pickled_model(filename, dirname=None): if (dirname is None): pkg_filename = pkgutil.get_loader('dragnet').get_filename('dragnet') pkg_dirname = os.path.dirname(pkg_filename) dirname = os.path.join(pkg_dirname, 'pickled_models', model_path) filepath = os.path.join(dirname, filename) return joblib.load(filepath)
Load a pickled ``Extractor`` model from disk. Args: filename (str): Name of pickled model file under ``dirname``. dirname (str): Name of directory on disk containing the pickled model. If None, dragnet's default pickled model directory is used: /path/to/dragnet/pickled_models/[PY_VERSION]_[SKLEARN_VERSION] Returns: :class:`dragnet.extractor.Extractor`
codesearchnet
def get_percentage(a, b, i=False, r=False):
    if (i is False) and (r is True):
        percentage = round(100.0 * (float(a) / b), 2)
    elif ((i is True) and (r is True)) or ((i is True) and (r is False)):
        percentage = int(round(100 * (float(a) / b)))
        if r is False:
            warnings.warn('If integer is set to True and Round is set to False, '
                          'you will still get a rounded number if you pass '
                          'floating point numbers as arguments.')
    else:
        percentage = 100.0 * (float(a) / b)
    return percentage
Finds the percentage of one number over another. Args: a: The number that is a percent, int or float. b: The base number that a is a percent of, int or float. i: Optional boolean integer. True if the user wants the result returned as a whole number. Assumes False. r: Optional boolean round. True if the user wants the result rounded. Rounds to the second decimal point on floating point numbers. Assumes False. Returns: The argument a as a percentage of b. Throws a warning if integer is set to True and round is set to False.
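Worked examples of the flag combinations described above:

print(get_percentage(1, 3))                  # 33.33333333333333
print(get_percentage(1, 3, r=True))          # 33.33
print(get_percentage(2, 3, i=True, r=True))  # 67
print(get_percentage(1, 3, i=True))          # 33, plus a warning about implicit rounding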
codesearchnet
def _indicator(self, indicator_data): if isinstance(indicator_data, dict): xid = indicator_data.get('xid') else: xid = indicator_data.xid if (self.indicators.get(xid) is not None): indicator_data = self.indicators.get(xid) elif (self.indicators_shelf.get(xid) is not None): indicator_data = self.indicators_shelf.get(xid) else: self.indicators[xid] = indicator_data return indicator_data
Return previously stored indicator or new indicator. Args: indicator_data (dict|obj): An Indicator dict or instance of Indicator object. Returns: dict|obj: The new Indicator dict/object or the previously stored dict/object.
codesearchnet
def crowding_distance_sort(frontier: List[pg.DNA]) -> List[pg.DNA]: if len(frontier) <= 1: return frontier individual_num = len(frontier) objective_num = len(base.get_fitness(frontier[0])) distances = [0.0] * individual_num dist = [list(range(individual_num)) for i in range(objective_num)] for i in range(objective_num): dist[i] = sorted(dist[i], key=lambda idx: base.get_fitness(frontier[idx])[i]) max_value = base.get_fitness(frontier[dist[i][individual_num - 1]])[i] min_value = base.get_fitness(frontier[dist[i][0]])[i] for j in range(individual_num): if j == 0 or j == individual_num - 1: distances[dist[i][j]] = objective_num elif max_value > min_value: distances[dist[i][j]] += (base.get_fitness(frontier[dist[i][j + 1]])[i] - base.get_fitness(frontier[dist[i][j - 1]])[i]) / (max_value - min_value) idx_arr = list(range(individual_num)) idx_arr = sorted(idx_arr, key=lambda idx: distances[idx], reverse=True) return [frontier[idx_arr[i]] for i in range(individual_num)]
Algorithm crowding-distance-assignment implementation. Check section III B in the original paper. Args: frontier: A list of Individual that need to be sorted. Returns: sorted list of the original list.
github-repos
def set_icon_file(self, filename, rel="icon"): mimetype, encoding = mimetypes.guess_type(filename) self.add_child("favicon", '<link rel="%s" href="%s" type="%s" />'%(rel, filename, mimetype))
Allows to define an icon for the App Args: filename (str): the resource file name (ie. "/res:myicon.png") rel (str): leave it unchanged (standard "icon")
juraj-google-style
def add_cmd_handler(self, handler_obj): for field in dir(handler_obj): if field.startswith('cmd_'): cmd = field[4:] fn = getattr(handler_obj, field) if (cmd in self.cmds): print('Replacing {} with {}'.format(_handler_name(self.cmds[cmd]), _handler_name(fn)), file=sys.stderr) self.cmds[cmd] = fn
Registers a new command handler object. All methods on `handler_obj` whose name starts with "cmd_" are registered as a GTP command. For example, the method cmd_genmove will be invoked when the engine receives a genmove command. Args: handler_obj: the handler object to register.
codesearchnet
def __getitem__(self, key):
    if isinstance(key, str):
        if key not in self.columns:
            raise AttributeError('Key not in columns.')
        return [row[key] if key in row else None for row in self.rows]
    elif isinstance(key, (int, slice)):
        return self.rows[key]
    else:
        raise TypeError('Invalid argument type.')
Get a column or row from the dataset. Args: key (str or int): String referencing a column or integer referencing a row Returns: :class:`list` or :class:`dict`: List of column values or a dict representing a row
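Assuming some instance `ds` of this class whose `rows` are dicts and whose `columns` lists their keys (consistent with the code above), indexing behaves like this:

ds.rows = [{'name': 'ada', 'age': 36}, {'name': 'grace', 'age': 45}]
ds.columns = ['name', 'age']

print(ds['name'])   # ['ada', 'grace']             -- column access by string
print(ds[0])        # {'name': 'ada', 'age': 36}   -- row access by int
print(ds[0:1])      # [{'name': 'ada', 'age': 36}] -- slices return lists of rows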
juraj-google-style
def loader(self, file_name, bad_steps=None, **kwargs): new_tests = [] if not os.path.isfile(file_name): self.logger.info("Missing file_\n %s" % file_name) return None filesize = os.path.getsize(file_name) hfilesize = humanize_bytes(filesize) txt = "Filesize: %i (%s)" % (filesize, hfilesize) self.logger.debug(txt) temp_dir = tempfile.gettempdir() temp_filename = os.path.join(temp_dir, os.path.basename(file_name)) shutil.copy2(file_name, temp_dir) self.logger.debug("tmp file: %s" % temp_filename) self.logger.debug("HERE WE LOAD THE DATA") data = DataSet() fid = FileID(file_name) test_no = 1 data.test_no = test_no data.loaded_from = file_name data.channel_index = None data.channel_number = None data.creator = None data.item_ID = None data.schedule_file_name = None data.start_datetime = None data.test_ID = None data.test_name = None data.raw_data_files.append(fid) self.logger.debug("reading raw-data") self.mpr_data = None self.mpr_log = None self.mpr_settings = None self._load_mpr_data(temp_filename, bad_steps) length_of_test = self.mpr_data.shape[0] self.logger.debug(f"length of test: {length_of_test}") self.logger.debug("renaming columns") self._rename_headers() summary_df = self._create_summary_data() if summary_df.empty: txt = "\nCould not find any summary (stats-file)!" txt += " (summary_df.empty = True)" txt += "\n -> issue make_summary(use_cellpy_stat_file=False)" warnings.warn(txt) data.dfsummary = summary_df data.dfdata = self.mpr_data data.raw_data_files_length.append(length_of_test) new_tests.append(data) self._clean_up(temp_filename) return new_tests
Loads data from biologics .mpr files. Args: file_name (str): path to .res file. bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c) to skip loading. Returns: new_tests (list of data objects)
juraj-google-style
def apply_activation( books, x, activation, activation_args=(), activation_kwargs=None): if activation is None: return x if activation_kwargs is None: activation_kwargs = {} y = activation(x, *activation_args, **activation_kwargs) if activation in (tf.nn.relu, functions.leaky_relu, functions.softplus): books.add_scalar_summary( tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)), '%s/zeros' % y.op.name) elif activation is tf.nn.relu6: books.add_scalar_summary( tf.reduce_mean(tf.cast(tf.less(x, 0.0), tf.float32)), '%s/zeros' % y.op.name) books.add_scalar_summary( tf.reduce_mean(tf.cast(tf.greater(x, 6.0), tf.float32)), '%s/sixes' % y.op.name) elif activation in (functions.l2_normalize, tf.nn.l2_normalize, functions.l1_normalize): books.add_scalar_summary( tf.reduce_mean(tf.sqrt(tf.reduce_sum( tf.square(x), 1))), '%s/length' % y.op.name) return y
Returns activation(x, *activation_args, **activation_kwargs). This applies the given activation and adds useful summaries specific to the activation. Args: books: The bookkeeper. x: The tensor to apply activation to. activation: An activation function. activation_args: Optional additional arguments for the activation. activation_kwargs: Optional keyword args for activation. Returns: A tensor with activation applied to x.
juraj-google-style
def get_bool(self):
    return self.fdp.ConsumeBool()
Consume a bool. Returns: Consumed a bool based on input bytes and constraints.
github-repos
def set_metadata(self, token, data): req = requests.post(self.meta_url("metadata/ocp/set/" + token), json=data, verify=False) if req.status_code != 200: raise RemoteDataUploadError( "Could not upload metadata: " + req.json()['message'] ) return req.json()
Insert new metadata into the OCP metadata database. Arguments: token (str): Token of the datum to set data (str): A dictionary to insert as metadata. Include `secret`. Returns: json: Info of the inserted ID (convenience) or an error message. Throws: RemoteDataUploadError: If the token is already populated, or if there is an issue with your specified `secret` key.
juraj-google-style
def get_multi_dataset(datasets, pmf=None): pmf = (tf.fill([len(datasets)], (1.0 / len(datasets))) if (pmf is None) else pmf) samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets] sample = (lambda _: categorical_case(pmf, samplers)) return tf.data.Dataset.from_tensors([]).repeat().map(sample)
Returns a Dataset that samples records from one or more Datasets. Args: datasets: A list of one or more Dataset objects to sample from. pmf: A tensor of shape [len(datasets)], the probabilities to sample each dataset with. This tensor is often constructed with the global_step. If this is None, we sample from the datasets uniformly at random. Returns: A Dataset object containing records from multiple datasets. Note that because this dataset iterates through other datasets it is stateful, thus you will need to call make_initializable_iterator instead of make_one_shot_iterator.
codesearchnet
def GetAutomountMap(self, since=None, location=None): if location is None: self.log.error('A location is required to retrieve an automount map!') raise error.EmptyMap autofs_filter = '(objectclass=automount)' return AutomountUpdateGetter(self.conf).GetUpdates(source=self, search_base=location, search_filter=autofs_filter, search_scope='one', since=since)
Return an automount map from this source. Note that autmount maps are stored in multiple locations, thus we expect a caller to provide a location. We also follow the automount spec and set our search scope to be 'one'. Args: since: Get data only changed since this timestamp (inclusive) or None for all data. location: Currently a string containing our search base, later we may support hostname and additional parameters. Returns: instance of AutomountMap
github-repos
def uniform_distribution(number_of_nodes):
    number_of_states = 2 ** number_of_nodes
    return (np.ones(number_of_states) /
            number_of_states).reshape([2] * number_of_nodes)
Return the uniform distribution for a set of binary nodes, indexed by state (so there is one dimension per node, the size of which is the number of possible states for that node). Args: nodes (np.ndarray): A set of indices of binary nodes. Returns: np.ndarray: The uniform distribution over the set of nodes.
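A concrete check of the shape and values:

import numpy as np

dist = uniform_distribution(2)
print(dist.shape)   # (2, 2) -- one dimension per node
print(dist)
# [[0.25 0.25]
#  [0.25 0.25]]
print(dist.sum())   # 1.0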
juraj-google-style
def _ensure_list(tensor_or_list):
    if isinstance(tensor_or_list, (list, tuple)):
        return list(tensor_or_list), True
    return [tensor_or_list], False
Converts the input arg to a list if it is not a list already. Args: tensor_or_list: A `Tensor` or a Python list of `Tensor`s. The argument to convert to a list of `Tensor`s. Returns: A tuple of two elements. The first is a Python list of `Tensor`s containing the original arguments. The second is a boolean indicating whether the original argument was a list or tuple already.
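Behaviour in both cases (plain Python objects shown for brevity; tensors behave the same way):

print(_ensure_list([1, 2, 3]))  # ([1, 2, 3], True)
print(_ensure_list(7))          # ([7], False)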
codesearchnet
def temp_dir(folder=None, delete=True):
    tempdir = get_temp_dir()
    if folder:
        tempdir = join(tempdir, folder)
        if not exists(tempdir):
            makedirs(tempdir)
    try:
        yield tempdir
    finally:
        if delete:
            rmtree(tempdir)
Get a temporary directory optionally with folder appended (and created if it doesn't exist) Args: folder (Optional[str]): Folder to create in temporary folder. Defaults to None. delete (bool): Whether to delete folder on exiting with statement Returns: str: A temporary directory
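Typical usage as a context manager (assuming the generator above is wrapped with `contextlib.contextmanager`, as the `with` semantics in the docstring imply):

import os

with temp_dir('scratch') as tmp:
    with open(os.path.join(tmp, 'notes.txt'), 'w') as f:
        f.write('temporary data')
# the 'scratch' folder and its contents are removed on exit because delete=True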
juraj-google-style
def _apply_shadow_vars(avg_grads): ps_var_grads = [] for grad, var in avg_grads: assert var.name.startswith('tower'), var.name my_name = '/'.join(var.name.split('/')[1:]) my_name = get_op_tensor_name(my_name)[0] new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype, initializer=var.initial_value, trainable=True) ps_var_grads.append((grad, new_v)) return ps_var_grads
Create shadow variables on PS, and replace variables in avg_grads by these shadow variables. Args: avg_grads: list of (grad, var) tuples
juraj-google-style
def from_xmrs(cls, xmrs, **kwargs):
    x = cls()
    x.__dict__.update(xmrs.__dict__)
    return x
Facilitate conversion among subclasses. Args: xmrs (:class:`Xmrs`): instance to convert from; possibly an instance of a subclass, such as :class:`Mrs` or :class:`Dmrs` **kwargs: additional keyword arguments that may be used by a subclass's redefinition of :meth:`from_xmrs`.
juraj-google-style
def add_item(name, command, system_wide=False):
    desktop_env = system.get_name()
    command_is_file = False
    if os.path.isfile(command):
        command_is_file = True
        if not (desktop_env == 'windows'):
            sp.Popen(['chmod +x %s' % command], shell=True)

    if desktop_env == 'windows':
        import winreg
        if system_wide:
            startup_dir = os.path.join(
                winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'),
                'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        else:
            startup_dir = os.path.join(
                get_config_dir()[0],
                'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        if not command_is_file:
            with open(os.path.join(startup_dir, name + '.bat'), 'w') as f:
                f.write(command)
        else:
            shutil.copy(command, startup_dir)
    elif desktop_env == 'mac':
        sp.Popen(['launchctl submit -l %s -- %s' % (name, command)], shell=True)
    elif desktop_env == 'unknown':
        if system_wide:
            login_file = '/etc/profile'
        else:
            login_file = os.path.expanduser('~/.profile')
        with open(login_file, 'a') as f:
            f.write(command)
    else:
        try:
            desktop_file_name = name + '.desktop'
            startup_file = os.path.join(
                get_config_dir('autostart', system_wide=system_wide)[0],
                desktop_file_name)
            desktop_str = desktopfile.construct(
                name=name,
                exec_=command,
                additional_opts={'X-GNOME-Autostart-enabled': 'true'})
            with open(startup_file, 'w') as f:
                f.write(desktop_str)
        except Exception:
            pass
Adds a program to startup. Adds a program to user startup. Args: name (str) : The name of the startup entry. command (str) : The command to run. system_wide (bool): Add to system-wide startup. Note: ``system_wide`` requires superuser/admin privileges.
codesearchnet
def reduce(self, initial_state, reduce_func, name=None): with ops.name_scope('initial_state'): initial_state = structure.normalize_element(initial_state) state_structure = structure.type_spec_from_value(initial_state) need_to_rerun = True while need_to_rerun: wrapped_func = structured_function.StructuredFunctionWrapper(reduce_func, 'reduce()', input_structure=(state_structure, self.element_spec), add_to_graph=False) output_classes = wrapped_func.output_classes state_classes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), state_structure) for new_state_class, state_class in zip(nest.flatten(output_classes), nest.flatten(state_classes)): if not issubclass(new_state_class, state_class): raise TypeError(f'The element classes for the new state must match the initial state. Expected {state_classes} but got {wrapped_func.output_classes}.') output_types = wrapped_func.output_types state_types = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), state_structure) for new_state_type, state_type in zip(nest.flatten(output_types), nest.flatten(state_types)): if new_state_type != state_type: raise TypeError(f'The element types for the new state must match the initial state. Expected {state_types} but got {wrapped_func.output_types}.') output_shapes = wrapped_func.output_shapes state_shapes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), state_structure) flat_state_shapes = nest.flatten(state_shapes) flat_new_state_shapes = nest.flatten(output_shapes) weakened_state_shapes = [original.most_specific_compatible_shape(new) for original, new in zip(flat_state_shapes, flat_new_state_shapes)] need_to_rerun = False for original_shape, weakened_shape in zip(flat_state_shapes, weakened_state_shapes): if original_shape.ndims is not None and (weakened_shape.ndims is None or original_shape.as_list() != weakened_shape.as_list()): need_to_rerun = True break if need_to_rerun: state_structure = structure.convert_legacy_structure(state_types, nest.pack_sequence_as(state_shapes, weakened_state_shapes), state_classes) reduce_func = wrapped_func.function reduce_func.add_to_graph(ops.get_default_graph()) dataset = self._apply_debug_options() metadata = dataset_metadata_pb2.Metadata() if name: metadata.name = _validate_and_encode(name) return structure.from_compatible_tensor_list(state_structure, gen_dataset_ops.reduce_dataset(dataset._variant_tensor, structure.to_tensor_list(state_structure, initial_state), reduce_func.captured_inputs, f=reduce_func, output_shapes=structure.get_flat_tensor_shapes(state_structure), output_types=structure.get_flat_tensor_types(state_structure), metadata=metadata.SerializeToString()))
Reduces the input dataset to a single element. The transformation calls `reduce_func` successively on every element of the input dataset until the dataset is exhausted, aggregating information in its internal state. The `initial_state` argument is used for the initial state and the final state is returned as the result. >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + ... 1).numpy().item() 5 >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + ... y).numpy().item() 10 Args: initial_state: An element representing the initial state of the transformation. reduce_func: A function that maps `(old_state, input_element)` to `new_state`. It must take two arguments and return a new element The structure of `new_state` must match the structure of `initial_state`. name: (Optional.) A name for the tf.data operation. Returns: A dataset element corresponding to the final state of the transformation.
github-repos
def update_query_params(uri, params):
    parts = urllib.parse.urlparse(uri)
    query_params = parse_unique_urlencoded(parts.query)
    query_params.update(params)
    new_query = urllib.parse.urlencode(query_params)
    new_parts = parts._replace(query=new_query)
    return urllib.parse.urlunparse(new_parts)
Updates a URI with new query parameters. If a given key from ``params`` is repeated in the ``uri``, then the URI will be considered invalid and an error will occur. If the URI is valid, then each value from ``params`` will replace the corresponding value in the query parameters (if it exists). Args: uri: string, A valid URI, with potential existing query parameters. params: dict, A dictionary of query parameters. Returns: The same URI but with the new query parameters added.
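A quick example of the replacement behaviour:

uri = 'http://example.com/path?x=1'
print(update_query_params(uri, {'x': '2', 'y': '3'}))
# http://example.com/path?x=2&y=3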
codesearchnet
def return_dict(self): output_dict = {} output_dict['general'] = self._iterate_through_class(self.general.__dict__) output_dict['figure'] = self._iterate_through_class(self.figure.__dict__) if (self.total_plots > 1): trans_dict = {str(i): self._iterate_through_class(axis.__dict__) for (i, axis) in enumerate(self.ax)} output_dict['plot_info'] = trans_dict else: output_dict['plot_info'] = {'0': self._iterate_through_class(self.ax.__dict__)} if self.print_input: print(output_dict) return output_dict
Output dictionary for ``make_plot.py`` input. Iterates through the entire MainContainer class turning its contents into dictionary form. This dictionary becomes the input for ``make_plot.py``. If `print_input` attribute is True, the entire dictionary will be printed prior to returning the dicitonary. Returns: - **output_dict** (*dict*): Dicitonary for input into ``make_plot.py``.
codesearchnet
def GetKeyByScriptHash(self, script_hash): contract = self.GetContract(script_hash) if contract: return self.GetKey(contract.PublicKeyHash) return None
Get the KeyPair belonging to the script hash. Args: script_hash (UInt160): a bytearray (len 20) representing the public key. Returns: KeyPair: If successful, the KeyPair belonging to the public key hash, otherwise None
juraj-google-style
def get_user_roles(self, user): return self.service.get_user_roles( user, self.url_prefix, self.auth, self.session, self.session_send_opts)
Get roles associated with the given user. Args: user (string): User name. Returns: (list): List of roles that user has. Raises: requests.HTTPError on failure.
juraj-google-style
def report(self, name, **kwargs):
    group_obj = Report(name, **kwargs)
    return self._group(group_obj)
Add Report data to Batch object. Args: name (str): The name for this Group. file_name (str): The name for the attached file for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. file_content (str;method, kwargs): The file contents or callback method to retrieve file content. publish_date (str, kwargs): The publish datetime expression for this Group. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Report.
codesearchnet
def dbmin20years(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `dbmin20years`'.format(value)) self._dbmin20years = value
Corresponds to IDD Field `dbmin20years` 20-year return period values for minimum extreme dry-bulb temperature Args: value (float): value for IDD Field `dbmin20years` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def __call__(self, request: beam.Row, *args, **kwargs): embedded_query = request['text'] query = {'size': self.size, 'query': {'knn': {self.vector_field: {'vector': embedded_query, 'k': self.k}}}} results = self.client.search(body=query, index=self.index_name) logger.info('Enrichment_results', results) return (beam.Row(text=embedded_query), beam.Row(docs=results))
Reads a row from the opensearch Vector DB and returns a `Tuple` of request and response. Args: request: the input `beam.Row` to enrich.
github-repos
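For context, a hedged sketch of the k-NN request body this handler builds; the client setup, index name and vector field are illustrative assumptions, not values from the original code:

# Hedged sketch: the shape of the k-NN search request used above.
from opensearchpy import OpenSearch

client = OpenSearch(hosts=[{'host': 'localhost', 'port': 9200}])
embedded_query = [0.12, 0.37, 0.88]  # toy embedding vector

query = {
    'size': 5,
    'query': {'knn': {'my_vector_field': {'vector': embedded_query, 'k': 5}}},
}
results = client.search(body=query, index='my-index')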
def _build_key_wrapping_specification(self, value): if (value is None): return None if (not isinstance(value, dict)): raise TypeError('Key wrapping specification must be a dictionary.') encryption_key_info = self._build_encryption_key_information(value.get('encryption_key_information')) mac_signature_key_info = self._build_mac_signature_key_information(value.get('mac_signature_key_information')) key_wrapping_specification = cobjects.KeyWrappingSpecification(wrapping_method=value.get('wrapping_method'), encryption_key_information=encryption_key_info, mac_signature_key_information=mac_signature_key_info, attribute_names=value.get('attribute_names'), encoding_option=value.get('encoding_option')) return key_wrapping_specification
Build a KeyWrappingSpecification struct from a dictionary. Args: value (dict): A dictionary containing the key/value pairs for a KeyWrappingSpecification struct. Returns: KeyWrappingSpecification: a KeyWrappingSpecification struct Raises: TypeError: if the input argument is invalid
codesearchnet
def memory_write8(self, addr, data, zone=None): return self.memory_write(addr, data, zone, 8)
Writes bytes to memory of a target system. Args: self (JLink): the ``JLink`` instance addr (int): start address to write to data (list): list of bytes to write zone (str): optional memory zone to access Returns: Number of bytes written to target. Raises: JLinkException: on memory access error.
codesearchnet
def _transform_col(self, x, i): return x.fillna(NAN_INT).map(self.label_encoders[i]).fillna(0)
Encode one categorical column into labels. Args: x (pandas.Series): a categorical column to encode i (int): column index Returns: x (pandas.Series): a column with labels.
juraj-google-style
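A standalone sketch of the same label-encoding idea; NAN_INT and the per-column encoder dict stand in for the attributes the class above relies on:

# Hedged sketch: encode one categorical column into integer labels,
# sending missing and unseen categories to 0 (mirrors the method above).
import pandas as pd

NAN_INT = -1  # stand-in for the module-level constant used above
label_encoder = {'red': 1, 'green': 2, 'blue': 3, NAN_INT: 0}

x = pd.Series(['red', None, 'blue', 'purple'])
encoded = x.fillna(NAN_INT).map(label_encoder).fillna(0)
print(encoded.tolist())  # labels: 1, 0, 3, 0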
def remat(f): def wrapped(*args, **kwargs): return torch.utils.checkpoint.checkpoint(f, *args, use_reentrant=False) return wrapped
Implementation of rematerialization. Args: f: The function or operation to rematerialize. Returns: A function wrapping f that defines a custom gradient, which recomputes f on the backwards pass of a gradient call.
github-repos
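A hedged usage sketch of the wrapper above: activations of the wrapped function are recomputed during the backward pass rather than stored.

# Hedged usage sketch of the `remat` wrapper defined above.
import torch

def mlp_block(x):
    return torch.relu(x @ torch.ones(8, 8)).sum()

checkpointed = remat(mlp_block)

x = torch.randn(4, 8, requires_grad=True)
loss = checkpointed(x)
loss.backward()          # mlp_block runs again here to rebuild intermediates
print(x.grad.shape)      # torch.Size([4, 8])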
def creationlog(base, package, stackdepth=_def_stackdepth): @staticmethod def wrapnew(cls, *argl, **argd): global _atdepth_new, _cstack_new, streamlining origstream = None if not (decorating or streamlining): entry, _atdepth_new = _pre_create(cls, _atdepth_new, stackdepth, *argl, **argd) _cstack_new.append(cls) fqdn = cls.__fqdn__ if fqdn in _streamlines and _streamlines[fqdn]: msg.std("Streamlining {}.".format(fqdn), 2) origstream = streamlining streamlining = True try: if six.PY2: result = base.__old__(cls, *argl, **argd) else: if base.__old__ is object.__new__: result = base.__old__(cls) else: result = base.__old__(cls, *argl, **argd) except TypeError: import sys xcls, xerr = sys.exc_info()[0:2] referral = xerr.args[0].split()[-1] if ".__new__()" in referral: t = eval(referral.split('.')[0]) result = t.__new__(cls, *argl, **argd) else: raise result = None if result is not None and hasattr(cls, "__init__"): try: cls.__init__(result, *argl, **argd) except: print(cls, argl, argd) raise else: msg.err("Object initialize failed for {}.".format(base.__name__)) if origstream is not None: streamlining = origstream if not (decorating or streamlining): _cstack_new.pop() if len(_cstack_new) == 0: _atdepth_new = False _post_create(_atdepth_new, entry, result) return result return wrapnew
Decorator for wrapping the creation of class instances that are being logged by acorn. Args: base: base class used to call __new__ for the construction. package (str): name of (global) package the class belongs to. stackdepth (int): if the calling stack is less than this depth, then include the entry in the log; otherwise ignore it.
juraj-google-style
def input_shape(self): return nest.map_structure(backend.int_shape, self.input)
Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode.
github-repos
def ExpandWindowsPath(cls, path, environment_variables): if environment_variables is None: environment_variables = [] lookup_table = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.upper() attribute_value = environment_variable.value if not isinstance(attribute_value, py2to3.STRING_TYPES): continue lookup_table[attribute_name] = attribute_value path_segments = path.split('\\') for index, path_segment in enumerate(list(path_segments)): if (len(path_segment) <= 2 or not path_segment.startswith('%') or not path_segment.endswith('%')): continue path_segment_upper_case = path_segment.upper() if path_segment_upper_case.startswith('%%ENVIRON_'): lookup_key = path_segment_upper_case[10:-2] else: lookup_key = path_segment_upper_case[1:-1] path_segment = lookup_table.get(lookup_key, path_segment) path_segment = path_segment.split('\\') expanded_path_segments = list(path_segments[:index]) expanded_path_segments.extend(path_segment) expanded_path_segments.extend(path_segments[index + 1:]) path_segments = expanded_path_segments if cls._IsWindowsDrivePathSegment(path_segments[0]): path_segments[0] = '' return '\\'.join(path_segments)
Expands a Windows path containing environment variables. Args: path (str): Windows path with environment variables. environment_variables (list[EnvironmentVariableArtifact]): environment variables. Returns: str: expanded Windows path.
juraj-google-style
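A hedged sketch of calling this classmethod with a minimal stand-in for EnvironmentVariableArtifact; the containing class name WindowsPathResolver is an assumption and may differ in the original project:

# Hedged sketch: exercising ExpandWindowsPath with a tiny stand-in object.
import collections

EnvVar = collections.namedtuple('EnvVar', ['name', 'value'])
environment_variables = [EnvVar(name='SystemRoot', value='C:\\Windows')]

expanded = WindowsPathResolver.ExpandWindowsPath(  # class name is assumed
    '%SystemRoot%\\System32', environment_variables)
# %SystemRoot% is replaced via the lookup table; note the method also
# normalises a leading drive segment (see _IsWindowsDrivePathSegment above).
print(expanded)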
def formula_double_format(afloat, ignore_ones=True, tol=1e-08): if (ignore_ones and (afloat == 1)): return '' elif (abs((afloat - int(afloat))) < tol): return str(int(afloat)) else: return str(round(afloat, 8))
This function is used to make pretty formulas by formatting the amounts. Instead of Li1.0 Fe1.0 P1.0 O4.0, you get LiFePO4. Args: afloat (float): a float ignore_ones (bool): if true, floats of 1 are ignored. tol (float): Tolerance to round to nearest int. i.e. 2.0000000001 -> 2 Returns: A string representation of the float for formulas.
codesearchnet
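A quick usage sketch of the formatter above: amounts of exactly 1 are dropped and near-integers lose their decimal point.

# Usage sketch for formula_double_format (defined above).
amounts = {'Li': 1.0, 'Fe': 1.0, 'P': 1.0, 'O': 4.0000000001}
formula = ''.join('{}{}'.format(el, formula_double_format(amt))
                  for el, amt in amounts.items())
print(formula)  # LiFePO4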
def singleprint_from_fingerprint_proto(export_dir: str) -> str: try: return fingerprinting_pywrap.SingleprintFromFP(export_dir) except FingerprintException as e: raise ValueError(e) from None
Returns the singleprint of `fingerprint.pb` in `export_dir`. Args: export_dir: The directory that contains `fingerprint.pb`. Returns: A string containing the singleprint of `fingerprint.pb` in `export_dir`. Raises: ValueError: If a valid singleprint cannot be constructed from `fingerprint.pb`.
github-repos
def get_template_files(self, template_id, filename): url = (self.TEMPLATE_GET_FILES_URL + template_id) request = self._get_request() return request.get_file(url, filename)
Download a PDF copy of a template's original files Args: template_id (str): The id of the template to retrieve. filename (str): Filename to save the PDF file to. This should be a full path. Returns: Returns a PDF file
codesearchnet
def ref_for_message_type(self, message_type):
    name = self.__normalized_name(message_type)
    if name not in self.__schemas:
        # Interpolate the name into the message instead of passing it as a
        # second exception argument, so the error reads as intended.
        raise KeyError('Message has not been parsed: %s' % name)
    return name
Returns the JSON Schema id for the given message. Args: message_type: protorpc.message.Message class to be parsed. Returns: string, The JSON Schema id. Raises: KeyError: if the message hasn't been parsed via add_message().
juraj-google-style
def VerifyRow(self, parser_mediator, row): if row['md5'] != '0' and not self._MD5_RE.match(row['md5']): return False for column_name in ( 'uid', 'gid', 'size', 'atime', 'mtime', 'ctime', 'crtime'): column_value = row.get(column_name, None) if not column_value: continue try: int(column_value, 10) except (TypeError, ValueError): return False return True
Verifies if a line of the file is in the expected format. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. row (dict[str, str]): fields of a single row, as specified in COLUMNS. Returns: bool: True if this is the correct parser, False otherwise.
juraj-google-style
def run(self, command, block=True, cwd=None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE): if cwd is None: cwd = self.cwd return ShellCommand(command=command, logger=self.logger, block=block, cwd=cwd, stdin=stdin, stdout=stdout, stderr=stderr).run()
Create an instance of :class:`~ShellCommand` and run it. Args: command (str): :class:`~ShellCommand` block (bool): See :class:`~ShellCommand` cwd (str): Override the runner cwd. Used by the :class:`~ShellCommand` instance
juraj-google-style
def lt(self, other, axis="columns", level=None): return self._binary_op("lt", other, axis=axis, level=level)
Checks element-wise that this is less than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the lt over. level: The MultiIndex level to apply lt over. Returns: A new DataFrame filled with Booleans.
juraj-google-style
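For illustration, the equivalent element-wise comparison in plain pandas behaves the same way (a hedged example, not the DataFrame class above):

# Hedged illustration with plain pandas: element-wise "less than".
import pandas as pd

df = pd.DataFrame({'a': [1, 5], 'b': [3, 2]})
mask = df.lt(3)
print(mask)  # a: [True, False], b: [False, True]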
def cysparse_real_type_from_real_cysparse_complex_type(cysparse_type): r_type = None if cysparse_type in ['COMPLEX64_t']: r_type = 'FLOAT32_t' elif cysparse_type in ['COMPLEX128_t']: r_type = 'FLOAT64_t' elif cysparse_type in ['COMPLEX256_t']: r_type = 'FLOAT128_t' else: raise TypeError("Not a recognized complex type") return r_type
Returns the **real** type corresponding to the real or imaginary part of a complex type. For instance: COMPLEX128_t -> FLOAT64_t Args: cysparse_type: a CySparse complex type string (``COMPLEX64_t``, ``COMPLEX128_t`` or ``COMPLEX256_t``).
juraj-google-style
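A usage sketch of the mapping above:

# Usage sketch for the type-mapping helper above.
for t in ('COMPLEX64_t', 'COMPLEX128_t', 'COMPLEX256_t'):
    print(t, '->', cysparse_real_type_from_real_cysparse_complex_type(t))
# COMPLEX64_t -> FLOAT32_t
# COMPLEX128_t -> FLOAT64_t
# COMPLEX256_t -> FLOAT128_t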
def barycentric_coords(coords, simplex): coords = np.atleast_2d(coords) t = np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None] all_but_one = np.transpose( np.linalg.solve(t, np.transpose(coords - simplex[-1]))) last_coord = 1 - np.sum(all_but_one, axis=-1)[:, None] return np.append(all_but_one, last_coord, axis=-1)
Converts a list of coordinates to barycentric coordinates, given a simplex with d+1 points. Only works for d >= 2. Args: coords: list of n coords to transform, shape should be (n,d) simplex: list of coordinates that form the simplex, shape should be (d+1, d) Returns: a LIST of barycentric coordinates (even if the original input was 1d)
juraj-google-style
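A usage sketch: barycentric coordinates of two points with respect to the unit triangle.

# Usage sketch for barycentric_coords (defined above).
import numpy as np

simplex = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # unit triangle
points = np.array([[0.25, 0.25], [1.0, 0.0]])
print(barycentric_coords(points, simplex))
# rows are [0.5, 0.25, 0.25] and [0.0, 1.0, 0.0]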
def last_modified(self) -> str: if ('last_modified' in self.attrs): return self.attrs['last_modified'] elif (self.mode == 'r+'): self.attrs['last_modified'] = timestamp() return self.attrs['last_modified'] return timestamp()
Return an ISO8601 timestamp indicating when the file was last modified Returns: An ISO8601 timestamp indicating when the file was last modified Remarks: If the file has no timestamp, and mode is 'r+', a new timestamp is created and returned. Otherwise, the current time in UTC is returned
codesearchnet
def with_resource_hints(self, **kwargs): self.get_resource_hints().update(resources.parse_resource_hints(kwargs)) return self
Adds resource hints to the :class:`PTransform`. Resource hints allow users to express constraints on the environment where the transform should be executed. Interpretation of the resource hints is defined by Beam Runners. Runners may ignore the unsupported hints. Args: **kwargs: key-value pairs describing hints and their values. Raises: ValueError: if provided hints are unknown to the SDK. See :mod:`apache_beam.transforms.resources` for a list of known hints. Returns: PTransform: A reference to the instance of this particular :class:`PTransform` object.
github-repos
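A hedged usage sketch; `min_ram` is one of the hints known to the SDK, and runners that do not support it will simply ignore it:

# Hedged usage sketch: attaching a resource hint to a transform.
import apache_beam as beam

with beam.Pipeline() as p:
    _ = (p
         | beam.Create([1, 2, 3])
         | beam.Map(lambda x: x * x).with_resource_hints(min_ram='4GB'))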
def _worker(self, constructor, conn): try: env = constructor() while True: try: if not conn.poll(0.1): continue message, payload = conn.recv() except (EOFError, KeyboardInterrupt): break if message == self._ACCESS: name = payload result = getattr(env, name) conn.send((self._RESULT, result)) continue if message == self._CALL: name, args, kwargs = payload result = getattr(env, name)(*args, **kwargs) conn.send((self._RESULT, result)) continue if message == self._CLOSE: assert payload is None break raise KeyError('Received message of unknown type {}'.format(message)) except Exception: stacktrace = ''.join(traceback.format_exception(*sys.exc_info())) tf.logging.error('Error in environment process: {}'.format(stacktrace)) conn.send((self._EXCEPTION, stacktrace)) conn.close()
The process waits for actions and sends back environment results. Args: constructor: Constructor for the OpenAI Gym environment. conn: Connection for communication to the main process. Raises: KeyError: When receiving a message of unknown type.
juraj-google-style
def is_all_initialized(self): return (frozenset(self._class_map.keys()) == frozenset(self._instance_map.keys()))
Return whether all the instances have been initialized. Returns: bool
codesearchnet
def verbose_ping(dest_addr: str, count: int=4, *args, **kwargs): timeout = kwargs.get('timeout') src = kwargs.get('src') unit = kwargs.setdefault('unit', 'ms') for i in range(count): output_text = "ping '{}'".format(dest_addr) output_text += (" from '{}'".format(src) if src else '') output_text += ' ... ' print(output_text, end='') delay = ping(dest_addr, *args, seq=i, **kwargs) if (delay is None): print(('Timeout > {}s'.format(timeout) if timeout else 'Timeout')) else: print('{value}{unit}'.format(value=int(delay), unit=unit))
Send pings to destination address with the given timeout and display the result. Args: dest_addr: The destination address. Ex. "192.168.1.1"/"example.com" count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4) *args and **kwargs: All the other arguments available in ping() except `seq`. Returns: None. The formatted ping results are printed to stdout.
codesearchnet
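A hedged usage sketch of the helper above; the timings shown are illustrative, and sending ICMP echo requests usually needs elevated privileges:

# Hedged usage sketch of verbose_ping (defined above); output is illustrative.
verbose_ping('example.com', count=2, timeout=1, unit='ms')
# ping 'example.com' ... 23ms
# ping 'example.com' ... 24ms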
def set_scope(self, include=None, exclude=None): if include: self.scope = u'document.querySelector("{}")'.format(u', '.join(include)) else: self.scope = 'null' if (exclude is not None): raise NotImplementedError('The argument `exclude` has not been implemented in AxsAuditConfig.set_scope method.')
Sets `scope`, the "start point" for the audit. Args: include: A list of css selectors specifying the elements that contain the portion of the page that should be audited. Defaults to auditing the entire document. exclude: This arg is not implemented in this ruleset. Examples: To check only the `div` with id `foo`:: page.a11y_audit.config.set_scope(["div#foo"]) To reset the scope to check the whole document:: page.a11y_audit.config.set_scope()
codesearchnet
def update_resharding_callback(self, callback: checkpoint_adapter.ReshardCallback): if not issubclass(checkpoint_adapter.ReshardCallback, type(self.callback)): raise TypeError('Cannot override resharding callback, already set to non trivial.') self.callback = callback
Add a resharding callback to the checkpoint. This will be applied to the checkpoint value before being supplied to the restore ops. Args: callback: Reshard callback for resharding this checkpoint position. May be None.
github-repos
def ReadFromFile(self, path): self._definitions = {} with open(path, 'r') as file_object: for preset_definition in self._ReadPresetsFromFileObject(file_object): self._definitions[preset_definition.name] = preset_definition
Reads parser and parser plugin presets from a file. Args: path (str): path of file that contains the parser and parser plugin presets configuration. Raises: MalformedPresetError: if one or more plugin preset definitions are malformed.
juraj-google-style
def PushTask(self, task): storage_file_size = getattr(task, 'storage_file_size', None) if not storage_file_size: raise ValueError('Task storage file size not set.') if task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY: weight = 1 else: weight = storage_file_size task.merge_priority = weight heap_values = (weight, task) heapq.heappush(self._heap, heap_values) self._task_identifiers.add(task.identifier)
Pushes a task onto the heap. Args: task (Task): task. Raises: ValueError: if the size of the storage file is not set in the task.
juraj-google-style
def ParseMessage(self, parser_mediator, key, date_time, tokens): if (key not in ('failed_connection', 'login', 'opened_connection')): raise ValueError('Unknown grammar key: {0:s}'.format(key)) if (key == 'login'): event_data = SSHLoginEventData() elif (key == 'failed_connection'): event_data = SSHFailedConnectionEventData() elif (key == 'opened_connection'): event_data = SSHOpenedConnectionEventData() event_data.address = tokens.get('address', None) event_data.authentication_method = tokens.get('authentication_method', None) event_data.body = tokens.get('body', None) event_data.fingerprint = tokens.get('fingerprint', None) event_data.hostname = tokens.get('hostname', None) event_data.offset = 0 event_data.pid = tokens.get('pid', None) event_data.protocol = tokens.get('protocol', None) event_data.port = tokens.get('port', None) event_data.reporter = tokens.get('reporter', None) event_data.severity = tokens.get('severity', None) event_data.username = tokens.get('username', None) event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Produces an event from a syslog body that matched one of the grammars. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. key (str): name of the matching grammar. date_time (dfdatetime.DateTimeValues): date and time values. tokens (dict[str, str]): tokens derived from a syslog message based on the defined grammar. Raises: ValueError: If an unknown key is provided.
codesearchnet
def make_method(ctx, node, name, params=None, posonly_count=0, kwonly_params=None, return_type=None, self_param=None, varargs=None, kwargs=None, kind=pytd.MethodKind.METHOD): def _process_annotation(param): param_type = param.typ if not param_type: return elif isinstance(param_type, cfg.Variable): types = param_type.data if len(types) == 1: annotations[param.name] = types[0].cls else: t = abstract.Union([t.cls for t in types], ctx) annotations[param.name] = t else: annotations[param.name] = param_type params = params or [] kwonly_params = kwonly_params or [] if kind in (pytd.MethodKind.METHOD, pytd.MethodKind.PROPERTY): self_param = [self_param or Param('self', None, None)] elif kind == pytd.MethodKind.CLASSMETHOD: self_param = [Param('cls', None, None)] else: assert kind == pytd.MethodKind.STATICMETHOD self_param = [] annotations = {} params = self_param + params return_param = Param('return', return_type, None) if return_type else None special_params = [x for x in (return_param, varargs, kwargs) if x] for param in special_params + params + kwonly_params: _process_annotation(param) names = lambda xs: tuple((x.name for x in xs)) param_names = names(params) kwonly_names = names(kwonly_params) defaults = {x.name: x.default for x in params + kwonly_params if x.default} varargs_name = varargs.name if varargs else None kwargs_name = kwargs.name if kwargs else None ret = abstract.SimpleFunction.build(name=name, param_names=param_names, posonly_count=posonly_count, varargs_name=varargs_name, kwonly_params=kwonly_names, kwargs_name=kwargs_name, defaults=defaults, annotations=annotations, ctx=ctx) ret.signature.check_defaults(ctx) retvar = ret.to_variable(node) if kind in (pytd.MethodKind.METHOD, pytd.MethodKind.PROPERTY): return retvar if kind == pytd.MethodKind.CLASSMETHOD: decorator = ctx.vm.load_special_builtin('classmethod') else: assert kind == pytd.MethodKind.STATICMETHOD decorator = ctx.vm.load_special_builtin('staticmethod') args = function.Args(posargs=(retvar,)) return decorator.call(node, func=None, args=args)[1]
Make a method from params. Args: ctx: The context node: Node to create the method variable at name: The method name params: Positional params [type: [Param]] posonly_count: Number of positional-only parameters kwonly_params: Keyword only params [type: [Param]] return_type: Return type [type: PARAM_TYPES] self_param: Self param [type: Param, defaults to self: Any] varargs: Varargs param [type: Param, allows *args to be named and typed] kwargs: Kwargs param [type: Param, allows **kwargs to be named and typed] kind: The method kind Returns: A new method wrapped in a variable.
github-repos
def on_snapshot(self, callback): return Watch.for_query(self, callback, document.DocumentSnapshot, document.DocumentReference)
Monitor the documents in this collection that match this query. This starts a watch on this query using a background thread. The provided callback is run on the snapshot of the documents. Args: callback(~.firestore.query.QuerySnapshot): a callback to run when a change occurs. Example: from google.cloud import firestore_v1beta1 db = firestore_v1beta1.Client() query_ref = db.collection(u'users').where("user", "==", u'Ada') def on_snapshot(docs, changes, read_time): for doc in docs: print(u'{} => {}'.format(doc.id, doc.to_dict())) # Watch this query query_watch = query_ref.on_snapshot(on_snapshot) # Terminate this watch query_watch.unsubscribe()
codesearchnet
def _parse_ISBN_EAN(details): isbn_ean = _get_td_or_none(details, 'ctl00_ContentPlaceHolder1_tblRowIsbnEan') if (not isbn_ean): return (None, None) ean = None isbn = None if ('/' in isbn_ean): (isbn, ean) = isbn_ean.split('/') isbn = isbn.strip() ean = ean.strip() else: isbn = isbn_ean.strip() if (not isbn): isbn = None return (isbn, ean)
Parse ISBN and EAN. Args: details (obj): HTMLElement containing slice of the page with details. Returns: (ISBN, EAN): Tuple of two strings; either element may be None.
codesearchnet