code (stringlengths 20–4.93k) | docstring (stringlengths 33–1.27k) | source (stringclasses 3) |
---|---|---|
def __init__(self, *futures):
for f in futures:
if not isinstance(f, PipelineFuture):
raise TypeError('May only pass PipelineFuture instances to After(). %r' % type(f))
self._futures = set(futures)
|
Initializer.
Args:
*futures: PipelineFutures that all subsequent pipelines should follow.
May be empty, in which case this statement does nothing.
|
juraj-google-style
|
def _wait_on_metadata(self, topic, max_wait):
self._sender.add_topic(topic)
begin = time.time()
elapsed = 0.0
metadata_event = None
while True:
partitions = self._metadata.partitions_for_topic(topic)
if (partitions is not None):
return partitions
if (not metadata_event):
metadata_event = threading.Event()
log.debug('Requesting metadata update for topic %s', topic)
metadata_event.clear()
future = self._metadata.request_update()
future.add_both((lambda e, *args: e.set()), metadata_event)
self._sender.wakeup()
metadata_event.wait((max_wait - elapsed))
elapsed = (time.time() - begin)
if (not metadata_event.is_set()):
raise Errors.KafkaTimeoutError(('Failed to update metadata after %.1f secs.' % (max_wait,)))
elif (topic in self._metadata.unauthorized_topics):
raise Errors.TopicAuthorizationFailedError(topic)
else:
log.debug('_wait_on_metadata woke after %s secs.', elapsed)
|
Wait for cluster metadata including partitions for the given topic to
be available.
Arguments:
topic (str): topic we want metadata for
max_wait (float): maximum time in secs for waiting on the metadata
Returns:
set: partition ids for the topic
Raises:
KafkaTimeoutError: if partitions for topic were not obtained before
specified max_wait timeout
|
codesearchnet
|
def set_density_matrix(self, density_matrix_repr: Union[(int, np.ndarray)]):
density_matrix = density_matrix_utils.to_valid_density_matrix(density_matrix_repr, len(self._qubit_map), self._dtype)
density_matrix = np.reshape(density_matrix, self.simulator_state().density_matrix.shape)
np.copyto(dst=self.simulator_state().density_matrix, src=density_matrix)
|
Set the density matrix to a new density matrix.
Args:
density_matrix_repr: If this is an int, the density matrix is set to
the computational basis state corresponding to this state. Otherwise
if this is a np.ndarray it is the full state, either a pure state
or the full density matrix. If it is the pure state it must be the
correct size, be normalized (an L2 norm of 1), and be safely
castable to an appropriate dtype for the simulator. If it is a
mixed state it must be correctly sized and positive semidefinite
with trace one.
|
codesearchnet
|
def bridge_delete(br, if_exists=True):
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
|
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
|
codesearchnet
|
def _expand_url(short_link, subreddit=None):
message_scheme = 'https:
comment_scheme = 'https:
post_scheme = 'https:
if short_link == '':
return None
else:
parts = short_link.split(',')
if parts[0] == 'm':
return message_scheme.format(parts[1])
if parts[0] == 'l' and subreddit:
if len(parts) > 2:
return comment_scheme.format(subreddit, parts[1], parts[2])
else:
return post_scheme.format(subreddit, parts[1])
elif not subreddit:
raise ValueError('Subreddit name must be provided')
else:
return None
|
Convert a usernote's URL short-hand into a full reddit URL.
Arguments:
subreddit: the subreddit the URL is for (PRAW Subreddit object or str)
short_link: the compressed link from a usernote (str)
Returns a String of the full URL.
|
juraj-google-style
|
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
if not isinstance(encoder_attention_mask, tf.Tensor):
encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)
if encoder_attention_mask.shape.rank == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = (tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
|
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (`tf.Tensor`): An attention mask.
Returns:
`tf.Tensor`: The inverted attention mask.
|
github-repos
|
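The masking arithmetic in `invert_attention_mask` can be illustrated with plain NumPy (a minimal sketch of the same math, not the library call itself; `float32`'s most negative value stands in for `dtype.min`):

```python
import numpy as np

mask = np.array([[1, 1, 0]], dtype=np.float32)       # 1 = attend, 0 = padding
extended = mask[:, None, None, :]                     # broadcastable to [batch, heads, q_len, k_len]
inverted = (1.0 - extended) * np.finfo(np.float32).min
# attended positions become 0, padded positions become a large negative bias
```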
def TryConsume(self, token):
if self.token == token:
self.NextToken()
return True
return False
|
Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
|
juraj-google-style
|
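To illustrate the consume-on-match pattern in `TryConsume`, here is a minimal, hypothetical tokenizer with the same contract (the real class also tracks positions and reports errors):

```python
class TinyTokenizer:
    """Hypothetical stand-in: holds a list of tokens and a cursor."""

    def __init__(self, tokens):
        self._tokens = list(tokens)
        self._pos = 0
        self.token = self._tokens[0] if self._tokens else ''

    def NextToken(self):
        self._pos += 1
        self.token = self._tokens[self._pos] if self._pos < len(self._tokens) else ''

    def TryConsume(self, token):
        if self.token == token:
            self.NextToken()
            return True
        return False

tok = TinyTokenizer(['[', 'name', ']'])
assert tok.TryConsume('[')       # consumed, cursor advances to 'name'
assert not tok.TryConsume(']')   # no match, cursor stays on 'name'
```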
def search(cls, five9, filters):
return cls._name_search(five9.configuration.getDispositions, filters)
|
Search for a record on the remote and return the results.
Args:
five9 (five9.Five9): The authenticated Five9 remote.
filters (dict): A dictionary of search parameters, keyed by the
name of the field to search. This should conform to the
schema defined in :func:`five9.Five9.create_criteria`.
Returns:
list[BaseModel]: A list of records representing the result.
|
juraj-google-style
|
def clinvar_submissions(self, user_id, institute_id):
LOG.info("Retrieving all clinvar submissions for user '%s', institute '%s'", user_id, institute_id)
query = dict(user_id=user_id, institute_id=institute_id)
results = list(self.clinvar_submission_collection.find(query))
submissions = []
for result in results:
submission = {}
submission['_id'] = result.get('_id')
submission['status'] = result.get('status')
submission['user_id'] = result.get('user_id')
submission['institute_id'] = result.get('institute_id')
submission['created_at'] = result.get('created_at')
submission['updated_at'] = result.get('updated_at')
if ('clinvar_subm_id' in result):
submission['clinvar_subm_id'] = result['clinvar_subm_id']
if result.get('variant_data'):
submission['variant_data'] = self.clinvar_collection.find({'_id': {'$in': result['variant_data']}})
if result.get('case_data'):
submission['case_data'] = self.clinvar_collection.find({'_id': {'$in': result['case_data']}})
submissions.append(submission)
return submissions
|
Collect all open and closed clinvar submissions created by a user for an institute.
Args:
user_id(str): a user ID
institute_id(str): an institute ID
Returns:
submissions(list): a list of clinvar submission objects
|
codesearchnet
|
def _deserialize(self, entity, p, unused_depth=1):
if (p.meaning() == entity_pb.Property.EMPTY_LIST):
self._store_value(entity, [])
return
val = self._db_get_value(p.value(), p)
if (val is not None):
val = _BaseValue(val)
if self._repeated:
if self._has_value(entity):
value = self._retrieve_value(entity)
assert isinstance(value, list), repr(value)
value.append(val)
else:
value = [val]
else:
value = val
self._store_value(entity, value)
|
Internal helper to deserialize this property from a protocol buffer.
Subclasses may override this method.
Args:
entity: The entity, a Model (subclass) instance.
p: A Property Message object (a protocol buffer).
depth: Optional nesting depth, default 1 (unused here, but used
by some subclasses that override this method).
|
codesearchnet
|
def fail_run_group(group, session):
from datetime import datetime
group.end = datetime.now()
group.status = 'failed'
session.commit()
|
End the run_group unsuccessfully.
Args:
group: The run_group we want to complete.
session: The database transaction we will finish.
|
codesearchnet
|
def add_key_path(key_proto, *path_elements):
for i in range(0, len(path_elements), 2):
pair = path_elements[i:(i + 2)]
elem = key_proto.path.add()
elem.kind = pair[0]
if (len(pair) == 1):
return
id_or_name = pair[1]
if isinstance(id_or_name, (int, long)):
elem.id = id_or_name
elif isinstance(id_or_name, basestring):
elem.name = id_or_name
else:
raise TypeError(('Expected an integer id or string name as argument %d; received %r (a %s).' % ((i + 2), id_or_name, type(id_or_name))))
return key_proto
|
Add path elements to the given datastore.Key proto message.
Args:
key_proto: datastore.Key proto message.
*path_elements: list of ancestors to add to the key.
(kind1, id1/name1, ..., kindN, idN/nameN); the last two elements
represent the entity key. If there is no terminating id/name, the key
will be an incomplete key.
Raises:
TypeError: the given id or name has the wrong type.
Returns:
the same datastore.Key.
Usage:
>>> add_key_path(key_proto, 'Kind', 'name') # no parent, with name
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind2', 1) # no parent, with id
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1) # parent, complete
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2') # parent, incomplete
datastore.Key(...)
|
codesearchnet
|
def _add_ttl_ns(self, line):
lg = logging.getLogger(('%s.%s' % (self.ln, inspect.stack()[0][3])))
lg.setLevel(self.log_level)
lg.debug('line:\n%s', line)
line = str(line).strip()
if ((line is None) or (line == 'none') or (line == '') or (not line.lower().startswith('@prefix'))):
return
line = line.replace('@prefix', '', 1).strip()
if line.endswith('.'):
line = line[:(- 1)]
prefix = line[:line.find(':')].strip()
uri = self.clean_iri(line[(line.find(':') + 1):].strip())
lg.debug('\nprefix: %s uri: %s', prefix, uri)
self.bind(prefix, uri, override=False, calc=False)
|
takes one prefix line from the turtle file and binds the namespace
to the class
Args:
line: the turtle prefix line string
|
codesearchnet
|
def GetLoadedModuleBySuffix(path):
root = os.path.splitext(path)[0]
for module in sys.modules.values():
mod_root = os.path.splitext((getattr(module, '__file__', None) or ''))[0]
if (not mod_root):
continue
if (not os.path.isabs(mod_root)):
mod_root = os.path.join(os.getcwd(), mod_root)
if IsPathSuffix(mod_root, root):
return module
return None
|
Searches sys.modules to find a module with the given file path.
Args:
path: Path to the source file. It can be relative or absolute, as suffix
match can handle both. If absolute, it must have already been
sanitized.
Algorithm:
The given path must be a full suffix of a loaded module to be a valid match.
File extensions are ignored when performing suffix match.
Example:
path: 'a/b/c.py'
modules: {'a': 'a.py', 'a.b': 'a/b.py', 'a.b.c': 'a/b/c.pyc'}
returns: module('a.b.c')
Returns:
The module that corresponds to path, or None if such module was not
found.
|
codesearchnet
|
def core(num: int) -> Text:
return 'device:TPU_REPLICATED_CORE:{}'.format(num)
|
Returns the device name for a core in a replicated TPU computation.
Args:
num: the virtual core number within each replica to which operators should
be assigned.
Returns:
A device name, suitable for passing to `tf.device()`.
|
github-repos
|
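A quick check of the device-name string `core` produces, assuming the function above is in scope:

```python
# the returned string identifies a replicated TPU core by its virtual core number
assert core(0) == 'device:TPU_REPLICATED_CORE:0'
assert core(7) == 'device:TPU_REPLICATED_CORE:7'
```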
def with_content_spec(self, column_name: str='content', python_type: Type=str, convert_fn: Optional[Callable[[str], Any]]=None, sql_typecast: Optional[str]=None) -> 'ColumnSpecsBuilder':
def value_fn(chunk: Chunk) -> Any:
if chunk.content.text is None:
raise ValueError(f'Expected chunk to contain content. {chunk}')
value = chunk.content.text
return convert_fn(value) if convert_fn else value
self._specs.append(ColumnSpec(column_name=column_name, python_type=python_type, value_fn=value_fn, sql_typecast=sql_typecast))
return self
|
Add content :class:`.ColumnSpec` with optional type and conversion.
Args:
column_name: Name for the content column (defaults to "content")
python_type: Python type for the column (defaults to str)
convert_fn: Optional function to convert the content text.
If None, the content text is used as-is.
sql_typecast: Optional SQL type cast
Returns:
Self for method chaining
Example:
>>> builder.with_content_spec(
... column_name="content_length",
... python_type=int,
... convert_fn=len # Store content length instead of content
... )
|
github-repos
|
def download_archive_artifact_bundle(self, id_or_uri, file_path):
uri = ((self.BACKUP_ARCHIVE_PATH + '/') + extract_id_from_uri(id_or_uri))
return self._client.download(uri, file_path)
|
Downloads an archive for the Artifact Bundle.
Args:
id_or_uri: ID or URI of the Artifact Bundle.
file_path(str): Destination file path.
Returns:
bool: Successfully downloaded.
|
codesearchnet
|
def update(self, **kwargs):
kwargs = {k: (np.array(v) if isinstance(v, (int, float)) else v)
for k, v in kwargs.items()}
self.args.update(kwargs)
|
Update the model arguments with additional arguments.
Args:
kwargs (dict): Optional keyword arguments to add to prior args.
|
juraj-google-style
|
def ping(dest_addr: str, timeout: int=4, unit: str='s', src_addr: str=None, ttl: int=64, seq: int=0, size: int=56) -> (float or None):
with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock:
sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
if src_addr:
sock.bind((src_addr, 0))
icmp_id = (threading.current_thread().ident % 65535)
try:
send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size)
delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout)
except errors.PingError as e:
_debug(e)
if EXCEPTIONS:
raise e
return None
if (delay is None):
return None
if (unit == 'ms'):
delay *= 1000
return delay
|
Send one ping to destination address with the given timeout.
Args:
dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com"
timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4)
unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s")
src_addr: The IP address to ping from. This is for multi-interface clients. Ex. "192.168.1.20". (default None)
ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64)
seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0)
size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56)
Returns:
The delay in seconds/milliseconds or None on timeout.
Raises:
PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True.
|
codesearchnet
|
def __init__(self, schema, force_deterministic=False):
self.schema = schema
self._type_hint = named_tuple_from_schema(self.schema)
self.components = [_nonnull_coder_from_type(field.type) for field in self.schema.fields]
if force_deterministic:
self.components = [c.as_deterministic_coder(force_deterministic) for c in self.components]
self.forced_deterministic = bool(force_deterministic)
|
Initializes a :class:`RowCoder`.
Args:
schema (apache_beam.portability.api.schema_pb2.Schema): The protobuf
representation of the schema of the data that the RowCoder will be used
to encode/decode.
|
github-repos
|
def __build_config_block(self, config_block_node):
node_lists = []
for line_node in config_block_node:
if isinstance(line_node, pegnode.ConfigLine):
node_lists.append(self.__build_config(line_node))
elif isinstance(line_node, pegnode.OptionLine):
node_lists.append(self.__build_option(line_node))
elif isinstance(line_node, pegnode.ServerLine):
node_lists.append(
self.__build_server(line_node))
elif isinstance(line_node, pegnode.BindLine):
node_lists.append(
self.__build_bind(line_node))
elif isinstance(line_node, pegnode.AclLine):
node_lists.append(
self.__build_acl(line_node))
elif isinstance(line_node, pegnode.BackendLine):
node_lists.append(
self.__build_usebackend(line_node))
elif isinstance(line_node, pegnode.UserLine):
node_lists.append(
self.__build_user(line_node))
elif isinstance(line_node, pegnode.GroupLine):
node_lists.append(
self.__build_group(line_node))
else:
pass
return node_lists
|
parse `config_block` in each section
Args:
config_block_node (TreeNode): Description
Returns:
[line_node1, line_node2, ...]
|
juraj-google-style
|
def not_found(cls, errors=None):
if cls.expose_status:
cls.response.content_type = 'application/json'
cls.response._status_line = '404 Not Found'
return cls(404, None, errors).to_json
|
Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
juraj-google-style
|
def __init__(self, description=None, **options):
self.__doc__ = description
self._options = {}
for name, option in compat.iteritems(options):
self.register(name, option)
super(Namespace, self).__init__()
|
Initialize the Namespace with options
Args:
description (str, optional): A human readable description of what
the Namespace contains.
**options: Each keyword should be an Option object which will be
added to the Namespace.
Raises:
TypeError: If an entry is not an Option object.
|
juraj-google-style
|
def __init__(self, steps_col, slc):
self._col = steps_col
self._idx = slc.indices(len(self._col))
self._flt = {
'snap': False,
'rprof': False,
'fields': [],
'func': lambda _: True,
}
self._dflt_func = self._flt['func']
|
Initialization of instances:
Args:
steps_col (:class:`_Steps` or :class:`_Snaps`): steps collection,
i.e. :attr:`StagyyData.steps` or :attr:`StagyyData.snaps`
attributes.
slc (slice): slice of desired isteps or isnap.
|
juraj-google-style
|
def email_has_role(self, email, role_name, uuid=None):
mbr_data = self.get_membership(uuid=uuid)
docs = []
try:
docs = mbr_data['response']['docs']
except KeyError:
failure_message = ('KeyError in membership data - '
'got {0}'.format(mbr_data))
log.exception(failure_message)
raise PyLmodUnexpectedData(failure_message)
if len(docs) == 0:
return False
has_role = any(
(x.get('email') == email and x.get('roleType') == role_name)
for x in docs
)
if has_role:
return True
return False
|
Determine if an email is associated with a role.
Args:
email (str): user email
role_name (str): user role
uuid (str): optional uuid. defaults to self.cuuid
Raises:
PyLmodUnexpectedData: Unexpected data was returned.
requests.RequestException: Exception connection error
Returns:
bool: True or False if email has role_name
|
juraj-google-style
|
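The role check in `email_has_role` reduces to a plain `any()` over the membership documents; a standalone sketch with hypothetical data:

```python
docs = [
    {'email': 'alice@example.edu', 'roleType': 'student'},
    {'email': 'bob@example.edu', 'roleType': 'staff'},
]
has_role = any(
    d.get('email') == 'alice@example.edu' and d.get('roleType') == 'student'
    for d in docs
)
assert has_role is True
```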
def random_expr(depth, vlist, ops):
if not depth:
return str(vlist[random.randrange(len(vlist))])
max_depth_side = random.randrange(2)
other_side_depth = random.randrange(depth)
left = random_expr(depth - 1 if max_depth_side else other_side_depth, vlist, ops)
right = random_expr(depth - 1 if not max_depth_side else other_side_depth, vlist, ops)
op = ops[random.randrange(len(ops))]
return ExprNode(left, right, op)
|
Generate a random expression tree.
Args:
depth: At least one leaf will be this many levels down from the top.
vlist: A list of chars. These chars are randomly selected as leaf values.
ops: A list of ExprOp instances.
Returns:
An ExprNode instance which is the root of the generated expression tree.
|
juraj-google-style
|
def from_index_amount(cls, matrixpos, amt):
f = np.identity(3)
f[matrixpos] += amt
return cls(f)
|
Factory method for constructing a Deformation object
from a matrix position and amount
Args:
matrixpos (tuple): tuple corresponding the matrix position to
have a perturbation added
amt (float): amount to add to the identity matrix at position
matrixpos
|
juraj-google-style
|
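The matrix built by `from_index_amount` is just an identity with one perturbed entry; a NumPy-only sketch without the pymatgen `Deformation` wrapper:

```python
import numpy as np

matrixpos, amt = (0, 1), 0.01   # perturb row 0, column 1
f = np.identity(3)
f[matrixpos] += amt
# f is now [[1. , 0.01, 0. ],
#           [0. , 1.  , 0. ],
#           [0. , 0.  , 1. ]]
```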
def add_collection_def(meta_graph_def, key, graph=None, export_scope=None, exclude_nodes=None, override_contents=None):
if graph and (not isinstance(graph, ops.Graph)):
raise TypeError(f'graph must be of type Graph. Received type: {type(graph)}.')
if not isinstance(key, str) and (not isinstance(key, bytes)):
logging.warning('Only collections with string type keys will be serialized. This key has %s', type(key))
return
graph = graph or ops.get_default_graph()
if override_contents:
collection_list = override_contents
else:
collection_list = graph.get_collection(key)
collection_list = [x for x in collection_list if _should_include_node(x, export_scope, exclude_nodes)]
if not collection_list:
return
try:
col_def = meta_graph_def.collection_def[key]
to_proto = ops.get_to_proto_function(key)
proto_type = ops.get_collection_proto_type(key)
if to_proto:
kind = 'bytes_list'
for x in collection_list:
proto = to_proto(x, export_scope=export_scope)
if proto:
assert isinstance(proto, proto_type)
getattr(col_def, kind).value.append(proto.SerializeToString())
else:
kind = _get_kind_name(collection_list[0])
if kind == 'node_list':
for x in collection_list:
if not export_scope or x.name.startswith(export_scope):
getattr(col_def, kind).value.append(ops.strip_name_scope(x.name, export_scope))
elif kind == 'bytes_list':
getattr(col_def, kind).value.extend([compat.as_bytes(x) for x in collection_list])
else:
getattr(col_def, kind).value.extend([x for x in collection_list])
except Exception as e:
logging.warning("Issue encountered when serializing %s.\nType is unsupported, or the types of the items don't match field type in CollectionDef. Note this is a warning and probably safe to ignore.\n%s", key, str(e))
if key in meta_graph_def.collection_def:
del meta_graph_def.collection_def[key]
return
|
Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
graph: The `Graph` from which to get collections.
export_scope: Optional `string`. Name scope to remove.
exclude_nodes: An iterable of nodes or `string` node names to omit from the
collection, or None.
override_contents: An iterable of values to place in the collection,
ignoring the current values (if set).
|
github-repos
|
def CopyToDateTimeString(self):
if ((self._timestamp is None) or (self._timestamp < 0) or (self._timestamp > self._UINT64_MAX)):
return None
(timestamp, remainder) = divmod(self._timestamp, self._100NS_PER_SECOND)
(number_of_days, hours, minutes, seconds) = self._GetTimeValues(timestamp)
(year, month, day_of_month) = self._GetDateValuesWithEpoch(number_of_days, self._EPOCH)
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'.format(year, month, day_of_month, hours, minutes, seconds, remainder)
|
Copies the FILETIME timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#######" or
None if the timestamp is missing or invalid.
|
codesearchnet
|
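The FILETIME arithmetic can be sanity-checked independently with the standard library, using the 1601-01-01 epoch (the timestamp below is a hypothetical example value):

```python
from datetime import datetime, timedelta

filetime = 132223104000000000                 # hypothetical count of 100ns intervals since 1601-01-01
seconds, remainder = divmod(filetime, 10**7)  # 10,000,000 100ns intervals per second
dt = datetime(1601, 1, 1) + timedelta(seconds=seconds)
print('{:%Y-%m-%d %H:%M:%S}.{:07d}'.format(dt, remainder))
```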
def get_variable_value_for_variation(self, variable, variation):
if ((not variable) or (not variation)):
return None
if (variation.id not in self.variation_variable_usage_map):
self.logger.error(('Variation with ID "%s" is not in the datafile.' % variation.id))
return None
variable_usages = self.variation_variable_usage_map[variation.id]
variable_usage = None
if variable_usages:
variable_usage = variable_usages.get(variable.id)
if variable_usage:
variable_value = variable_usage.value
self.logger.info(('Value for variable "%s" for variation "%s" is "%s".' % (variable.key, variation.key, variable_value)))
else:
variable_value = variable.defaultValue
self.logger.info(('Variable "%s" is not used in variation "%s". Assigning default value "%s".' % (variable.key, variation.key, variable_value)))
return variable_value
|
Get the variable value for the given variation.
Args:
variable: The Variable for which we are getting the value.
variation: The Variation for which we are getting the variable value.
Returns:
The variable value or None if any of the inputs are invalid.
|
codesearchnet
|
def _dbParamsMom01(self):
db_grad = [[]] * 10
db_out = [[]] * 10
db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]
db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]
db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]
db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]
db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]
db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189]
db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648]
db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303]
db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287]
db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544]
db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229]
db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717]
db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251]
db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997]
db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922]
db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418]
db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227]
db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781]
db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711]
db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295]
return (db_grad, db_out)
|
Return dist-belief momentum values.
Return values were generated from the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
|
github-repos
|
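The tabulated values follow the plain momentum update described in the docstring (scale the accumulator by the momentum, add the gradient, then take a learning-rate step); a NumPy sketch, assuming `db_grad` and `db_out` come from the method above:

```python
import numpy as np

lr, momentum = 0.1, 0.1
var = np.zeros(10)
accum = np.zeros(10)
for step, grad in enumerate(db_grad):
    accum = momentum * accum + np.asarray(grad)   # momentum accumulator
    var = var - lr * accum                        # parameter update
    # agrees with the tabulated parameters to roughly float32 precision
    np.testing.assert_allclose(var, db_out[step], rtol=1e-5)
```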
def _EmbedIPython(variables, argv=None):
import IPython
argv = argv or []
IPython.start_ipython(argv=argv, user_ns=variables)
|
Drops into an IPython REPL with variables available for use.
Args:
variables: A dict of variables to make available. Keys are variable names.
Values are variable values.
argv: The argv to use for starting ipython. Defaults to an empty list.
|
github-repos
|
def get_tpu_system_metadata(self):
cluster_spec = self.cluster_spec()
cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
tpu_system_metadata = tpu_system_metadata_lib._query_tpu_system_metadata(self.master(), cluster_def=cluster_def, query_topology=False)
return tpu_system_metadata
|
Returns the metadata of the TPU system.
Users can call this method to get some facts of the TPU system, like
total number of cores, number of TPU workers and the devices. E.g.
```python
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tpu_system_metadata = resolver.get_tpu_system_metadata()
num_hosts = tpu_system_metadata.num_hosts
```
Returns:
A `tf.tpu.experimental.TPUSystemMetadata` object.
|
github-repos
|
def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio, pseudo_random, overlapping):
with self.cached_session() as sess:
p, r, c = nn_ops.fractional_avg_pool_v2(input_tensor, pooling_ratio, pseudo_random, overlapping, seed=self._SEED)
actual, row_seq, col_seq = self.evaluate([p, r, c])
expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq, col_seq, overlapping)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
|
Validate FractionalAvgPool's result against expected.
Expected result is computed given input_tensor, and pooling region defined
by row_seq and col_seq.
Args:
input_tensor: A tensor or numpy ndarray.
pooling_ratio: A list or tuple of length 4, first and last element be 1.
pseudo_random: Use pseudo random method to generate pooling sequence.
overlapping: Use overlapping when pooling.
Returns:
None
|
github-repos
|
class MaxNorm(Constraint):
def __init__(self, max_value=2, axis=0):
self.max_value = max_value
self.axis = axis
@doc_controls.do_not_generate_docs
def __call__(self, w):
norms = backend.sqrt(math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
desired = backend.clip(norms, 0, self.max_value)
return w * (desired / (backend.epsilon() + norms))
@doc_controls.do_not_generate_docs
def get_config(self):
return {'max_value': self.max_value, 'axis': self.axis}
|
MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Also available via the shortcut function `tf.keras.constraints.max_norm`.
Args:
max_value: the maximum norm value for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
|
github-repos
|
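The `MaxNorm` constraint rescales any weight column whose L2 norm exceeds `max_value`; the same arithmetic expressed in NumPy (a sketch, not the Keras object):

```python
import numpy as np

def max_norm(w, max_value=2.0, axis=0, epsilon=1e-7):
    norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
    desired = np.clip(norms, 0.0, max_value)      # leave small norms alone, cap large ones
    return w * (desired / (epsilon + norms))

w = np.array([[3.0, 0.5],
              [4.0, 0.5]])                        # column norms: 5.0 and ~0.707
w_constrained = max_norm(w, max_value=2.0)
# the first column is rescaled to norm ~2, the second column is (almost) unchanged
```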
def __init__(self, *args: str, api_name: str=TENSORFLOW_API_NAME, v1: Optional[Sequence[str]]=None, allow_multiple_exports: bool=True):
self._names = args
self._names_v1 = v1 if v1 is not None else args
self._api_name = api_name
self._validate_symbol_names()
|
Export under the names *args (first one is considered canonical).
Args:
*args: API names in dot delimited format.
api_name: API you want to generate Currently, only `tensorflow`.
v1: Names for the TensorFlow V1 API. If not set, we will use V2 API names
both for TensorFlow V1 and V2 APIs.
allow_multiple_exports: Deprecated.
|
github-repos
|
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
assert len(gt_boxlists) == len(
pred_boxlists
), "Length of gt and pred lists need to be same."
prec, rec = calc_detection_voc_prec_rec(
pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
return {"ap": ap, "map": np.nanmean(ap)}
|
Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results
|
juraj-google-style
|
def SetDecryptedStreamSize(self, decrypted_stream_size):
if self._is_open:
raise IOError('Already open.')
if decrypted_stream_size < 0:
raise ValueError((
'Invalid decrypted stream size: {0:d} value out of '
'bounds.').format(decrypted_stream_size))
self._decrypted_stream_size = decrypted_stream_size
|
Sets the decrypted stream size.
This function is used to set the decrypted stream size if it can be
determined separately.
Args:
decrypted_stream_size (int): size of the decrypted stream in bytes.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the decrypted stream size is invalid.
|
juraj-google-style
|
def rule(self, column: str, rule: str, error: str, value: Any, rule_params: dict={}) -> None:
log = self._build_rule_message(column, rule, error, value, rule_params)
self.queue_log_message(log)
|
Adds rule error information to base log message and
sends it to the logger for writing.
Args:
* column: column where the rule is applied
* rule: rule that is violated and raises this message
* error: error that occurred
* value: value that violates the rule
* rule_params: optional, parameters set for the rule
Returns:
* None
|
github-repos
|
def delay_embedding(data, emb_dim, lag=1):
data = np.asarray(data)
min_len = (emb_dim - 1) * lag + 1
if len(data) < min_len:
msg = "cannot embed data of length {} with embedding dimension {} " \
+ "and lag {}, minimum required length is {}"
raise ValueError(msg.format(len(data), emb_dim, lag, min_len))
m = len(data) - min_len + 1
indices = np.repeat([np.arange(emb_dim) * lag], m, axis=0)
indices += np.arange(m).reshape((m, 1))
return data[indices]
|
Perform a time-delay embedding of a time series
Args:
data (array-like):
the data that should be embedded
emb_dim (int):
the embedding dimension
Kwargs:
lag (int):
the lag between elements in the embedded vectors
Returns:
emb_dim x m array:
matrix of embedded vectors of the form
[data[i], data[i+lag], data[i+2*lag], ... data[i+(emb_dim-1)*lag]]
for i in 0 to m-1 (m = len(data)-(emb_dim-1)*lag)
|
juraj-google-style
|
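A small usage example for `delay_embedding`, assuming the function above is in scope:

```python
import numpy as np

data = np.arange(6)                      # [0, 1, 2, 3, 4, 5]
emb = delay_embedding(data, emb_dim=3, lag=2)
# each row is [data[i], data[i + 2], data[i + 4]]:
# [[0, 2, 4],
#  [1, 3, 5]]
```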
def setup(self, puller: bool=None, subscriptions: Dict[str, Any]={}):
if puller:
puller = self._zmq.socket(zmq.PULL)
ip, port, host = self.rslv('rcv')
puller.bind('tcp:
self.poll(puller)
if subscriptions:
for publisher in subscriptions:
self.add(publisher, subscriptions[publisher].get('slots'), subscriptions[publisher].get('buffer-length'))
logger.info('Listening to %s', {
k: (1 if subscriptions[k].get('slots') is None else len(subscriptions[k].get('slots')))
for k in subscriptions
})
|
Sets up this Node with the specified Interfaces before it is run.
Args:
puller: Indication if a Puller Interface should be created.
subscriptions: Collection of the Subscriber Interfaces to be created and their Slots.
|
juraj-google-style
|
def wrap_with_monitor(env, video_dir):
env = ExtendToEvenDimentions(env)
env = RenderObservations(env)
env = gym.wrappers.Monitor(env, video_dir, force=True, video_callable=(lambda idx: True), write_upon_reset=True)
return env
|
Wrap environment with gym.Monitor.
Video recording provided by Monitor requires
1) both height and width of observation to be even numbers.
2) rendering of environment
Args:
env: environment.
video_dir: video directory.
Returns:
wrapped environment.
|
codesearchnet
|
def execute_code(self, code, filename=None, isolate=False):
def _apply():
self.compile_code(code=code,
filename=filename,
exec_namespace=self.globals)
if isolate:
saved_globals = dict(self.globals)
try:
_apply()
finally:
self.globals.clear()
self.globals.update(saved_globals)
else:
_apply()
|
Execute code within the execution context.
Args:
code (str or SourceCode): Rex code to execute.
filename (str): Filename to report if there are syntax errors.
isolate (bool): If True, do not affect `self.globals` by executing
this code.
|
juraj-google-style
|
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(DeviceCredential, self).read(input_stream, kmip_version=kmip_version)
local_stream = BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.DEVICE_SERIAL_NUMBER, local_stream):
self._device_serial_number = primitives.TextString(tag=enums.Tags.DEVICE_SERIAL_NUMBER)
self._device_serial_number.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.PASSWORD, local_stream):
self._password = primitives.TextString(tag=enums.Tags.PASSWORD)
self._password.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.DEVICE_IDENTIFIER, local_stream):
self._device_identifier = primitives.TextString(tag=enums.Tags.DEVICE_IDENTIFIER)
self._device_identifier.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.NETWORK_IDENTIFIER, local_stream):
self._network_identifier = primitives.TextString(tag=enums.Tags.NETWORK_IDENTIFIER)
self._network_identifier.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.MACHINE_IDENTIFIER, local_stream):
self._machine_identifier = primitives.TextString(tag=enums.Tags.MACHINE_IDENTIFIER)
self._machine_identifier.read(local_stream, kmip_version=kmip_version)
if self.is_tag_next(enums.Tags.MEDIA_IDENTIFIER, local_stream):
self._media_identifier = primitives.TextString(tag=enums.Tags.MEDIA_IDENTIFIER)
self._media_identifier.read(local_stream, kmip_version=kmip_version)
self.is_oversized(local_stream)
|
Read the data encoding the DeviceCredential struct and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def Open(self, filename):
if not super(WinevtResourcesSqlite3DatabaseReader, self).Open(filename):
return False
version = self.GetMetadataAttribute('version')
if not version or version != '20150315':
raise RuntimeError('Unsupported version: {0:s}'.format(version))
string_format = self.GetMetadataAttribute('string_format')
if not string_format:
string_format = 'wrc'
if string_format not in ('pep3101', 'wrc'):
raise RuntimeError('Unsupported string format: {0:s}'.format(
string_format))
self._string_format = string_format
return True
|
Opens the database reader object.
Args:
filename (str): filename of the database.
Returns:
bool: True if successful.
Raises:
RuntimeError: if the version or string format of the database
is not supported.
|
juraj-google-style
|
def register_menu_item(self, items):
for itm in items:
if itm.group in self.menu_items:
if itm not in self.menu_items[itm.group]['items']:
self.menu_items[itm.group]['items'].append(itm)
else:
logger.warning('Tried registering menu item to unknown group {}'.format(itm.group))
|
Registers a view's menu items into the metadata for the application. Skips items that are already present.
Args:
items (`list` of `MenuItem`): A list of `MenuItem`s
Returns:
`None`
|
juraj-google-style
|
def diff_bisect(self, text1, text2, deadline):
text1_length = len(text1)
text2_length = len(text2)
max_d = (text1_length + text2_length + 1)
v_offset = max_d
v_length = 2 * max_d
v1 = [-1] * v_length
v1[v_offset + 1] = 0
v2 = v1[:]
delta = text1_length - text2_length
front = (delta % 2 != 0)
k1start = 0
k1end = 0
k2start = 0
k2end = 0
for d in range(max_d):
if time.time() > deadline:
break
for k1 in range(-d + k1start, d + 1 - k1end, 2):
k1_offset = v_offset + k1
if k1 == -d or (k1 != d and
v1[k1_offset - 1] < v1[k1_offset + 1]):
x1 = v1[k1_offset + 1]
else:
x1 = v1[k1_offset - 1] + 1
y1 = x1 - k1
while (x1 < text1_length and y1 < text2_length and
text1[x1] == text2[y1]):
x1 += 1
y1 += 1
v1[k1_offset] = x1
if x1 > text1_length:
k1end += 2
elif y1 > text2_length:
k1start += 2
elif front:
k2_offset = v_offset + delta - k1
if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
x2 = text1_length - v2[k2_offset]
if x1 >= x2:
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
for k2 in range(-d + k2start, d + 1 - k2end, 2):
k2_offset = v_offset + k2
if k2 == -d or (k2 != d and
v2[k2_offset - 1] < v2[k2_offset + 1]):
x2 = v2[k2_offset + 1]
else:
x2 = v2[k2_offset - 1] + 1
y2 = x2 - k2
while (x2 < text1_length and y2 < text2_length and
text1[-x2 - 1] == text2[-y2 - 1]):
x2 += 1
y2 += 1
v2[k2_offset] = x2
if x2 > text1_length:
k2end += 2
elif y2 > text2_length:
k2start += 2
elif not front:
k1_offset = v_offset + delta - k2
if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
x1 = v1[k1_offset]
y1 = v_offset + x1 - k1_offset
x2 = text1_length - x2
if x1 >= x2:
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
|
Find the 'middle snake' of a diff, split the problem in two
and return the recursively constructed diff.
See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
|
juraj-google-style
|
def get_frame(self, frame_id):
if frame_id < 0 or frame_id >= self._frame_cnt:
raise IndexError(
'"frame_id" must be between 0 and {}'.format(self._frame_cnt -
1))
if frame_id == self._position:
return self.read()
if self._cache:
img = self._cache.get(frame_id)
if img is not None:
self._position = frame_id + 1
return img
self._set_real_position(frame_id)
ret, img = self._vcap.read()
if ret:
if self._cache:
self._cache.put(self._position, img)
self._position += 1
return img
|
Get frame by index.
Args:
frame_id (int): Index of the expected frame, 0-based.
Returns:
ndarray or None: Return the frame if successful, otherwise None.
|
juraj-google-style
|
def Create(self, request, global_params=None):
config = self.GetMethodConfig('Create')
return self._RunMethod(config, request, global_params=global_params)
|
Starts a build with the specified configuration. This method returns a long-running `Operation`, which includes the build ID. Pass the build ID to `GetBuild` to determine the build status (such as `SUCCESS` or `FAILURE`).
Args:
request: (CloudbuildProjectsLocationsBuildsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
|
github-repos
|
def __init__(self, pipeline, required_transforms=None, referenced_pcollections=None, cached_pcollections=None):
self._required_transforms = required_transforms or set()
self._referenced_pcollections = referenced_pcollections or set()
self._cached_pcollections = cached_pcollections or set()
super().__init__(pipeline=pipeline, default_vertex_attrs={'color': 'gray', 'fontcolor': 'gray'}, default_edge_attrs={'color': 'gray'})
transform_updates, pcollection_updates = self._generate_graph_update_dicts()
self._update_graph(transform_updates, pcollection_updates)
|
Constructor of PipelineGraph.
Args:
pipeline: (Pipeline proto) or (Pipeline) pipeline to be rendered.
required_transforms: (list/set of str) ID of top level PTransforms that
lead to visible results.
referenced_pcollections: (list/set of str) ID of PCollections that are
referenced by top level PTransforms executed (i.e.
required_transforms)
cached_pcollections: (set of str) a set of PCollection IDs of those whose
cached results are used in the execution.
|
github-repos
|
def get_score(self, error=None):
if (error is not None):
self.error = error
if (self.error >= 0):
return (1 / (self.error + 1))
else:
return (1 + abs(self.error))
|
Calculate bee's fitness score given a value returned by the fitness
function
Args:
error (float): value returned by the fitness function
Returns:
float: derived fitness score
|
codesearchnet
|
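The error-to-fitness mapping in `get_score` can be checked with plain arithmetic (a standalone sketch of the same formula, not the bee object itself):

```python
def score(error):
    return 1 / (error + 1) if error >= 0 else 1 + abs(error)

assert score(0) == 1.0        # zero error gives the baseline score of 1
assert score(1) == 0.5        # positive error shrinks the score towards 0
assert score(-0.5) == 1.5     # negative error is rewarded with a score above 1
```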
def get_candidate(self, dest_spec: ValueSpec) -> typing.Optional[ValueSpec]:
for c in self._candidates:
if dest_spec.__class__ == c.__class__ and dest_spec.is_compatible(c):
return c
for c in self._candidates:
if isinstance(c, Union):
child = c.get_candidate(dest_spec)
if child is not None:
return child
elif dest_spec.is_compatible(c):
return c
return None
|
Get candidate by a destination value spec.
Args:
dest_spec: destination value spec which is a superset of the value spec
to return. E.g. Any (dest_spec) is superset of Int (child spec).
Returns:
The first value spec under Union with which the destination value spec
is compatible.
|
github-repos
|
def learn_mealy_machine(self):
logging.info('Initializing learning procedure.')
self._init_table()
logging.info('Generating a closed and consistent observation table.')
while True:
closed = False
while not closed:
logging.debug('Checking if table is closed.')
closed, string = self.observation_table.is_closed()
if not closed:
logging.debug('Closing table.')
self._ot_make_closed(string)
else:
logging.debug('Table closed.')
mma = self.get_mealy_conjecture()
logging.info('Generated conjecture machine with %d states.',
len(list(mma.states)))
logging.debug('Running equivalence query.')
found, counter_example = self._equivalence_query(mma)
if found:
logging.info('No counterexample found. Hypothesis is correct!')
break
logging.info(
'Processing counterexample %s with length %d.',
counter_example,
len(counter_example))
self._process_counter_example(mma, counter_example)
logging.info('Learning complete.')
return mma
|
Implements the high level loop of the algorithm for learning a
Mealy machine.
Args:
None
Returns:
MealyMachine: The learned mealy machine
|
juraj-google-style
|
def _ParsePlistKeyValue(self, knowledge_base, name, value):
if not knowledge_base.GetHostname():
if name in self._PLIST_KEYS:
hostname_artifact = artifacts.HostnameArtifact(name=value)
knowledge_base.SetHostname(hostname_artifact)
|
Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
|
juraj-google-style
|
def apply_theme(self, property_values):
old_dict = self.themed_values()
if old_dict is property_values:
return
removed = set()
if old_dict is not None:
removed.update(set(old_dict.keys()))
added = set(property_values.keys())
old_values = dict()
for k in added.union(removed):
old_values[k] = getattr(self, k)
if len(property_values) > 0:
setattr(self, '__themed_values__', property_values)
elif hasattr(self, '__themed_values__'):
delattr(self, '__themed_values__')
for k, v in old_values.items():
if k in self._unstable_themed_values:
del self._unstable_themed_values[k]
for k, v in old_values.items():
descriptor = self.lookup(k)
descriptor.trigger_if_changed(self, v)
|
Apply a set of theme values which will be used rather than
defaults, but will not override application-set values.
The passed-in dictionary may be kept around as-is and shared with
other instances to save memory (so neither the caller nor the
|HasProps| instance should modify it).
Args:
property_values (dict) : theme values to use in place of defaults
Returns:
None
|
juraj-google-style
|
def annotate(self, records, **kwargs):
self.annotator_params.update(**kwargs)
chunk_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)
chunk = []
for i, record in enumerate(records):
chunk.append(record)
if (i + 1) % chunk_size == 0:
for r in self._execute(chunk):
yield r
chunk = []
if chunk:
for r in self._execute(chunk):
yield r
chunk = []
|
Annotate a set of records with stored fields.
Args:
records: A list or iterator (can be a Query object)
chunk_size: The number of records to annotate at once (max 500).
Returns:
A generator that yields one annotated record at a time.
|
juraj-google-style
|
def return_secondary_learner(self):
estimator = self.base_learner_origin.return_estimator()
estimator = estimator.set_params(**self.secondary_learner_hyperparameters)
return estimator
|
Returns secondary learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
|
codesearchnet
|
def set_iprouting(self, value=None, default=False, disable=False):
if value is False:
disable = True
cmd = self.command_builder('ip routing', value=value, default=default,
disable=disable)
return self.configure(cmd)
|
Configures the state of global ip routing
EosVersion:
4.13.7M
Args:
value(bool): True if ip routing should be enabled or False if
ip routing should be disabled
default (bool): Controls the use of the default keyword
disable (bool): Controls the use of the no keyword
Returns:
bool: True if the commands completed successfully otherwise False
|
juraj-google-style
|
def find_response_component(self, api_id=None, signature_id=None):
if ((not api_id) and (not signature_id)):
raise ValueError('At least one of api_id and signature_id is required')
components = list()
if self.response_data:
for component in self.response_data:
if (api_id and component['api_id'] == api_id) or (signature_id and component['signature_id'] == signature_id):
components.append(component)
return components
|
Find one or many response components.
Args:
api_id (str): Api id associated with the component(s) to be retrieved.
signature_id (str): Signature id associated with the component(s) to be retrieved.
Returns:
A list of dictionaries containing component data
|
codesearchnet
|
def is_monotonic(neurite, tol):
for node in neurite.iter_sections():
sec = node.points
for point_id in range(len(sec) - 1):
if sec[point_id + 1][COLS.R] > sec[point_id][COLS.R] + tol:
return False
if(node.parent is not None and
sec[0][COLS.R] > node.parent.points[-1][COLS.R] + tol):
return False
return True
|
Check if a neurite tree is monotonic, i.e. each child section has
diameters smaller than or equal to its parent's.
Args:
neurite(Neurite): neurite to operate on
tol(float): tolerance
Returns:
True if neurite monotonic
|
juraj-google-style
|
def from_csv(cls, filename: str):
with open(filename, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter=unicode2str(","),
quotechar=unicode2str("\""),
quoting=csv.QUOTE_MINIMAL)
entries = list()
header_read = False
elements = None
for row in reader:
if not header_read:
elements = row[1:(len(row) - 1)]
header_read = True
else:
name = row[0]
energy = float(row[-1])
comp = dict()
for ind in range(1, len(row) - 1):
if float(row[ind]) > 0:
comp[Element(elements[ind - 1])] = float(row[ind])
entries.append(PDEntry(Composition(comp), energy, name))
return cls(entries)
|
Imports PDEntries from a csv.
Args:
filename: Filename to import from.
Returns:
List of Elements, List of PDEntries
|
juraj-google-style
|
def groupby(iterable: Iterable[_Tin], *, key: Callable[[_Tin], _K], value: Callable[[_Tin], _Tout]=_identity) -> dict[_K, list[_Tout]]:
groups = collections.defaultdict(list)
for v in iterable:
groups[key(v)].append(value(v))
return dict(groups)
|
Similar to `itertools.groupby` but return result as a `dict()`.
Example:
```python
out = epy.groupby(
['555', '4', '11', '11', '333'],
key=len,
value=int,
)
# Order is consistent with above
assert out == {
3: [555, 333],
1: [4],
2: [11, 11],
}
```
Other difference with `itertools.groupby`:
* Iterable do not need to be sorted. Order of the original iterator is
preserved in the group.
* Transformation can be applied to the value too
Args:
iterable: The iterable to group
key: Mapping applied to group the values (should return a hashable)
value: Mapping applied to the values
Returns:
The dict
|
github-repos
|
def add_curves_from_las(self, fname, remap=None, funcs=None):
try:
self.add_curves_from_lasio(lasio.read(fname),
remap=remap,
funcs=funcs
)
except:
for f in fname:
self.add_curves_from_lasio(lasio.read(f),
remap=remap,
funcs=funcs
)
return None
|
Given a LAS file, add curves from it to the current well instance.
Essentially just wraps ``add_curves_from_lasio()``.
Args:
fname (str): The path of the LAS file to read curves from.
remap (dict): Optional. A dict of 'old': 'new' LAS field names.
funcs (dict): Optional. A dict of 'las field': function() for
implementing a transform before loading. Can be a lambda.
Returns:
None. Works in place.
|
juraj-google-style
|
def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):
url = Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)
args = {}
if start_index:
args['startIndex'] = start_index
if max_results:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
|
Retrieves the contents of a table.
Args:
table_name: the name of the table as a tuple of components.
start_index: the index of the row at which to start retrieval.
max_results: an optional maximum number of rows to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
juraj-google-style
|
def put(self, destination):
target = get_target_path(destination, self.localpath)
shutil.copytree(self.localpath, target)
|
Copy the referenced directory to this path
The semantics of this command are similar to unix ``cp``: if ``destination`` already
exists, the copied directory will be put at ``[destination]/[basename(localpath)]``. If
it does not already exist, the directory will be renamed to this path (the parent directory
must exist).
Args:
destination (str): path to put this directory
|
juraj-google-style
|
def get_int(self, name, default=None):
if (name not in self):
if (default is not None):
return default
raise EnvironmentError.not_found(self._prefix, name)
return int(self[name])
|
Retrieves an environment variable as an integer.
Args:
name (str): The case-insensitive, unprefixed variable name.
default: If provided, a default value will be returned
instead of throwing ``EnvironmentError``.
Returns:
int: The environment variable's value as an integer.
Raises:
EnvironmentError: If the environment variable does not
exist, and ``default`` was not provided.
ValueError: If the environment variable value is not an
integer with base 10.
|
codesearchnet
|
def event_stream(app, *, filter_by_prefix=None):
q = Queue()
def handle_event(event):
if filter_by_prefix is None or\
(filter_by_prefix is not None and
event['type'].startswith(filter_by_prefix)):
q.put(event)
def receive_events():
with app.connection() as connection:
recv = app.events.Receiver(connection, handlers={
'*': handle_event
})
recv.capture(limit=None, timeout=None, wakeup=True)
t = threading.Thread(target=receive_events)
t.start()
while True:
yield q.get(block=True)
|
Generator function that returns celery events.
This function turns the callback based celery event handling into a generator.
Args:
app: Reference to a celery application object.
filter_by_prefix (str): If not None, only allow events that have a type that
starts with this prefix to yield an generator event.
Returns:
generator: A generator that returns celery events.
|
juraj-google-style
|
def batch_size(self):
raise NotImplementedError
|
Return the batch size of the dataset created.
For certain types of data input, the batch size is known, and even
required, e.g. a numpy array. Whereas for a dataset, the batch size is
unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
|
github-repos
|
def display_hierarchy(root_ad_unit, all_ad_units):
parent_id_to_children = collections.defaultdict(list)
for ad_unit in all_ad_units:
if 'parentId' in ad_unit:
parent_id_to_children[ad_unit['parentId']].append(ad_unit)
parent_id_to_children = dict(parent_id_to_children)
display_hierarchy_helper(root_ad_unit, parent_id_to_children, 0)
|
Display the ad units as a tree.
Args:
root_ad_unit: The root ad unit to begin from.
all_ad_units: A list containing all ad units.
|
juraj-google-style
|
def CreateMock(self, class_to_mock):
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
|
Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
|
juraj-google-style
|
def to_proto(self, export_scope=None):
if export_scope is None or self.name.startswith(export_scope):
context_def = control_flow_pb2.CondContextDef()
context_def.context_name = ops.strip_name_scope(self.name, export_scope)
context_def.pred_name = ops.strip_name_scope(self._pred.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope)
context_def.branch = self._branch
context_def.values_def.MergeFrom(super(CondContext, self)._to_values_def(export_scope))
for nested in self._nested_contexts:
nested_def = context_def.nested_contexts.add()
nested.to_control_flow_context_def(nested_def)
return context_def
else:
return None
|
Converts a `CondContext` to a `CondContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `CondContextDef` protocol buffer.
|
github-repos
|
def disassemble(code, origin=None):
if inspect.isfunction(code):
code = six.get_function_code(code).co_code
origin = get_py_internals(origin)
opname = origin['opname']
hasjrel = origin['hasjrel']
hasjabs = origin['hasjabs']
hasjump = set(hasjrel) | set(hasjabs)
wordcode = origin['wordcode']
if not wordcode:
ext_arg_shift = 16
else:
ext_arg_shift = 8
ext_arg_name = opname[origin['extended_arg']]
ext_arg = 0
addr_labels = {}
addr_ops = []
code_iter = enumerate(six.iterbytes(code))
for op_addr, op_code in code_iter:
if op_code >= origin['have_argument']:
rel_addr, arg = next(code_iter)
if not wordcode:
rel_addr, b = next(code_iter)
arg += b << 8
arg += ext_arg
if op_code in hasjrel:
arg += rel_addr
if op_code in hasjump:
arg = addr_labels.setdefault(arg, Label())
else:
if wordcode:
next(code_iter)
arg = None
ext_arg = 0
op_name = opname[op_code]
if op_name == ext_arg_name:
ext_arg = arg << ext_arg_shift
op = None
else:
op = Op(op_name, arg)
addr_ops.append((op_addr, op))
ops = []
for op_addr, op in addr_ops:
label = addr_labels.get(op_addr)
if label is not None:
ops.append(label)
if op is not None:
ops.append(op)
return ops
|
Disassemble python bytecode into a series of :class:`Op` and
:class:`Label` instances.
Arguments:
code(bytes): The bytecode (a code object's ``co_code`` property). You
can also provide a function.
origin(dict): The opcode specification of the python version that
generated ``code``. If you provide ``None``, the specs for the
currently running python version will be used.
Returns:
list: A list of opcodes and labels.
|
juraj-google-style
|
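A hedged usage sketch for `disassemble`: passing a function directly lets it pull `co_code` itself and use the running interpreter's opcode specs (`origin=None`). Where `disassemble` is imported from is an assumption.
def add_one(x):
    return x + 1

# `disassemble` is assumed to be importable from the module this pair belongs to.
for item in disassemble(add_one):
    print(item)  # a mix of Op instances and (for jump targets) Label instances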
def slithir_cfg_to_dot(self, filename):
from slither.core.cfg.node import NodeType
with open(filename, 'w', encoding='utf8') as f:
f.write('digraph{\n')
for node in self.nodes:
label = 'Node Type: {} {}\n'.format(NodeType.str(node.type), node.node_id)
if node.expression:
label += '\nEXPRESSION:\n{}\n'.format(node.expression)
if node.irs:
label += '\nIRs:\n' + '\n'.join([str(ir) for ir in node.irs])
f.write('{}[label="{}"];\n'.format(node.node_id, label))
for son in node.sons:
f.write('{}->{};\n'.format(node.node_id, son.node_id))
f.write("}\n")
|
Export the function to a dot file
Args:
filename (str)
|
juraj-google-style
|
def evaluate_layout(self, layout):
layout_dict = {}
if layout:
for pair in layout.split(';'):
(mtf_dimension_name, mesh_dimension_name) = pair.split(':', 1)
if (mtf_dimension_name in self._layout_validator.splittable_mtf_dimension_names):
layout_dict[mtf_dimension_name] = mesh_dimension_name
else:
logging.warning('Skipping unsplittable dimension %s.', mtf_dimension_name)
tensor_memory = {}
for tensor_name in self._graph.get_all_tensor_names():
if self._graph.is_tensor_on_canonical_device(tensor_name):
tensor_memory[tensor_name] = self._graph.get_tensor_size(tensor_name, layout_dict, self._layout_validator.mesh_dimension_name_to_size)
else:
tensor_memory[tensor_name] = 0.0
peak_memory_usage = 0.0
for tensor_names in self._get_memory_contents():
memory_usage = 0.0
for tensor_name in tensor_names:
memory_usage += tensor_memory[tensor_name]
peak_memory_usage = max(peak_memory_usage, memory_usage)
return peak_memory_usage
|
The current objective value for the given layout.
TODO(joshuawang): The current function does not check that the given
layout is valid.
Args:
layout: a string, representing a layout to evaluate (e.g.
"d_ff:m1;heads:m2").
Returns:
A float, the objective value.
|
codesearchnet
|
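The layout string accepted by `evaluate_layout` is a semicolon-separated list of 'mtf_dimension:mesh_dimension' pairs. A standalone sketch of the parsing step, ignoring the splittability check done inside the method:
layout = 'd_ff:m1;heads:m2'
layout_dict = dict(pair.split(':', 1) for pair in layout.split(';'))
# -> {'d_ff': 'm1', 'heads': 'm2'}; the method additionally skips any
#    dimension name not in splittable_mtf_dimension_names.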
def ProcessMessage(self, message):
cert = rdf_crypto.Certificate(message.payload)
queue = self.well_known_session_id.Queue()
client_id = message.source
try:
enrolment_cache.Get(client_id)
return
except KeyError:
enrolment_cache.Put(client_id, 1)
if data_store.AFF4Enabled():
client = aff4.FACTORY.Create(client_id, aff4_grr.VFSGRRClient, mode='rw', token=self.token)
client_cert = client.Get(client.Schema.CERT)
if data_store.RelationalDBEnabled():
try:
md = data_store.REL_DB.ReadClientMetadata(client_id.Basename())
client_cert = md.certificate
except db.UnknownClientError:
client_cert = None
if data_store.RelationalDBEnabled():
data_store.REL_DB.WriteClientMetadata(client_id.Basename(), fleetspeak_enabled=False)
if (not client_cert):
flow.StartAFF4Flow(client_id=client_id, flow_name=CAEnroler.__name__, csr=cert, queue=queue, token=self.token)
|
Begins an enrollment flow for this client.
Args:
message: The Certificate sent by the client. Note that this message is
not authenticated.
|
codesearchnet
|
def __init__(self, source: Any, tag: str, stacktrace: Optional[bool]=None, stacklimit: Optional[int]=None, stacktop: int=-1):
if not isinstance(tag, str):
raise ValueError(f'`tag` must be a string. Encountered: {tag!r}.')
self._source = source
self._tag = tag
self._stack = None
self._stacktrace = None
if stacktrace is None:
stacktrace = flags.is_tracking_origin()
if stacklimit is None:
stacklimit = flags.get_origin_stacktrace_limit()
if stacktrace:
self._stack = traceback.extract_stack(limit=stacklimit - stacktop)
if stacktop < 0:
self._stack = self._stack[:stacktop]
|
Constructor.
Args:
source: Source value for the origin.
tag: A descriptive tag of the origin. Built-in tags are:
'__init__', 'clone', 'deepclone', 'return'. Users can manually
call `sym_setorigin` with a custom tag value.
stacktrace: If True, enable stack trace for the origin. If None, enable
stack trace if `pg.track_origin()` is called. Otherwise stack trace is
disabled.
stacklimit: An optional integer to limit the stack depth. If None, it's
determined by the value passed to `pg.set_origin_stacktrace_limit`,
which is 10 by default.
stacktop: A negative integer to indicate the stack top among the stack
frames that we want to present to the user; by default it's 2 levels up from
the stack within the current `sym_setorigin` call.
|
github-repos
|
def register_domain(self, domain=0, tokenizer=None, trie=None):
self.domains[domain] = IntentDeterminationEngine(
tokenizer=tokenizer, trie=trie)
|
Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
domain(str): a string representing the domain you wish to add
|
juraj-google-style
|
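A hedged usage sketch; the name of the multi-domain engine class that owns `register_domain` is an assumption, and passing None for tokenizer/trie is assumed to be acceptable to IntentDeterminationEngine.
engine = MultiDomainEngine()              # hypothetical owner of register_domain
engine.register_domain(domain='weather')
engine.register_domain(domain='music')
# engine.domains now maps each domain key to its own IntentDeterminationEngine.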
def checksum(self, url):
_, path = self._parse_url(url)
file_checksum = self._hdfs_client.checksum(path)
return '%s-%d-%s' % (file_checksum[_FILE_CHECKSUM_ALGORITHM], file_checksum[_FILE_CHECKSUM_LENGTH], file_checksum[_FILE_CHECKSUM_BYTES])
|
Fetches a checksum description for a URL.
Returns:
String describing the checksum.
Raises:
``BeamIOError``: if url doesn't exist.
|
github-repos
|
def merge_checkpoint(input_graph,
checkpoint,
output_node_names,
output_graph,
sess):
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
input_graph_def = graph_pb2.GraphDef()
with gfile.FastGFile(input_graph, "r") as f:
text_format.Merge(f.read().decode("utf-8"), input_graph_def)
for node in input_graph_def.node:
node.device = ""
importer.import_graph_def(input_graph_def, name="")
sess.run([restore_op_name], {filename_tensor_name: checkpoint})
output_graph_def = graph_util.convert_variables_to_constants(
sess,
input_graph_def,
output_node_names,
variable_names_blacklist=""
)
with gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
|
Get the variable values from the checkpoint file, and merge them to the GraphDef file
Args:
input_graph: the GraphDef file, doesn't contain variable values
checkpoint: the checkpoint file
output_node_names: A list of string, the output names
output_graph: String of the location and the name of the
output graph
sess: the TensorFlow session used to restore the checkpoint and
convert the variables to constants
|
juraj-google-style
|
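A hypothetical call sketch for `merge_checkpoint`, assuming a TF1-style environment (or tf.compat.v1 under TF2); all file names and the output node name are placeholders, and the input graph must be a text-format GraphDef since the function parses it with `text_format.Merge`.
import tensorflow as tf   # assumed TF1-style API (tf.Session, etc.)

with tf.Session() as sess:
    merge_checkpoint(input_graph='model.pbtxt',          # text-format GraphDef
                     checkpoint='model.ckpt',            # variable values
                     output_node_names=['output/Softmax'],
                     output_graph='frozen_model.pb',     # frozen result
                     sess=sess)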
def DirnamePath(self, path):
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
if not path:
return None
dirname, _, _ = path.rpartition(self.PATH_SEPARATOR)
return dirname
|
Determines the directory name of the path.
The file system root is represented by an empty string.
Args:
path (str): path.
Returns:
str: directory name of the path or None.
|
juraj-google-style
|
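A behavior sketch for `DirnamePath`, assuming a concrete file system subclass whose PATH_SEPARATOR is '/':
fs = SomeFileSystem()              # hypothetical concrete subclass
fs.DirnamePath('/usr/local/bin')   # -> '/usr/local'
fs.DirnamePath('/usr/local/bin/')  # trailing separator stripped first -> '/usr/local'
fs.DirnamePath('/')                # becomes empty after stripping -> None
fs.DirnamePath('file.txt')         # no separator -> '' (the file system root)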
def dict_hist(item_list, weight_list=None, ordered=False, labels=None):
if (labels is None):
hist_ = defaultdict(int)
else:
hist_ = {k: 0 for k in labels}
if (weight_list is None):
for item in item_list:
hist_[item] += 1
else:
for (item, weight) in zip(item_list, weight_list):
hist_[item] += weight
if ordered:
getval = op.itemgetter(1)
key_order = [key for (key, value) in sorted(hist_.items(), key=getval)]
hist_ = order_dict_by(hist_, key_order)
return hist_
|
r"""
Builds a histogram of items in item_list
Args:
item_list (list): list with hashable items (usually containing duplicates)
weight_list (list): optional per-item weights; each item's count is
incremented by its weight instead of 1
ordered (bool): if True, order the returned dict by ascending count
labels (iterable): optional keys to pre-initialize with a count of 0
Returns:
dict : dictionary where the keys are items in item_list, and the values
are the number of times the item appears in item_list.
CommandLine:
python -m utool.util_dict --test-dict_hist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]
>>> hist_ = dict_hist(item_list)
>>> result = ut.repr2(hist_)
>>> print(result)
{1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}
|
codesearchnet
|
def validate(self, value, model_instance):
if (not isinstance(value, base.StateWrapper)):
raise exceptions.ValidationError((self.error_messages['wrong_type'] % value))
elif (not (value.workflow == self.workflow)):
raise exceptions.ValidationError((self.error_messages['wrong_workflow'] % value.workflow))
elif (value.state not in self.workflow.states):
raise exceptions.ValidationError((self.error_messages['invalid_state'] % value.state))
|
Validate that a given value is a valid option for a given model instance.
Args:
value (xworkflows.base.StateWrapper): The base.StateWrapper returned by to_python.
model_instance: A WorkflowEnabled instance
|
codesearchnet
|
def defocus_blur(x, severity=1):
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][(severity - 1)]
x = (np.array(x) / 255.0)
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0))
x_clip = (np.clip(channels, 0, 1) * 255)
return around_and_astype(x_clip)
|
Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
|
codesearchnet
|
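A hypothetical call on a random uint8 RGB image; `disk` and `around_and_astype` are helpers assumed to be defined alongside `defocus_blur`, and OpenCV is pulled in lazily via tensorflow_datasets.
import numpy as np

img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
blurred = defocus_blur(img, severity=3)   # severity 3 selects kernel params (6, 0.5)
assert blurred.shape == img.shape and blurred.dtype == np.uint8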
def callEventWaitAndGetRpc(self, callback_id, event_name, timeout_sec):
raise NotImplementedError()  # abstract here; concrete snippet clients override this
|
Calls snippet lib's RPC to wait for a callback event.
Override this method to use this class with various snippet lib
implementations.
This function waits and gets a CallbackEvent with the specified identifier
from the server. It will raise a timeout error if the expected event does
not occur within the time limit.
Args:
callback_id: str, the callback identifier.
event_name: str, the callback name.
timeout_sec: float, the number of seconds to wait for the event. It is
already checked that this argument is no longer than the max timeout
of a single RPC.
Returns:
The event dictionary.
Raises:
errors.CallbackHandlerTimeoutError: Raised if the expected event does not
occur within the time limit.
|
github-repos
|
def document(self, document_id, **kwargs):
baseuri = '{}document/{}/content'.format(self._DOCUMENT_URI,
document_id)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
Requests for a document by the document id.
Normally the response.content can be saved as a pdf file
Args:
document_id (str): The id of the document retrieved.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
|
juraj-google-style
|
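A hedged usage sketch following the note that `response.content` can be saved as a PDF; `client` stands for whatever object owns this `document` method, and the document id is a placeholder.
res = client.document('123456789')   # returns a requests.Response
with open('statement.pdf', 'wb') as fh:
    fh.write(res.content)            # persist the PDF bytes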
def save(self, filename=None, directory=None):
if filename is not None:
self.filename = filename
if directory is not None:
self.directory = directory
filepath = self.filepath
tools.mkdirs(filepath)
data = text_type(self.source)
with io.open(filepath, 'w', encoding=self.encoding) as fd:
fd.write(data)
if not data.endswith(u'\n'):
fd.write(u'\n')
return filepath
|
Save the DOT source to file. Ensure the file ends with a newline.
Args:
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)
directory: (Sub)directory for source saving and rendering.
Returns:
The (possibly relative) path of the saved source file.
|
juraj-google-style
|
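A hedged usage sketch; `dot` stands for whatever graph object owns this `save` method (its constructor is not shown in this pair).
path = dot.save()                      # writes self.filename under self.directory
path = dot.save('example.gv', 'out')   # overrides filename/directory first
print(path)                            # the (possibly relative) path that was written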
def _assertOpOutputMatchesExpected(self, op, axis, output_type, op_input, expected):
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(dtypes.as_dtype(op_input.dtype), op_input.shape, name='a')
output = op(pinp, axis=axis, output_type=output_type)
result = session.run(output, {pinp: op_input})
self.assertAllEqual(result, expected)
|
Verifies that 'op' produces 'expected' when fed input 'op_input' .
Args:
op: argmin or argmax operator to test.
axis: integer axis to reduce across.
output_type: numpy datatype of the output to produce.
op_input: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
|
github-repos
|
def converted_function_names(self):
if self._converted_function_names is None:
parsed_names = []
for name in self.functions:
elements = name.rsplit('_', 1)
if len(elements) == 2 and elements[1].isnumeric():
parsed_names.append((int(elements[1]), elements[0], name))
else:
parsed_names.append((-1, name, name))
self._converted_function_names = {name: '{}_frozen_{}'.format(base_name, ops.uid()) for _, base_name, name in sorted(parsed_names)}
return self._converted_function_names
|
Map from original to new function names.
In order to avoid conflicts (two functions with the same name, one converted
and one not), we need to change the name of every converted function to
something that is hopefully unique.
Returns:
Map from original to new suggested function names.
|
github-repos
|
def _map_graph_network(inputs, outputs):
nodes_in_decreasing_depth, layer_indices = _build_map(outputs)
network_nodes = {_make_node_key(node.layer.name, node.layer._inbound_nodes.index(node)) for node in nodes_in_decreasing_depth}
nodes_depths = {}
layers_depths = {}
for node in reversed(nodes_in_decreasing_depth):
depth = nodes_depths.setdefault(node, 0)
previous_depth = layers_depths.get(node.layer, 0)
depth = max(depth, previous_depth)
layers_depths[node.layer] = depth
nodes_depths[node] = depth
for node_dep in node.parent_nodes:
previous_depth = nodes_depths.get(node_dep, 0)
nodes_depths[node_dep] = max(depth + 1, previous_depth)
for input_t in inputs:
input_layer = input_t._keras_history[0]
if input_layer not in layers_depths:
layers_depths[input_layer] = 0
layer_indices[input_layer] = -1
nodes_depths[input_layer._inbound_nodes[0]] = 0
network_nodes.add(_make_node_key(input_layer.name, 0))
nodes_by_depth = collections.defaultdict(list)
for node, depth in nodes_depths.items():
nodes_by_depth[depth].append(node)
layers_by_depth = collections.defaultdict(list)
for layer, depth in layers_depths.items():
layers_by_depth[depth].append(layer)
depth_keys = list(layers_by_depth.keys())
depth_keys.sort(reverse=True)
layers = []
for depth in depth_keys:
layers_for_depth = layers_by_depth[depth]
layers_for_depth.sort(key=lambda x: layer_indices[x])
layers.extend(layers_for_depth)
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
computable_tensors = set()
for x in inputs:
computable_tensors.add(id(x))
layers_with_complete_input = []
for depth in depth_keys:
for node in nodes_by_depth[depth]:
layer = node.layer
if layer and (not node.is_input):
for x in nest.flatten(node.keras_inputs):
if id(x) not in computable_tensors:
raise ValueError('Graph disconnected: cannot obtain value for tensor ' + str(x) + ' at layer "' + layer.name + '". The following previous layers were accessed without issue: ' + str(layers_with_complete_input))
for x in nest.flatten(node.outputs):
computable_tensors.add(id(x))
layers_with_complete_input.append(layer.name)
all_names = [layer.name for layer in layers]
for name in all_names:
if all_names.count(name) != 1:
raise ValueError('The name "' + name + '" is used ' + str(all_names.count(name)) + ' times in the model. All layer names should be unique.')
return (network_nodes, nodes_by_depth, layers, layers_by_depth)
|
Validates a network's topology and gather its layers and nodes.
Args:
inputs: List of input tensors.
outputs: List of outputs tensors.
Returns:
A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.
- nodes: list of Node instances.
- nodes_by_depth: dict mapping ints (depth) to lists of node instances.
- layers: list of Layer instances.
- layers_by_depth: dict mapping ints (depth) to lists of layer instances.
Raises:
ValueError: In case the network is not valid (e.g. disconnected graph).
|
github-repos
|
def get_trans(self) -> torch.Tensor:
return self._trans
|
Getter for the translation.
Returns:
The stored translation
|
github-repos
|
def get_clusters(self, variant_id):
query = {'variant_id':variant_id}
identities = self.db.identity.find(query)
return identities
|
Search what clusters a variant belongs to
Args:
variant_id(str): From ID column in vcf
Returns:
identities (pymongo.cursor.Cursor): the identity (cluster) documents that contain the variant
|
juraj-google-style
|
def resolve_widget(self, field):
if hasattr(field, 'field'):
widget = field.field.widget
else:
widget = field.widget
return widget
|
Given a Field or BoundField, return widget instance.
Todo:
Raise an exception if the given field object does not have a
widget.
Arguments:
field (Field or BoundField): A field instance.
Returns:
django.forms.widgets.Widget: Retrieved widget from given field.
|
juraj-google-style
|
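A sketch of the two accepted inputs, assuming a standard Django form; `resolver` stands for whatever object owns `resolve_widget`.
from django import forms

class ContactForm(forms.Form):
    email = forms.CharField()

form = ContactForm()
resolver.resolve_widget(form.fields['email'])   # plain Field  -> field.widget
resolver.resolve_widget(form['email'])          # BoundField   -> field.field.widget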
def attach_socket(self, container, params=None, ws=False):
if (params is None):
params = {'stdout': 1, 'stderr': 1, 'stream': 1}
if (('detachKeys' not in params) and ('detachKeys' in self._general_configs)):
params['detachKeys'] = self._general_configs['detachKeys']
if ws:
return self._attach_websocket(container, params)
headers = {'Connection': 'Upgrade', 'Upgrade': 'tcp'}
u = self._url('/containers/{0}/attach', container)
return self._get_raw_response_socket(self.post(u, None, params=self._attach_params(params), stream=True, headers=headers))
|
Like ``attach``, but returns the underlying socket-like object for the
HTTP request.
Args:
container (str): The container to attach to.
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
For ``detachKeys``, ~/.docker/config.json is used by default.
ws (bool): Use websockets instead of raw HTTP.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def vqt(input_qhbm: qhbm.QHBM, target_hamiltonian: Union[tf.Tensor, hamiltonian.Hamiltonian], beta: tf.Tensor):
def f_vqt(bitstrings):
h_expectations = tf.squeeze(input_qhbm.q_inference.expectation(bitstrings, target_hamiltonian), 1)
beta_h_expectations = beta * h_expectations
energies = tf.stop_gradient(input_qhbm.e_inference.energy(bitstrings))
return beta_h_expectations - energies
average_expectation = input_qhbm.e_inference.expectation(f_vqt)
current_partition = tf.stop_gradient(input_qhbm.e_inference.log_partition())
return average_expectation - current_partition
|
Computes the VQT loss of a given QHBM and Hamiltonian.
This function is differentiable within a `tf.GradientTape` scope.
Args:
input_qhbm: Inference methods for the model.
target_hamiltonian: The Hamiltonian whose thermal state is to be learned. If
it is a `tf.Tensor`, it is of type `tf.string` with shape [1], result of
calling `tfq.convert_to_tensor` on a list of `cirq.PauliSum`, `[op]`.
Otherwise, a Hamiltonian.
beta: A scalar `tf.Tensor` which is the inverse temperature at which the
loss is calculated.
Returns:
The VQT loss.
|
github-repos
|
def quad_genz_keister_22 ( order ):
order = sorted(GENZ_KEISTER_22.keys())[order]
abscissas, weights = GENZ_KEISTER_22[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)
weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)
return abscissas, weights
|
Hermite Genz-Keister 22 rule.
Args:
order (int):
The quadrature order. Must be in the interval (0, 8).
Returns:
(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
Abscissas and weights
Examples:
>>> abscissas, weights = quad_genz_keister_22(1)
>>> print(numpy.around(abscissas, 4))
[-1.7321 0. 1.7321]
>>> print(numpy.around(weights, 4))
[0.1667 0.6667 0.1667]
|
juraj-google-style
|
def swo_supported_speeds(self, cpu_speed, num_speeds=3):
buf_size = num_speeds
buf = (ctypes.c_uint32 * buf_size)()
res = self._dll.JLINKARM_SWO_GetCompatibleSpeeds(cpu_speed, 0, buf, buf_size)
if (res < 0):
raise errors.JLinkException(res)
return list(buf)[:res]
|
Retrieves a list of SWO speeds supported by both the target and the
connected J-Link.
The supported speeds are returned in order from highest to lowest.
Args:
self (JLink): the ``JLink`` instance
cpu_speed (int): the target's CPU speed in Hz
num_speeds (int): the number of compatible speeds to return
Returns:
A list of compatible SWO speeds in Hz in order from highest to lowest.
|
codesearchnet
|
def _open_usb_handle(serial_number=None, **kwargs):
init_dependent_flags()
remote_usb = conf.remote_usb
if remote_usb:
if (remote_usb.strip() == 'ethersync'):
device = conf.ethersync
try:
mac_addr = device['mac_addr']
port = device['plug_port']
except (KeyError, TypeError):
raise ValueError('Ethersync needs mac_addr and plug_port to be set')
else:
ethersync = cambrionix.EtherSync(mac_addr)
serial_number = ethersync.get_usb_serial(port)
return local_usb.LibUsbHandle.open(serial_number=serial_number, **kwargs)
|
Open a UsbHandle subclass, based on configuration.
If configuration 'remote_usb' is set, use it to connect to remote usb,
otherwise attempt to connect locally. 'remote_usb' is set to the USB type,
EtherSync or other.
Example of Cambrionix unit in config:
remote_usb: ethersync
ethersync:
mac_addr: 78:a5:04:ca:91:66
plug_port: 5
Args:
serial_number: Optional serial number to connect to.
**kwargs: Arguments to pass to respective handle's Open() method.
Returns:
Instance of UsbHandle.
|
codesearchnet
|
def transformer_encoder_attention_unit(x, hparams, encoder_self_attention_bias, attention_dropout_broadcast_dims, save_weights_to=None, make_image_summary=True):
with tf.variable_scope('self_attention'):
y = common_attention.multihead_attention(common_layers.layer_preprocess(x, hparams), None, encoder_self_attention_bias, (hparams.attention_key_channels or hparams.hidden_size), (hparams.attention_value_channels or hparams.hidden_size), hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=hparams.max_relative_position, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, hard_attention_k=hparams.hard_attention_k)
x = common_layers.layer_postprocess(x, y, hparams)
return x
|
Applies multihead attention function which is parametrised for encoding.
Args:
x: input
hparams: model hyper-parameters
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
attention_dropout_broadcast_dims: For noise broadcasting in the dropout
layers to save memory during training
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
the output tensor
|
codesearchnet
|
def delete_contexts(self, context_id_list):
for c_id in context_id_list:
if (c_id in self._contexts):
del self._contexts[c_id]
|
Delete contexts from the ContextManager.
Args:
context_id_list (list): a list of context ids
Returns:
None
|
codesearchnet
|
def client(self, service_name, version, component, **kw):
service = _create_service_api(self._credentials, service_name, version, kw.get('developer_key'), kw.get('cache_discovery', False), (self._http or _build_http()))
return ServiceClient(gcp_service=service, component=component, credentials=self._credentials, rate_limiter=self._rate_limiter, use_cached_http=self._use_cached_http, http=self._http)
|
Build a ServiceClient for a GCP service API component.
Args:
service_name (str): The name of the GCP service.
version (str): The version of the GCP service API.
component (str): The service component the client will target.
**kw: Optional keyword args, e.g. 'developer_key' and 'cache_discovery'.
Returns:
ServiceClient: A client wrapping the discovered service.
|
codesearchnet
|