code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes) |
---|---|---|
def jwt_is_expired(self, access_token=None, leeway=0):
if (access_token is not None):
exp = self._decode_exp(access_token)
else:
exp = self.jwt_exp
now = time()
if (exp < (now - leeway)):
return True
return False
|
Validate JWT access token expiration.
Args:
access_token (str): Access token to validate. Defaults to ``None``.
leeway (float): Time in seconds to adjust for local clock skew. Defaults to 0.
Returns:
bool: ``True`` if expired, otherwise ``False``.
|
codesearchnet
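A minimal standalone sketch of the same expiry check (hypothetical helper, not part of the class above; the `exp` claim is a Unix timestamp):

import time

def is_expired(exp, leeway=0):
    # Expired once the 'exp' claim is more than `leeway` seconds in the past.
    return exp < time.time() - leeway

print(is_expired(time.time() + 60))            # False: expires in a minute
print(is_expired(time.time() - 1, leeway=5))   # False: within the 5 s leeway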
|
def _DropCommonSuffixes(filename):
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and (len(filename) > len(suffix)) and (filename[((- len(suffix)) - 1)] in ('-', '_'))):
return filename[:((- len(suffix)) - 1)]
return os.path.splitext(filename)[0]
|
Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
|
codesearchnet
|
def __edit_distance_alt(self, words):
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
|
Compute all strings that are one edit away from each of the given words,
using only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
list: Strings that are one further edit away from the provided words
(i.e. edit distance two from the original word)
|
juraj-google-style
|
def fit(self, x, y):
train = np.vstack((np.array([self.featurize_row(row.iloc[0],
row.iloc[1]) for idx, row in x.iterrows()]),
np.array([self.featurize_row(row.iloc[1],
row.iloc[0]) for idx, row in x.iterrows()])))
labels = np.vstack((y, -y)).ravel()
verbose = 1 if self.verbose else 0
self.clf = CLF(verbose=verbose,
min_samples_leaf=self.L,
n_estimators=self.E,
max_depth=self.max_depth,
n_jobs=self.n_jobs).fit(train, labels)
|
Train the model.
Args:
x (pd.DataFrame): CEPC format dataframe containing the pairs
y (pd.DataFrame or np.ndarray): labels associated to the pairs
|
juraj-google-style
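A toy illustration of the pair symmetrization performed by `fit` above: each (a, b) pair is also presented as (b, a) with its label negated (feature values are made up):

import numpy as np

feats_ab = np.array([[0.1, 0.2], [0.3, 0.4]])   # stands in for featurize_row(a, b)
feats_ba = np.array([[0.2, 0.1], [0.4, 0.3]])   # stands in for featurize_row(b, a)
y = np.array([1, -1])

train = np.vstack((feats_ab, feats_ba))
labels = np.vstack((y, -y)).ravel()
print(train.shape, labels)   # (4, 2) [ 1 -1 -1  1]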
|
def compilable_sources(self, sourcedir, absolute=False, recursive=True, excludes=[]):
filepaths = []
for (root, dirs, files) in os.walk(sourcedir):
dirs.sort()
files.sort()
for item in files:
relative_dir = os.path.relpath(root, sourcedir)
if (relative_dir == '.'):
relative_dir = ''
absolute_filepath = os.path.join(root, item)
conditions = {'sourcedir': sourcedir, 'nopartial': True, 'exclude_patterns': excludes, 'excluded_libdirs': []}
if self.match_conditions(absolute_filepath, **conditions):
relative_filepath = os.path.join(relative_dir, item)
if absolute:
filepath = absolute_filepath
else:
filepath = relative_filepath
filepaths.append(filepath)
if (not recursive):
break
return filepaths
|
Find all scss sources that should be compiled, i.e. all sources that
are not Sass "partials".
Args:
sourcedir (str): Directory path to scan.
Keyword Arguments:
absolute (bool): Returned paths will be absolute using
``sourcedir`` argument (if True), else return relative paths.
recursive (bool): Switch to enabled recursive finding (if True).
Default to True.
excludes (list): A list of excluding patterns (glob patterns).
Patterns are matched against the relative filepath (from its
sourcedir).
Returns:
list: List of source paths.
|
codesearchnet
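A simplified, self-contained sketch of the same idea, assuming Sass partials are files whose names start with an underscore and that excludes are glob patterns matched against the relative path:

import fnmatch
import os

def find_scss_sources(sourcedir, excludes=()):
    found = []
    for root, dirs, files in os.walk(sourcedir):
        dirs.sort()
        files.sort()
        for name in files:
            # Skip non-Sass files and "_partial.scss" style partials.
            if not name.endswith('.scss') or name.startswith('_'):
                continue
            rel = os.path.relpath(os.path.join(root, name), sourcedir)
            if any(fnmatch.fnmatch(rel, pat) for pat in excludes):
                continue
            found.append(rel)
    return found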
|
def to_python_package(classes, target_folder, parent_package=None, indent=DEFAULT_INDENT):
PackageBuilder(target_folder, parent_package, indent).from_classes_with_refs(classes)
|
This function can be used to build a python package representation of pyschema classes.
One module is created per namespace in a package matching the namespace hierarchy.
Args:
classes: A collection of classes to build the package from
target_folder: Root folder of the package
parent_package: Prepended on all import statements in order to support absolute imports.
parent_package is not used when building the package file structure
indent: Indent level. Defaults to 4 spaces
|
codesearchnet
|
def get_equivalent_atoms(self, tolerance=0.3):
PA = self._get_point_group_analyzer(tolerance=tolerance)
eq = PA.get_equivalent_atoms()
self._convert_eq(eq)
return eq
|
Returns sets of equivalent atoms with symmetry operations
Args:
tolerance (float): Tolerance to generate the full set of symmetry
operations.
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
that maps atom ``i`` unto ``j``.
|
codesearchnet
|
def create_threads(self, sess, coord=None, daemon=False, start=False):
with self._lock:
try:
if self._runs_per_session[sess] > 0:
return []
except KeyError:
pass
self._runs_per_session[sess] = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = []
for op in self._enqueue_ops:
name = 'QueueRunnerThread-{}-{}'.format(self.name, op.name)
ret_threads.append(threading.Thread(target=self._run, args=(sess, op, coord), name=name))
if coord:
name = 'QueueRunnerThread-{}-close_on_stop'.format(self.name)
ret_threads.append(threading.Thread(target=self._close_on_stop, args=(sess, self._cancel_op, coord), name=name))
for t in ret_threads:
if coord:
coord.register_thread(t)
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
|
Create threads to run the enqueue ops for the given session.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
If previously created threads for the given session are still running, no
new threads will be created.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
|
github-repos
|
def rand_ascii_str(length):
letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]
return ''.join(letters)
|
Generates a random string of specified length, composed of ascii letters
and digits.
Args:
length: The number of characters in the string.
Returns:
The random string generated.
|
codesearchnet
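A self-contained equivalent using only the standard library (`ascii_letters_and_digits` above is presumably `string.ascii_letters + string.digits`):

import random
import string

def rand_ascii_str(length):
    pool = string.ascii_letters + string.digits
    return ''.join(random.choice(pool) for _ in range(length))

print(rand_ascii_str(8))   # e.g. 'aZ3kQ9bX'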
|
def __init__(self, wrapped_list):
self._non_append_mutation_value = False
self._external_modification_value = False
super().__init__(wrapped_list)
self._last_wrapped_list_snapshot = list(self._storage)
|
Construct a new list wrapper.
Args:
wrapped_list: The initial value of the data structure. A shallow copy may
be maintained for error checking. `wrapped_list` itself should not be
modified directly after constructing the `ListWrapper`, and if changes
are detected the `ListWrapper` will throw an exception on save.
|
github-repos
|
async def stop_tasks(self, address):
tasks = self._tasks.get(address, [])
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
self._tasks[address] = []
|
Clear all tasks pertaining to a tile.
This coroutine will synchronously cancel all running tasks that were
attached to the given tile and wait for them to stop before returning.
Args:
address (int): The address of the tile we should stop.
|
juraj-google-style
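A minimal standalone sketch of the cancel-then-wait pattern used above (note the `gather` call must be awaited for the coroutine to actually wait):

import asyncio

async def stop_all(tasks):
    # Request cancellation, then wait for every task to finish unwinding.
    for t in tasks:
        t.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)

async def main():
    tasks = [asyncio.create_task(asyncio.sleep(10)) for _ in range(3)]
    await stop_all(tasks)
    print(all(t.cancelled() for t in tasks))   # True

asyncio.run(main())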
|
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
vision_data = {}
if image_sizes is not None:
images_kwargs = LlavaProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
crop_size = images_kwargs.get('crop_size', None) or self.image_processor.crop_size
resized_height, resized_width = (crop_size['height'], crop_size['width'])
num_image_tokens = resized_height
num_image_tokens += self.num_additional_image_tokens
if self.vision_feature_select_strategy == 'default':
num_image_tokens -= 1
num_image_tokens = [num_image_tokens] * len(image_sizes)
num_image_patches = [1] * len(image_sizes)
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
|
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`List[List[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
|
github-repos
|
def __init__(self, name=None):
rr = gen_io_ops.identity_reader_v2(name=name)
super(IdentityReader, self).__init__(rr, supports_serialize=True)
|
Create a IdentityReader.
Args:
name: A name for the operation (optional).
|
github-repos
|
def to_frame(self, **kwargs):
df = export.write_dataframe(self._values, **kwargs)
df.name = self.title
return df
|
r"""Return a pandas DataFrame loaded from the worksheet data.
Args:
\**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``, ``index_col``)
Returns:
pandas.DataFrame: new ``DataFrame`` instance
|
codesearchnet
|
def to_pandas(self):
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if (len(self.columns) != 0):
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(((len(df.index) != len(self.index)) or (len(df.columns) != len(self.columns))))
df.index = self.index
df.columns = self.columns
return df
|
Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
|
codesearchnet
|
def write_info_file(tensorboard_info):
payload = ('%s\n' % _info_to_string(tensorboard_info))
with open(_get_info_file_path(), 'w') as outfile:
outfile.write(payload)
|
Write TensorBoardInfo to the current process's info file.
This should be called by `main` once the server is ready. When the
server shuts down, `remove_info_file` should be called.
Args:
tensorboard_info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `tensorboard_info` is not of the correct type.
|
codesearchnet
|
def __init__(self, dump):
self._dump = dump
self._cached_tensor_values = {}
|
Constructor of ExpressionEvaluator.
Args:
dump: an instance of `DebugDumpDir`.
|
github-repos
|
def RunJob(self, job):
if (not job.leased_until):
raise LockError('CronJob must be leased for Run() to be called.')
if (job.leased_until < rdfvalue.RDFDatetime.Now()):
raise LockError(('CronJob lease expired for %s.' % job.cron_job_id))
logging.info('Starting cron job: %s', job.cron_job_id)
if (job.args.action_type == job.args.ActionType.SYSTEM_CRON_ACTION):
cls_name = job.args.system_cron_action.job_class_name
job_cls = registry.SystemCronJobRegistry.CronJobClassByName(cls_name)
name = ('%s runner' % cls_name)
elif (job.args.action_type == job.args.ActionType.HUNT_CRON_ACTION):
job_cls = registry.CronJobRegistry.CronJobClassByName('RunHunt')
name = 'Hunt runner'
else:
raise ValueError(("CronJob %s doesn't have a valid args type set." % job.cron_job_id))
run_state = rdf_cronjobs.CronJobRun(cron_job_id=job.cron_job_id, status='RUNNING')
run_state.GenerateRunId()
run_obj = job_cls(run_state, job)
(wait_for_start_event, signal_event, wait_for_write_event) = (threading.Event(), threading.Event(), threading.Event())
try:
self._GetThreadPool().AddTask(target=run_obj.StartRun, args=(wait_for_start_event, signal_event, wait_for_write_event), name=name, blocking=False, inline=False)
if (not wait_for_start_event.wait(TASK_STARTUP_WAIT)):
logging.error('Cron job run task for %s is too slow to start.', job.cron_job_id)
return False
signal_event.set()
wait_for_write_event.wait(TASK_STARTUP_WAIT)
return True
except threadpool.Full:
return False
|
Does the actual work of the Cron, if the job is due to run.
Args:
job: The cronjob rdfvalue that should be run. Must be leased.
Returns:
A boolean indicating if this cron job was started or not. False may
be returned when the threadpool is already full.
Raises:
LockError: if the object is not locked.
ValueError: If the job argument is invalid.
|
codesearchnet
|
def sync_trial_info(self, job_path, expr_dir_name):
expr_name = expr_dir_name[(- 8):]
expr_path = os.path.join(job_path, expr_dir_name)
if (expr_name not in self._monitored_trials):
self._create_trial_info(expr_path)
self._monitored_trials.add(expr_name)
else:
self._update_trial_info(expr_path)
|
Load information of the trial from the given experiment directory.
Create or update the trial information, together with the trial
meta file.
Args:
job_path(str)
expr_dir_name(str)
|
codesearchnet
|
def connect(self, timeout=600):
if self.socket:
raise TensorForceError("Already connected to {}:{}. Only one connection allowed at a time. " +
"Close first by calling `close`!".format(self.host, self.port))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if timeout is None or timeout < 5:
timeout = 5
err = 0
start_time = time.time()
while time.time() - start_time < timeout:
self.socket.settimeout(5)
err = self.socket.connect_ex((self.host, self.port))
if err == 0:
break
time.sleep(1)
if err != 0:
raise TensorForceError("Error when trying to connect to {}:{}: errno={} errcode='{}' '{}'".
format(self.host, self.port, err, errno.errorcode[err], os.strerror(err)))
|
Starts the server tcp connection on the given host:port.
Args:
timeout (int): The time (in seconds) for which we will attempt a connection to the remote
(every 5sec). After that (or if timeout is None or 0), an error is raised.
|
juraj-google-style
|
def get_source_event_declaration(self, event):
return next((x.source_mapping for x in self.events if (x.name == event)))
|
Return the source mapping where the event is declared
Args:
event (str): event name
Returns:
(dict): sourceMapping
|
codesearchnet
|
def drag_and_drop(self, source_selector, destination_selector, **kwargs):
self.info_log(('Drag and drop: source (%s); destination (%s)' % (source_selector, destination_selector)))
use_javascript_dnd = kwargs.get('use_javascript_dnd', 'proxy_driver:use_javascript_dnd')
source_el = self.find(source_selector)
destination_el = self.find(destination_selector)
if use_javascript_dnd:
try:
dnd_script = ["function simulate(f,c,d,e){var b,a=null;for(b in eventMatchers)if(eventMatchers[b].test(c)){a=b;break}if(!a)return!1;document.createEvent?(b=document.createEvent(a),a=='HTMLEvents'?b.initEvent(c,!0,!0):b.initMouseEvent(c,!0,!0,document.defaultView,0,d,e,d,e,!1,!1,!1,!1,0,null),f.dispatchEvent(b)):(a=document.createEventObject(),a.detail=0,a.screenX=d,a.screenY=e,a.clientX=d,a.clientY=e,a.ctrlKey=!1,a.altKey=!1,a.shiftKey=!1,a.metaKey=!1,a.button=1,f.fireEvent('on'+c,a));return!0} var eventMatchers={HTMLEvents:/^(?:load|unload|abort|error|select|change|submit|reset|focus|blur|resize|scroll)$/,MouseEvents:/^(?:click|dblclick|mouse(?:down|up|over|move|out))$/};", 'var source = arguments[0],destination = arguments[1];', "simulate(source, 'mousedown', 0, 0);", "simulate(source, 'mousemove', destination.offsetLeft, destination.offsetTop);", "simulate(source, 'mouseup', destination.offsetLeft, destination.offsetTop);"]
self._driver.execute_script('\n'.join(dnd_script), source_el._element, destination_el._element)
except Exception as e:
self.error_log((u'drag_and_drop exception: %s' % str(e)))
raise
else:
try:
ActionChains(self._driver).drag_and_drop(source_el, destination_el).perform()
except Exception as e:
self.error_log((u'drag_and_drop exception: %s' % str(e)))
raise
|
Drag and drop
Args:
source_selector: (str)
destination_selector: (str)
Kwargs:
use_javascript_dnd: bool; default:
config proxy_driver:use_javascript_dnd
|
codesearchnet
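For reference, the non-JavaScript branch boils down to plain Selenium `ActionChains`; a hedged sketch with hypothetical selectors (assumes Selenium 4 and a working driver):

from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://example.com/sortable")           # hypothetical page
source = driver.find_element(By.CSS_SELECTOR, "#item-1")
target = driver.find_element(By.CSS_SELECTOR, "#slot-2")
ActionChains(driver).drag_and_drop(source, target).perform()
driver.quit()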
|
def multiplicative_jitter(x, epsilon=0.01):
if (epsilon == 0):
return x
return (x * mtf.random_uniform(x.mesh, x.shape, minval=(1.0 - epsilon), maxval=(1.0 + epsilon), dtype=x.dtype))
|
Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x.
|
codesearchnet
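A NumPy analogue of the same jitter, for illustration outside of Mesh TensorFlow:

import numpy as np

def multiplicative_jitter_np(x, epsilon=0.01, rng=None):
    # Scale each entry by a factor drawn uniformly from [1 - epsilon, 1 + epsilon].
    if epsilon == 0:
        return x
    rng = rng or np.random.default_rng()
    return x * rng.uniform(1.0 - epsilon, 1.0 + epsilon, size=x.shape)

print(multiplicative_jitter_np(np.ones((2, 3))))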
|
def delete_by_file(self, file_obj):
BalancedDiscStorage._check_interface(file_obj)
file_hash = self._get_hash(file_obj)
return self.delete_by_hash(file_hash)
|
Remove file from the storage. File is identified by opened `file_obj`,
from which the hashes / path are computed.
Args:
file_obj (file): Opened file-like object, which is used to compute
hashes.
Raises:
IOError: If the `file_obj` is not in storage.
|
juraj-google-style
|
def pull(self, arm_id, success, failure):
self.__beta_dist_dict[arm_id].observe(success, failure)
|
Pull arms.
Args:
arm_id: Arms master id.
success: The number of success.
failure: The number of failure.
|
juraj-google-style
|
def consume_input(self, mystr, stack=[], state=1, curchar=0, depth=0):
mystrsplit = mystr.split(' ')
if self.s[state].type == 1:
stack.append(self.s[state].sym)
if len(self.s[state].trans) > 0:
state = self.s[state].trans[0]
if self.parse(
mystr,
stack=stack,
state=state,
curchar=curchar,
depth=depth + 1) == 1:
return True
return False
if self.s[state].type == 2:
if len(stack) == 0:
return False
sym = stack.pop()
for key in self.s[state].trans:
if sym in self.s[state].trans[key]:
if self.parse(
mystr,
stack=stack,
state=key,
curchar=curchar,
depth=depth + 1) == 1:
return True
return False
if self.s[state].type == 3:
for key in self.s[state].trans:
if mystrsplit[curchar] in self.s[state].trans[key]:
if curchar + 1 == len(mystrsplit) \
and 'closing' in self.s[key].trans:
return True
elif curchar + 1 == len(mystrsplit):
return False
if self.parse(
mystr,
stack=stack,
state=key,
curchar=curchar + 1,
depth=depth + 1) == 1:
return True
return False
|
Consumes an input and validates whether it is accepted
Args:
mystr (str): the input string to be consumed
stack (list): the stack of symbols
state (int): the current state of the PDA
curchar (int): the index of the consumed character
depth (int): the depth of the function call in the stack
Returns:
bool: True if the input is accepted, otherwise False
|
juraj-google-style
|
def __init__(self, log_dir, testbed_name):
self._log_dir = log_dir
self._testbed_name = testbed_name
self.results = records.TestResult()
self._test_run_infos = []
self._test_run_metadata = TestRunner._TestRunMetaData(log_dir, testbed_name)
|
Constructor for TestRunner.
Args:
log_dir: string, root folder where to write logs
testbed_name: string, name of the testbed to run tests on
|
github-repos
|
def parse_fields_whois(self, response):
try:
temp = response.split('|')
ret = {'asn_registry': temp[4].strip(' \n')}
if (ret['asn_registry'] not in self.rir_whois.keys()):
raise ASNRegistryError('ASN registry {0} is not known.'.format(ret['asn_registry']))
ret['asn'] = temp[0].strip(' \n')
ret['asn_cidr'] = temp[2].strip(' \n')
ret['asn_country_code'] = temp[3].strip(' \n').upper()
ret['asn_date'] = temp[5].strip(' \n')
ret['asn_description'] = temp[6].strip(' \n')
except ASNRegistryError:
raise
except Exception as e:
raise ASNParseError('Parsing failed for "{0}" with exception: {1}.'.format(response, e)[:100])
return ret
|
The function for parsing ASN fields from a whois response.
Args:
response (:obj:`str`): The response from the ASN whois server.
Returns:
dict: The ASN lookup results
::
{
'asn' (str) - The Autonomous System Number
'asn_date' (str) - The ASN Allocation date
'asn_registry' (str) - The assigned ASN registry
'asn_cidr' (str) - The assigned ASN CIDR
'asn_country_code' (str) - The assigned ASN country code
'asn_description' (str) - The ASN description
}
Raises:
ASNRegistryError: The ASN registry is not known.
ASNParseError: ASN parsing failed.
|
codesearchnet
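The pipe-delimited line the parser above expects looks like the following (synthetic values; field order inferred from the indices used in the code):

line = "15169 | 8.8.8.8 | 8.8.8.0/24 | US | arin | 1992-12-01 | GOOGLE, US"
temp = line.split('|')
print({
    'asn': temp[0].strip(),
    'asn_cidr': temp[2].strip(),
    'asn_country_code': temp[3].strip().upper(),
    'asn_registry': temp[4].strip(),
    'asn_date': temp[5].strip(),
    'asn_description': temp[6].strip(),
})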
|
def events_filter(self, topics: List[str]=None, from_block: BlockSpecification=None, to_block: BlockSpecification=None) -> StatelessFilter:
return self.client.new_filter(self.address, topics=topics, from_block=from_block, to_block=to_block)
|
Install a new filter for an array of topics emitted by the contract.
Args:
topics: A list of event ids to filter for. Can also be None,
in which case all events are queried.
from_block: The block number at which to start looking for events.
to_block: The block number at which to stop looking for events.
Return:
Filter: The filter instance.
|
codesearchnet
|
def get_propagator(name):
from .sgp4 import Sgp4
from .sgp4beta import Sgp4Beta
scope = locals().copy()
scope.update(globals())
if name not in scope:
raise UnknownPropagatorError(name)
return scope[name]
|
Retrieve a named propagator
Args:
name (str): Name of the desired propagator
Return:
Propagator class
|
juraj-google-style
|
def refl(scatterer, h_pol=True):
return scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * \
radar_xsect(scatterer, h_pol)
|
Reflectivity (with number concentration N=1) for the current setup.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The reflectivity.
NOTE: To compute reflectivity in dBZ, give the particle diameter and
wavelength in [mm], then take 10*log10(Zi).
|
juraj-google-style
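A quick worked example of the dBZ conversion mentioned in the note above (the linear value is made up):

import math

Z = 3162.0              # hypothetical linear reflectivity (D and wavelength in mm)
dBZ = 10.0 * math.log10(Z)
print(round(dBZ, 1))    # 35.0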
|
def make_trace_api(client):
generated = trace_service_client.TraceServiceClient(credentials=client._credentials, client_info=_CLIENT_INFO)
return _TraceAPI(generated, client)
|
Create an instance of the gapic Trace API.
Args:
client (~google.cloud.trace.client.Client): The client that holds
configuration details.
Returns:
A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the
proper configurations.
|
codesearchnet
|
def empty(shape, dtype=None, **kwargs):
data = np.empty(shape, dtype)
return dc.array(data, **kwargs)
|
Create an array of given shape and type, without initializing entries.
Args:
shape (sequence of ints): 2D shape of the array.
dtype (data-type, optional): Desired data-type for the array.
kwargs (optional): Other arguments of the array (*coords, attrs, and name).
Returns:
array (decode.array): Decode array without initializing entries.
|
juraj-google-style
|
def timestamp(method='iso8601'):
if (method == 'iso8601'):
tz_hour = time.timezone // 3600
utc_offset = (str(tz_hour) if (tz_hour < 0) else ('+' + str(tz_hour)))
stamp = (time.strftime('%Y-%m-%dT%H%M%S') + utc_offset)
return stamp
else:
raise ValueError('only iso8601 is accepted for now')
|
make an iso8601 timestamp
Args:
method (str): type of timestamp
Example:
>>> stamp = timestamp()
>>> print('stamp = {!r}'.format(stamp))
stamp = ...-...-...T...
|
codesearchnet
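For comparison, the standard library alone can produce a fully ISO-8601 timestamp with the local UTC offset:

from datetime import datetime, timezone

stamp = datetime.now(timezone.utc).astimezone().isoformat(timespec='seconds')
print(stamp)   # e.g. '2021-03-04T12:34:56+01:00'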
|
def reindex(self):
_map = dict(zip(self.micro_indices, reindex(self.micro_indices)))
partition = tuple((tuple((_map[index] for index in group)) for group in self.partition))
return CoarseGrain(partition, self.grouping)
|
Re-index this coarse graining to use squeezed indices.
The output grouping is translated to use indices ``0..n``, where ``n``
is the number of micro indices in the coarse-graining. Re-indexing does
not affect the state grouping, which is already index-independent.
Returns:
CoarseGrain: A new |CoarseGrain| object, indexed from ``0..n``.
Example:
>>> partition = ((1, 2),)
>>> grouping = (((0,), (1, 2)),)
>>> coarse_grain = CoarseGrain(partition, grouping)
>>> coarse_grain.reindex()
CoarseGrain(partition=((0, 1),), grouping=(((0,), (1, 2)),))
|
codesearchnet
|
def GetSubkeyByName(self, name):
if not self._registry_key and self._registry:
self._GetKeyFromRegistry()
return self._subkeys.get(name.upper(), None)
|
Retrieves a subkey by name.
Args:
name (str): name of the subkey.
Returns:
WinRegistryKey: Windows Registry subkey or None if not found.
|
juraj-google-style
|
def select_sites( self, site_labels ):
if type( site_labels ) in ( list, set ):
selected_sites = [ s for s in self.sites if s.label in site_labels ]
elif type( site_labels ) is str:
selected_sites = [ s for s in self.sites if s.label == site_labels ]
else:
raise ValueError( str( site_labels ) )
return selected_sites
|
Selects sites in the lattice with specified labels.
Args:
site_labels (List(Str)|Set(Str)|Str): Labels of sites to select.
This can be a List [ 'A', 'B' ], a Set { 'A', 'B' }, or a String 'A'.
Returns:
(List(Site)): List of sites with labels given by `site_labels`.
|
juraj-google-style
|
def get_holodeck_path():
if (('HOLODECKPATH' in os.environ) and (os.environ['HOLODECKPATH'] != '')):
return os.environ['HOLODECKPATH']
if (os.name == 'posix'):
return os.path.expanduser('~/.local/share/holodeck')
elif (os.name == 'nt'):
return os.path.expanduser('~\\AppData\\Local\\holodeck')
else:
raise NotImplementedError('holodeck is only supported for Linux and Windows')
|
Gets the path of the holodeck environment
Returns:
(str): path to the current holodeck environment
|
codesearchnet
|
def compress_dir(path, compression="gz"):
for parent, subdirs, files in os.walk(path):
for f in files:
compress_file(os.path.join(parent, f), compression=compression)
|
Recursively compresses all files in a directory. Note that this
compresses all files singly, i.e., it does not create a tar archive. For
that, just use the Python tarfile module.
Args:
path (str): Path to parent directory.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to gz.
|
juraj-google-style
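A minimal sketch of what the per-file step might look like with plain gzip, assuming `compress_file` writes a `.gz` copy next to each file:

import gzip
import shutil

def gzip_copy(path):
    # Write a gzip-compressed copy of `path` alongside the original.
    with open(path, 'rb') as src, gzip.open(path + '.gz', 'wb') as dst:
        shutil.copyfileobj(src, dst)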
|
def authenticate_direct_bind(self, username, password):
bind_user = '{rdn}={username},{user_search_dn}'.format(rdn=self.config.get('LDAP_USER_RDN_ATTR'), username=username, user_search_dn=self.full_user_search_dn)
connection = self._make_connection(bind_user=bind_user, bind_password=password)
response = AuthenticationResponse()
try:
connection.bind()
log.debug("Authentication was successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.success
user_info = self.get_user_info(dn=bind_user, _connection=connection)
response.user_dn = bind_user
response.user_id = username
response.user_info = user_info
if self.config.get('LDAP_SEARCH_FOR_GROUPS'):
response.user_groups = self.get_user_groups(dn=bind_user, _connection=connection)
except ldap3.core.exceptions.LDAPInvalidCredentialsResult:
log.debug("Authentication was not successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.fail
except Exception as e:
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(connection)
return response
|
Performs a direct bind. We can do this since the RDN is the same
as the login attribute. Hence we just string together a dn to find
this user with.
Args:
username (str): Username of the user to bind (the field specified
as LDAP_USER_RDN_ATTR)
password (str): User's password to bind with.
Returns:
AuthenticationResponse
|
codesearchnet
|
def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
mask = [1] + [0] * len(token_ids_0) + [1]
if token_ids_1 is not None:
mask += [0] * len(token_ids_1) + [1]
return mask
|
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of ids of the first sequence.
token_ids_1 (`List[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
github-repos
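The mask layout produced by the final branch above, shown with placeholder token ids:

ids_a, ids_b = [11, 12, 13], [21, 22]

mask_single = [1] + [0] * len(ids_a) + [1]
mask_pair = mask_single + [0] * len(ids_b) + [1]
print(mask_single)   # [1, 0, 0, 0, 1]
print(mask_pair)     # [1, 0, 0, 0, 1, 0, 0, 1]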
|
def GetRequestXML(self, method, *args):
self.suds_client.set_options(nosend=True)
service_request = (getattr(self, method))(*args).envelope
self.suds_client.set_options(nosend=False)
return lxml.etree.fromstring(service_request)
|
Get the raw SOAP XML for a request.
Args:
method: The method name.
*args: A list of arguments to be passed to the method.
Returns:
An element containing the raw XML that would be sent as the request.
|
juraj-google-style
|
def download_write_file(self, metadata, out_dir=None):
fileName = metadata['name']
path = os.path.join(out_dir or wandb_dir(), fileName)
if self.file_current(fileName, metadata['md5']):
return path, None
size, response = self.download_file(metadata['url'])
with open(path, "wb") as file:
for data in response.iter_content(chunk_size=1024):
file.write(data)
return path, response
|
Download a file from a run and write it to wandb/
Args:
metadata (obj): The metadata object for the file to download. Comes from Api.download_urls().
Returns:
A tuple of the file's local path and the streaming response. The streaming response is None if the file already existed and was up to date.
|
juraj-google-style
|
def Webhook(self, request, global_params=None):
config = self.GetMethodConfig('Webhook')
return self._RunMethod(config, request, global_params=global_params)
|
ReceiveTriggerWebhook [Experimental] is called when the API receives a webhook request targeted at a specific trigger.
Args:
request: (CloudbuildProjectsTriggersWebhookRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ReceiveTriggerWebhookResponse) The response message.
|
github-repos
|
def set_document_type(loader_cls: Type, type_: Type) -> None:
loader_cls.document_type = type_
if (not hasattr(loader_cls, '_registered_classes')):
loader_cls._registered_classes = dict()
|
Set the type corresponding to the whole document.
Args:
loader_cls: The loader class to set the document type for.
type_: The type to loader should process the document into.
|
codesearchnet
|
def validate_document(self, definition):
initial_document = {}
try:
initial_document = Loader.load(definition)
except RuntimeError as exception:
self.logger.error(str(exception))
sys.exit(1)
document = Validator().validate(initial_document)
if (document is None):
self.logger.info("Schema validation for '%s' has failed", definition)
sys.exit(1)
self.logger.info("Schema validation for '%s' succeeded", definition)
return document
|
Validate given pipeline document.
The method is trying to load, parse and validate the spline document.
The validator verifies the Python structure, not the file format.
Args:
definition (str): path and filename of a yaml file containing a valid spline definition.
Returns:
dict: loaded and validated spline document.
Note:
if validation fails, the application exits!
See Also:
spline.validation.Validator
|
codesearchnet
|
def get_class(class_key):
if (class_key not in CLASSES):
for basecls in (MediaMetadata, MediaCollection):
if class_key.startswith(basecls.__name__):
class_name = ('MS' + class_key.replace(basecls.__name__, ''))
if (sys.version_info[0] == 2):
class_name = class_name.encode('ascii')
CLASSES[class_key] = type(class_name, (basecls,), {})
_LOG.info('Class %s created', CLASSES[class_key])
return CLASSES[class_key]
|
Form a music service data structure class from the class key
Args:
class_key (str): A concatenation of the base class (e.g. MediaMetadata)
and the class name
Returns:
class: Subclass of MusicServiceItem
|
codesearchnet
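The dynamic subclass creation at the heart of `get_class` above, shown in isolation (class names here are illustrative):

class MediaMetadata:
    pass

# type(name, bases, namespace) builds a new subclass at runtime.
MSTrack = type('MSTrack', (MediaMetadata,), {})
print(MSTrack.__name__, MSTrack.__mro__[1].__name__)   # MSTrack MediaMetadata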
|
def path(self, goal):
if (goal == self.name):
return [self]
if (goal not in self.routes):
raise ValueError("Unknown '{0}'".format(goal))
obj = self
path = [obj]
while True:
obj = obj.routes[goal].direction
path.append(obj)
if (obj.name == goal):
break
return path
|
Get the shortest path between two nodes of the graph
Args:
goal (str): Name of the targeted node
Return:
list of Node
|
codesearchnet
|
def notify(self, notices):
issues_html = get_template('unattached_ebs_volume.html')
issues_text = get_template('unattached_ebs_volume.txt')
for recipient, issues in list(notices.items()):
if issues:
message_html = issues_html.render(issues=issues)
message_text = issues_text.render(issues=issues)
send_notification(
subsystem=self.name,
recipients=[recipient],
subject=self.subject,
body_html=message_html,
body_text=message_text
)
|
Send notifications to the users via the provided methods
Args:
notices (:obj:`dict` of `str`: `dict`): List of the notifications to send
Returns:
`None`
|
juraj-google-style
|
def _convert_to_compatible_tensor(value, target, error_prefix):
try:
tensor = tf_v1.convert_to_tensor_or_indexed_slices(value, target.dtype)
except TypeError as e:
raise TypeError("%s: %s" % (error_prefix, e))
if _is_sparse(tensor) != _is_sparse(target):
if _is_sparse(tensor):
raise TypeError("%s: Is sparse. Expected dense." % error_prefix)
else:
raise TypeError("%s: Is dense. Expected sparse." % error_prefix)
if not tensor.get_shape().is_compatible_with(target.get_shape()):
raise TypeError("%s: Shape %r is incompatible with %r" %
(error_prefix, tensor.get_shape(), target.get_shape()))
return tensor
|
Converts `value` into a tensor that can be feed into `tensor_info`.
Args:
value: A value to convert into Tensor or SparseTensor.
target: An object returned by `parse_tensor_info_map`.
error_prefix: A string to prefix on raised TypeErrors.
Raises:
TypeError: If it fails to convert.
Returns:
A Tensor or SparseTensor compatible with tensor_info.
|
juraj-google-style
|
def _setup(self, delete=True):
if delete:
self.clear()
with nn.context_scope(self.ctx):
outputs = self.func(
*(self.inputs_f + self.func_args), **self.func_kwargs)
if not hasattr(outputs, '__iter__'):
self.outputs = [outputs]
else:
self.outputs = outputs
self.func_ins = self.outputs[0].parent
self.inputs = self.func_ins.inputs
|
Create a function instance and execute setup.
Args:
delete (bool): Delete buffered variables.
|
juraj-google-style
|
def run(self, input_dir, output_file_path):
logging.info('Running defense %s', self.submission_id)
tmp_run_dir = self.temp_copy_extracted_submission()
output_dir = os.path.dirname(output_file_path)
output_filename = os.path.basename(output_file_path)
cmd = ['--network=none', '-m=24g', '--cpus=3.75', '-v', '{0}:/input_images:ro'.format(input_dir), '-v', '{0}:/output_data'.format(output_dir), '-v', '{0}:/code'.format(tmp_run_dir), '-w', '/code', self.container_name, ('./' + self.entry_point), '/input_images', ('/output_data/' + output_filename)]
elapsed_time_sec = self.run_with_time_limit(cmd)
sudo_remove_dirtree(tmp_run_dir)
return elapsed_time_sec
|
Runs defense inside Docker.
Args:
input_dir: directory with input (adversarial images).
output_file_path: path of the output file.
Returns:
how long it took to run submission in seconds
|
codesearchnet
|
def save_index(self, filename):
data = {}
for f in self.files.values():
entities = {v.entity.id: v.value for k, v in f.tags.items()}
data[f.path] = {'domains': f.domains, 'entities': entities}
with open(filename, 'w') as outfile:
json.dump(data, outfile)
|
Save the current Layout's index to a .json file.
Args:
filename (str): Filename to write to.
Note: At the moment, this won't serialize directory-specific config
files. This means reconstructed indexes will only work properly in
cases where there aren't multiple layout specs within a project.
|
juraj-google-style
|
def _convert_int(self, value):
try:
return int(value)
except (TypeError, ValueError):
return None
|
Converts a value into an integer.
Args:
value: String representation of a field from the Bulkdozer feed.
Returns:
If possible to convert value into an integer, returns the integer
representation, otherwise None.
|
github-repos
|
def _maybe_expand_labels(labels, predictions):
with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return cond.cond(math_ops.equal(array_ops.rank(predictions), array_ops.size(labels.dense_shape) + 1), lambda: sparse_ops.sparse_reshape(labels, shape=array_ops.concat((labels.dense_shape, (1,)), 0), name=scope), lambda: labels)
labels_rank = labels.get_shape().ndims
if labels_rank is not None:
predictions_rank = predictions.get_shape().ndims
if predictions_rank is not None:
if predictions_rank == labels_rank:
return labels
if predictions_rank == labels_rank + 1:
return array_ops.expand_dims(labels, -1, name=scope)
raise ValueError(f'Unexpected labels shape {labels.get_shape()} for predictions shape {predictions.get_shape()}. Predictions rank should be the same rank as labels rank or labels rank plus one .')
return cond.cond(math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1), lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)
|
If necessary, expand `labels` along last dimension to match `predictions`.
Args:
labels: `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
num_labels=1, in which case the result is an expanded `labels` with shape
[D1, ... DN, 1].
predictions: `Tensor` with shape [D1, ... DN, num_classes].
Returns:
`labels` with the same rank as `predictions`.
Raises:
ValueError: if `labels` has invalid shape.
|
github-repos
|
def export(self, remote_function):
if (self._worker.mode is None):
self._functions_to_export.append(remote_function)
return
if (self._worker.mode != ray.worker.SCRIPT_MODE):
return
self._do_export(remote_function)
|
Export a remote function.
Args:
remote_function: the RemoteFunction object.
|
codesearchnet
|
def _activation_summary(x):
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
|
Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
|
juraj-google-style
|
def _find_dependencies(self, dataset_key, **dfilter):
try:
node = self.getitem(dataset_key)
LOG.trace("Found exact dataset already loaded: {}".format(node.name))
return node, set()
except KeyError:
LOG.trace("Exact dataset {} isn't loaded, will try reader...".format(dataset_key))
try:
node = self._find_reader_dataset(dataset_key, **dfilter)
except TooManyResults:
LOG.warning("Too many possible datasets to load for {}".format(dataset_key))
return None, set([dataset_key])
if node is not None:
LOG.trace("Found reader provided dataset:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node.name))
return node, set()
LOG.trace("Could not find dataset in reader: {}".format(dataset_key))
try:
node = self[dataset_key]
LOG.trace("Composite already loaded:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node.name))
return node, set()
except KeyError:
LOG.trace("Composite hasn't been loaded yet, will load: {}".format(dataset_key))
try:
node, unknowns = self._find_compositor(dataset_key, **dfilter)
LOG.trace("Found composite:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node and node.name))
except KeyError:
node = None
unknowns = set([dataset_key])
LOG.trace("Composite not found: {}".format(dataset_key))
return node, unknowns
|
Find the dependencies for *dataset_key*.
Args:
dataset_key (str, float, DatasetID): Dataset identifier to locate
and find any additional
dependencies for.
**dfilter (dict): Additional filter parameters. See
`satpy.readers.get_key` for more details.
|
juraj-google-style
|
def __parameter_descriptor(self, subfield_list):
descriptor = {}
final_subfield = subfield_list[(- 1)]
if all((subfield.required for subfield in subfield_list)):
descriptor['required'] = True
descriptor['type'] = self.__field_to_parameter_type(final_subfield)
default = self.__parameter_default(final_subfield)
if (default is not None):
descriptor['default'] = default
if any((subfield.repeated for subfield in subfield_list)):
descriptor['repeated'] = True
enum_descriptor = self.__parameter_enum(final_subfield)
if (enum_descriptor is not None):
descriptor['enum'] = enum_descriptor
return descriptor
|
Creates descriptor for a parameter using the subfields that define it.
Each parameter is defined by a list of fields, with all but the last being
a message field and the final being a simple (non-message) field.
Many of the fields in the descriptor are determined solely by the simple
field at the end, though some (such as repeated and required) take the whole
chain of fields into consideration.
Args:
subfield_list: List of fields describing the parameter.
Returns:
Dictionary containing a descriptor for the parameter described by the list
of fields.
|
codesearchnet
|
def find_files(paths, file_predicate):
file_list = []
for path in paths:
p = abs_path(path)
for (dirPath, _, fileList) in os.walk(p):
for fname in fileList:
(name, ext) = os.path.splitext(fname)
if file_predicate(name, ext):
file_list.append((dirPath, name, ext))
return file_list
|
Locate files whose names and extensions match the given predicate in
the specified directories.
Args:
paths: A list of directory paths where to find the files.
file_predicate: A function that returns True if the file name and
extension are desired.
Returns:
A list of files that match the predicate.
|
codesearchnet
|
def remove(self, path, dir_fd=None):
path = self._path_with_dir_fd(path, self.remove, dir_fd)
self.filesystem.remove(path)
|
Remove the FakeFile object at the specified file path.
Args:
path: Path to file to be removed.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
Raises:
OSError: if path points to a directory.
OSError: if path does not exist.
OSError: if removal failed.
|
juraj-google-style
|
def add_operator(self, operator):
if (not isinstance(operator, Operator)):
raise FiqlObjectException(('%s is not a valid element type' % operator.__class__))
if (not self._working_fragment.operator):
self._working_fragment.operator = operator
elif (operator > self._working_fragment.operator):
last_constraint = self._working_fragment.elements.pop()
self._working_fragment = self._working_fragment.create_nested_expression()
self._working_fragment.add_element(last_constraint)
self._working_fragment.add_operator(operator)
elif (operator < self._working_fragment.operator):
if self._working_fragment.parent:
return self._working_fragment.parent.add_operator(operator)
else:
return Expression().add_element(self._working_fragment).add_operator(operator)
return self
|
Add an ``Operator`` to the ``Expression``.
The ``Operator`` may result in a new ``Expression`` if an ``Operator``
already exists and is of a different precedence.
There are three possibilities when adding an ``Operator`` to an
``Expression`` depending on whether or not an ``Operator`` already
exists:
- No ``Operator`` on the working ``Expression``; Simply set the
``Operator`` and return ``self``.
- ``Operator`` already exists and is higher in precedence; The
``Operator`` and last ``Constraint`` belong in a sub-expression of
the working ``Expression``.
- ``Operator`` already exists and is lower in precedence; The
``Operator`` belongs to the parent of the working ``Expression``
whether one currently exists or not. To remain in the context of
the top ``Expression``, this method will return the parent here
rather than ``self``.
Args:
operator (Operator): What we are adding.
Returns:
Expression: ``self`` or related ``Expression``.
Raises:
FiqlObjectException: Operator is not a valid ``Operator``.
|
codesearchnet
|
def on_graph_def(self, graph_def, device_name, wall_time):
del wall_time
self._graph_defs[device_name] = graph_def
if (not self._graph_defs_arrive_first):
self._add_graph_def(device_name, graph_def)
self._incoming_channel.get()
|
Implementation of the GraphDef-carrying Event proto callback.
Args:
graph_def: A GraphDef proto. N.B.: The GraphDef is from
the core runtime of a debugged Session::Run() call, after graph
partition. Therefore it may differ from the GraphDef available to
the general TensorBoard. For example, the GraphDef in general
TensorBoard may get partitioned for multiple devices (CPUs and GPUs),
each of which will generate a GraphDef event proto sent to this
method.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
|
codesearchnet
|
def get_signature(self, base_commit=None):
if base_commit is None:
base_commit = 'HEAD'
self.run('add', '-A', self.path)
sha = self.run('rev-parse', '--verify', base_commit).strip()
diff = self.run('diff', sha).strip()
if len(diff) == 0:
try:
return self.get_signature(base_commit + '~1')
except CommandError:
pass
h = hashlib.sha1()
h.update(sha)
h.update(diff)
return h.hexdigest()
|
Get the signature of the current state of the repository
TODO right now `get_signature` is an effectful process in that
it adds all untracked files to staging. This is the only way to get an
accurate diff on new files. This is ok because we only use it on a
disposable copy of the repo.
Args:
base_commit - the base commit ('HEAD', sha, etc.)
Returns:
str
|
juraj-google-style
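A minimal sketch of the sha-plus-diff hashing step; note that on Python 3 `hashlib` requires bytes, so the strings are encoded first (values are made up):

import hashlib

sha = "0123abcd"
diff = "diff --git a/file b/file\n+new line\n"

h = hashlib.sha1()
h.update(sha.encode('utf-8'))
h.update(diff.encode('utf-8'))
print(h.hexdigest())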
|
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False):
try:
fd = aff4.FACTORY.Open(aff4_urn, token=token)
filepath = os.path.join(target_dir, fd.urn.Path()[1:])
if isinstance(fd, standard.VFSDirectory):
try:
os.makedirs(filepath)
except OSError:
pass
return None
elif isinstance(fd, aff4.AFF4Stream):
if (not os.path.isfile(filepath)):
try:
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
DownloadFile(fd, filepath)
elif ((os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE)) or overwrite):
DownloadFile(fd, filepath)
else:
logging.info('File %s exists, skipping', filepath)
return filepath
else:
raise ValueError(('Opened urn is neither a downloaded file nor a directory: %s' % aff4_urn))
except IOError as e:
logging.exception('Failed to read %s due to %s', aff4_urn, e)
raise
|
Copy an AFF4 object that supports a read interface to local filesystem.
Args:
aff4_urn: URN of thing to copy.
target_dir: Directory to copy the file to.
token: Auth token.
overwrite: If True overwrite the file if it exists.
Returns:
If aff4_urn points to a file, returns path to the downloaded file.
Otherwise returns None.
By default file will only be overwritten if file size differs.
|
codesearchnet
|
def noise_op(latents, hparams):
if ((hparams.latent_noise == 0) or (hparams.mode != tf.estimator.ModeKeys.TRAIN)):
return latents
latent_shape = common_layers.shape_list(latents)
return (latents + tf.random_normal(latent_shape, stddev=hparams.latent_noise))
|
Adds isotropic gaussian-noise to each latent.
Args:
latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
hparams: HParams.
Returns:
latents: latents with isotropic gaussian noise added.
|
codesearchnet
|
def S2_surface(self, sizes, bounds, presets, covers, use_torch=False, num_samples=10):
args = self.inputs
Si = self.sobol_analysis(num_samples, {'num_vars': len(args), 'names': args, 'bounds': [bounds[arg] for arg in args]}, covers)
S2 = Si['S2']
(s2_max, v1, v2) = get_max_s2_sensitivity(S2)
x_var = args[v1]
y_var = args[v2]
search_space = [(x_var, bounds[x_var]), (y_var, bounds[y_var])]
preset_vals = {arg: presets[arg] for (i, arg) in enumerate(args) if ((i != v1) and (i != v2))}
X = np.linspace(*search_space[0][1], sizes[0])
Y = np.linspace(*search_space[1][1], sizes[1])
if use_torch:
(Xm, Ym) = torch.meshgrid(torch.tensor(X), torch.tensor(Y))
inputs = {n: torch.full_like(Xm, v) for (n, v) in presets.items()}
inputs.update({search_space[0][0]: Xm, search_space[1][0]: Ym})
Z = self.run(inputs, covers).numpy()
else:
(Xm, Ym) = np.meshgrid(X, Y)
Z = np.zeros((len(X), len(Y)))
for (x, y) in itertools.product(range(len(X)), range(len(Y))):
inputs = {n: v for (n, v) in presets.items()}
inputs.update({search_space[0][0]: x, search_space[1][0]: y})
Z[x][y] = self.run(inputs, covers)
return (X, Y, Z, x_var, y_var)
|
Calculates the sensitivity surface of a GrFN for the two variables with
the highest S2 index.
Args:
num_samples: Number of samples for sensitivity analysis.
sizes: Tuple of (number of x inputs, number of y inputs).
bounds: Set of bounds for GrFN inputs.
presets: Set of standard values for GrFN inputs.
Returns:
Tuple:
X, Y: The vectors of evaluation values for the two selected variables
Z: The numpy matrix of output evaluations
x_var, y_var: The names of the two selected variables
|
codesearchnet
|
def ParseFileObject(self, parser_mediator, file_object):
if not self._encoding:
self._encoding = parser_mediator.codepage
try:
if not self._HasExpectedLineLength(file_object):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s} with error: '
'unexpected line length.').format(self.NAME, display_name))
except UnicodeDecodeError as exception:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(
self.NAME, display_name, exception))
try:
line_reader = self._CreateLineReader(file_object)
reader = self._CreateDictReader(line_reader)
row_offset = line_reader.tell()
row = next(reader)
except (StopIteration, csv.Error, UnicodeDecodeError) as exception:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile(
'[{0:s}] Unable to parse DSV file: {1:s} with error: {2!s}.'.format(
self.NAME, display_name, exception))
number_of_columns = len(self.COLUMNS)
number_of_records = len(row)
if number_of_records != number_of_columns:
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Wrong number of '
'records (expected: {2:d}, got: {3:d})').format(
self.NAME, display_name, number_of_columns,
number_of_records))
for key, value in row.items():
if self._MAGIC_TEST_STRING in (key, value):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Signature '
'mismatch.').format(self.NAME, display_name))
row = self._ConvertRowToUnicode(parser_mediator, row)
if not self.VerifyRow(parser_mediator, row):
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile((
'[{0:s}] Unable to parse DSV file: {1:s}. Verification '
'failed.').format(self.NAME, display_name))
self.ParseRow(parser_mediator, row_offset, row)
row_offset = line_reader.tell()
for row in reader:
if parser_mediator.abort:
break
row = self._ConvertRowToUnicode(parser_mediator, row)
self.ParseRow(parser_mediator, row_offset, row)
row_offset = line_reader.tell()
|
Parses a DSV text file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def _try_guard_against_uninitialized_dependencies(name, initial_value):
if not isinstance(initial_value, tensor_lib.Tensor):
raise TypeError('initial_value needs to be a Tensor: %s' % initial_value)
if _has_cycle(initial_value.op, state={}):
return initial_value
return _safe_initial_value_from_tensor(name, initial_value, op_cache={})
|
Attempt to guard against dependencies on uninitialized variables.
Replace references to variables in `initial_value` with references to the
variable's initialized values. The initialized values are essentially
conditional TensorFlow graphs that return a variable's value if it is
initialized or its `initial_value` if it hasn't been initialized. This
replacement is done on a best effort basis:
- If the `initial_value` graph contains cycles, we don't do any
replacements for that graph.
- If the variables that `initial_value` depends on are not present in the
`GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.
In these cases, it is up to the caller to ensure that the `initial_value`
graph uses initialized variables or that they guard access to variables
using their `initialized_value` method.
Args:
name: Variable name.
initial_value: `Tensor`. The initial value.
Returns:
A `Tensor` suitable to initialize a variable.
Raises:
TypeError: If `initial_value` is not a `Tensor`.
|
github-repos
|
def deprecated(replacement=None, message=None):
def wrap(old):
def wrapped(*args, **kwargs):
msg = "%s is deprecated" % old.__name__
if replacement is not None:
if isinstance(replacement, property):
r = replacement.fget
elif isinstance(replacement, (classmethod, staticmethod)):
r = replacement.__func__
else:
r = replacement
msg += "; use %s in %s instead." % (r.__name__, r.__module__)
if message is not None:
msg += "\n" + message
warnings.simplefilter('default')
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return old(*args, **kwargs)
return wrapped
return wrap
|
Decorator to mark classes or functions as deprecated,
with a possible replacement.
Args:
replacement (callable): A replacement class or method.
message (str): A warning message to be displayed.
Returns:
Original function, but with a warning to use the updated class.
|
juraj-google-style
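Example use of the decorator above (functions are hypothetical):

def new_add(a, b):
    return a + b

@deprecated(replacement=new_add, message="Will be removed in 2.0.")
def old_add(a, b):
    return a + b

old_add(1, 2)   # returns 3 and emits a DeprecationWarning pointing at new_add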
|
def create(cls, extension_name=None, extension_tag=None, extension_type=None):
extension_name = ExtensionName(extension_name)
extension_tag = ExtensionTag(extension_tag)
extension_type = ExtensionType(extension_type)
return ExtensionInformation(extension_name=extension_name, extension_tag=extension_tag, extension_type=extension_type)
|
Construct an ExtensionInformation object from provided extension
values.
Args:
extension_name (str): The name of the extension. Optional,
defaults to None.
extension_tag (int): The tag number of the extension. Optional,
defaults to None.
extension_type (int): The type index of the extension. Optional,
defaults to None.
Returns:
ExtensionInformation: The newly created set of extension
information.
Example:
>>> x = ExtensionInformation.create('extension', 1, 1)
>>> x.extension_name.value
ExtensionName(value='extension')
>>> x.extension_tag.value
ExtensionTag(value=1)
>>> x.extension_type.value
ExtensionType(value=1)
|
codesearchnet
|
def _match_against_protocol(self, left, other_type, subst, view):
if isinstance(left.cls, abstract.AMBIGUOUS_OR_EMPTY):
return subst
elif left.cls.is_dynamic:
return self._subst_with_type_parameters_from(subst, other_type)
elif other_type.full_name == 'typing.Sequence' and any((cls.full_name == 'typing.Mapping' for cls in left.cls.mro)):
return None
left_attributes = self._get_attribute_names(left)
missing = other_type.protocol_attributes - left_attributes
if missing:
self._protocol_error = error_types.ProtocolMissingAttributesError(left.cls, other_type, missing)
return None
key = (left.cls, other_type)
if key in self._protocol_cache:
return subst
self._protocol_cache.add(key)
new_substs = []
for attribute in other_type.protocol_attributes:
new_subst = self._match_protocol_attribute(left, other_type, attribute, subst, view)
if new_subst is None:
return None
new_substs.append(new_subst)
return self._merge_substs(subst, new_substs)
|
Checks whether a type is compatible with a protocol.
Args:
left: An instance of a type.
other_type: A protocol.
subst: The current type parameter assignment.
view: The current mapping of Variable to Value.
Returns:
A new type parameter assignment if the matching succeeded, None otherwise.
|
github-repos
|
def get_graph_element_name(elem):
return elem.name if hasattr(elem, 'name') else str(elem)
|
Obtain the name or string representation of a graph element.
If the graph element has the attribute "name", return name. Otherwise, return
a __str__ representation of the graph element. Certain graph elements, such as
`SparseTensor`s, do not have the attribute "name".
Args:
elem: The graph element in question.
Returns:
If the attribute 'name' is available, return the name. Otherwise, return
str(elem).
|
github-repos
|
def reset(self, ms=0, halt=True):
self._dll.JLINKARM_SetResetDelay(ms)
res = self._dll.JLINKARM_Reset()
if (res < 0):
raise errors.JLinkException(res)
elif (not halt):
self._dll.JLINKARM_Go()
return res
|
Resets the target.
This method resets the target, and by default toggles the RESET and
TRST pins.
Args:
self (JLink): the ``JLink`` instance
ms (int): Amount of milliseconds to delay after reset (default: 0)
halt (bool): if the CPU should halt after reset (default: True)
Returns:
int: the value returned by the underlying ``JLINKARM_Reset`` call.
|
codesearchnet
|
def dot(self, y, t=None, A=None, U=None, V=None, kernel=None, check_sorted=True):
if (kernel is None):
kernel = self.kernel
if (t is not None):
t = np.atleast_1d(t)
if (check_sorted and np.any((np.diff(t) < 0.0))):
raise ValueError('the input coordinates must be sorted')
if (check_sorted and (len(t.shape) > 1)):
raise ValueError('dimension mismatch')
A = (np.empty(0) if (A is None) else A)
U = (np.empty((0, 0)) if (U is None) else U)
V = (np.empty((0, 0)) if (V is None) else V)
else:
if (not self.computed):
raise RuntimeError("you must call 'compute' first")
t = self._t
A = self._A
U = self._U
V = self._V
(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = kernel.coefficients
return self.solver.dot(kernel.jitter, alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, A, U, V, t, np.ascontiguousarray(y, dtype=float))
|
Dot the covariance matrix into a vector or matrix
Compute ``K.y`` where ``K`` is the covariance matrix of the GP without
the white noise or ``yerr`` values on the diagonal.
Args:
y (array[n] or array[n, nrhs]): The vector or matrix ``y``
described above.
kernel (Optional[terms.Term]): A different kernel can optionally
be provided to compute the matrix ``K`` from a different
kernel than the ``kernel`` property on this object.
Returns:
array[n] or array[n, nrhs]: The dot product ``K.y`` as described
above. This will have the same shape as ``y``.
Raises:
ValueError: For mismatched dimensions.
|
codesearchnet
|
def ParseContainersTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCacheContainersEventData()
event_data.container_identifier = record_values.get('ContainerId', None)
event_data.directory = record_values.get('Directory', None)
event_data.name = record_values.get('Name', None)
event_data.set_identifier = record_values.get('SetId', None)
timestamp = record_values.get('LastScavengeTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Last Scavenge Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = record_values.get('LastAccessTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
container_identifier = record_values.get('ContainerId', None)
container_name = record_values.get('Name', None)
if not container_identifier or not container_name:
continue
table_name = 'Container_{0:d}'.format(container_identifier)
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
parser_mediator.ProduceExtractionWarning(
'Missing table: {0:s}'.format(table_name))
continue
self._ParseContainerTable(parser_mediator, esedb_table, container_name)
|
Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
|
juraj-google-style
|
def unpack_byte(self, offset):
o = self._offset + offset
try:
return struct.unpack_from("<B", self._buf, o)[0]
except struct.error:
raise OverrunBufferException(o, len(self._buf))
|
Returns a little-endian unsigned byte from the relative offset.
Arguments:
- `offset`: The relative offset from the start of the block.
Throws:
- `OverrunBufferException`
|
juraj-google-style
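As a quick illustration of the struct call this wrapper delegates to, here is a self-contained sketch (the buffer contents are made up) that reads one little-endian unsigned byte at a relative offset:
import struct

buf = bytearray(b"\x00\x2a\xff")
offset = 1
# struct.unpack_from reads from the buffer at the given offset without copying it.
value = struct.unpack_from("<B", buf, offset)[0]
print(value)  # 42, the unsigned byte at offset 1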
|
def consolidate(self, args):
result = dict(args)
for opt in self:
if (opt.name in result):
result[opt.name] = opt.convert(result[opt.name])
elif (opt.default is not None):
result[opt.name] = opt.convert(opt.default)
return result
|
Consolidate the provided arguments.
If the provided arguments have matching options, this performs a type conversion.
For any option that has a default value and is not present in the provided
arguments, the default value is added.
Args:
args (dict): A dictionary of the provided arguments.
Returns:
dict: A dictionary of the arguments with values type-converted and any missing
options filled in from their defaults.
|
codesearchnet
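A minimal, self-contained sketch of the same consolidation pattern, using a hypothetical Option stand-in that exposes only the name/convert/default attributes the method relies on:
class Option:
    # Hypothetical stand-in: just the attributes consolidate() needs.
    def __init__(self, name, convert, default=None):
        self.name, self.convert, self.default = name, convert, default

def consolidate(options, args):
    result = dict(args)
    for opt in options:
        if opt.name in result:
            result[opt.name] = opt.convert(result[opt.name])   # type conversion
        elif opt.default is not None:
            result[opt.name] = opt.convert(opt.default)        # enrich with default
    return result

options = [Option("retries", int, default="3"), Option("name", str)]
print(consolidate(options, {"retries": "5"}))  # {'retries': 5}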
|
def prange(N=1, dim=1):
A = {}
r = numpy.arange(N, dtype=int)
key = numpy.zeros(dim, dtype=int)
for i in range(N):
key[(- 1)] = i
A[tuple(key)] = (1 * (r == i))
return Poly(A, dim, (N,), int)
|
Constructor to create a range of polynomials where the exponent vary.
Args:
N (int):
Number of polynomials in the array.
dim (int):
The dimension the polynomial should span.
Returns:
(Poly):
A polynomial array of length N containing simple polynomials with
increasing exponent.
Examples:
>>> print(prange(4))
[1, q0, q0^2, q0^3]
>>> print(prange(4, dim=3))
[1, q2, q2^2, q2^3]
|
codesearchnet
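The sketch below reproduces only the coefficient bookkeeping of prange(), without the Poly wrapper, to show how each exponent tuple maps to an indicator vector selecting one polynomial (numpy assumed available):
import numpy as np

def prange_coefficients(N=4, dim=1):
    # Same loop as prange(), but returning the raw coefficient dictionary.
    A = {}
    r = np.arange(N, dtype=int)
    key = np.zeros(dim, dtype=int)
    for i in range(N):
        key[-1] = i
        A[tuple(int(x) for x in key)] = 1 * (r == i)
    return A

for exponent, coeffs in prange_coefficients(4).items():
    print(exponent, coeffs)
# (0,) [1 0 0 0]
# (1,) [0 1 0 0]
# (2,) [0 0 1 0]
# (3,) [0 0 0 1]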
|
def resize_tensor_input(self, input_index, tensor_size, strict=False):
self._ensure_safe()
tensor_size = np.array(tensor_size, dtype=np.int32)
self._interpreter.ResizeInputTensor(input_index, tensor_size, strict)
|
Resizes an input tensor.
Args:
input_index: Tensor index of input to set. This value can be gotten from
the 'index' field in get_input_details.
tensor_size: The tensor_shape to resize the input to.
strict: Only unknown dimensions can be resized when `strict` is True.
Unknown dimensions are indicated as `-1` in the `shape_signature`
attribute of a given tensor. (default False)
Raises:
ValueError: If the interpreter could not resize the input tensor.
Usage:
```
interpreter = Interpreter(model_content=tflite_model)
interpreter.resize_tensor_input(0, [num_test_images, 224, 224, 3])
interpreter.allocate_tensors()
interpreter.set_tensor(0, test_images)
interpreter.invoke()
```
|
github-repos
|
def WriteGraphSeries(graph_series, label, token=None):
if data_store.RelationalDBEnabled():
data_store.REL_DB.WriteClientGraphSeries(graph_series, label)
if _ShouldUseLegacyDatastore():
aff4_attr = _GetAFF4AttributeForReportType(graph_series.report_type)()
if isinstance(aff4_attr, rdf_stats.GraphSeries):
for graph in graph_series.graphs:
aff4_attr.Append(graph)
elif isinstance(aff4_attr, rdf_stats.Graph):
for sample in graph_series.graphs[0]:
aff4_attr.Append(x_value=sample.x_value, y_value=sample.y_value)
else:
raise AFF4AttributeTypeError(aff4_attr.__class__)
with aff4.FACTORY.Create(GetAFF4ClientReportsURN().Add(label), aff4_type=aff4_stats.ClientFleetStats, mode='w', token=token) as stats_for_label:
stats_for_label.AddAttribute(aff4_attr)
|
Writes graph series for a particular client label to the DB.
Args:
graph_series: A series of rdf_stats.Graphs containing aggregated data for a
particular report-type.
label: Client label by which data in the graph_series was aggregated.
token: ACL token to use for writing to the legacy (non-relational)
datastore.
Raises:
AFF4AttributeTypeError: If, when writing to the legacy DB, an unexpected
report-data type is encountered.
|
codesearchnet
|
def unpack_inputs(func):
original_signature = inspect.signature(func)
@functools.wraps(func)
def run_call_with_unpacked_inputs(self, *args, **kwargs):
kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)}
fn_args_and_kwargs = {key: val for key, val in kwargs.items() if key not in kwargs_call}
fn_args_and_kwargs.update({'kwargs_call': kwargs_call})
fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args)))
if 'EncoderDecoder' in self.__class__.__name__:
config = None
else:
config = self.config
unpacked_inputs = input_processing(func, config, **fn_args_and_kwargs)
return func(self, **unpacked_inputs)
run_call_with_unpacked_inputs.__signature__ = original_signature
return run_call_with_unpacked_inputs
|
Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables
downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input
(common case in Keras).
Args:
func (`callable`):
The callable function of the TensorFlow model.
Returns:
A callable that wraps the original `func` with the behavior described above.
|
github-repos
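A simplified, self-contained sketch of the underlying idea (it deliberately skips the config lookup and kwargs_call handling of the real decorator): bind positional arguments to their parameter names so the wrapped method always receives keyword arguments.
import functools
import inspect

def unpack_to_kwargs(func):
    sig = inspect.signature(func)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # Map positional args onto the remaining parameter names (skipping self).
        names = [p for p in sig.parameters if p != "self"]
        bound = dict(zip(names, args))
        bound.update(kwargs)
        return func(self, **bound)

    return wrapper

class Toy:
    @unpack_to_kwargs
    def call(self, input_ids=None, attention_mask=None):
        return (input_ids, attention_mask)

print(Toy().call([1, 2, 3], attention_mask=[7, 8, 9]))
# ([1, 2, 3], [7, 8, 9])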
|
def _url_format(self, service):
base_service_url = '{base}{service}'.format(
base=self.urlbase,
service=service
)
return base_service_url
|
Generate URL from urlbase and service.
Args:
service (str): The endpoint service to use, i.e. gradebook
Returns:
str: URL to where the request should be made
|
juraj-google-style
|
def create_from_binary(cls, binary_view):
(attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, content_len, content_offset, indexed_flag) = cls._REPR.unpack(binary_view[:cls._REPR.size])
if name_len:
name = binary_view[name_offset:(name_offset + (2 * name_len))].tobytes().decode('utf_16_le')
else:
name = None
nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (content_len, content_offset, indexed_flag))
return nw_obj
|
Creates a new object AttributeHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
AttributeHeader: New object using the binary stream as source
|
codesearchnet
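The following self-contained sketch shows the same parsing pattern with a deliberately simplified, hypothetical header layout (the real attribute header has more fields): unpack the fixed-size part with struct, then slice the UTF-16 name out of the same buffer.
import struct

# Hypothetical layout: type (uint32), length (uint32), name_len (uint8).
_REPR = struct.Struct("<IIB")

def parse_header(binary_view):
    attr_type, attr_len, name_len = _REPR.unpack(binary_view[:_REPR.size])
    name = None
    if name_len:
        start = _REPR.size
        name = binary_view[start:start + 2 * name_len].tobytes().decode("utf_16_le")
    return attr_type, attr_len, name

buf = memoryview(bytearray(_REPR.pack(0x10, 24, 3) + "abc".encode("utf_16_le")))
print(parse_header(buf))  # (16, 24, 'abc')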
|
def __init__(self, tcex, name, to, from_addr, subject, body, header, owner=None, **kwargs):
super(Email, self).__init__(tcex, 'emails', name, owner, **kwargs)
self.api_entity = 'email'
self._data['to'] = to or kwargs.get('to')
self._data['from'] = from_addr or kwargs.get('from_addr')
self._data['subject'] = subject or kwargs.get('subject')
self._data['body'] = body or kwargs.get('body')
self._data['header'] = header or kwargs.get('header')
self._data['score'] = kwargs.get('score', 0)
|
Initialize Class Properties.
Args:
name (str): The name for this Group.
subject (str): The subject for this Email.
header (str): The header for this Email.
body (str): The body for this Email.
date_added (str, kwargs): The date timestamp the Indicator was created.
from_addr (str, kwargs): The **from** address for this Email.
to (str, kwargs): The **to** address for this Email.
|
juraj-google-style
|
def register(self, task_json=None, json_filename=None):
if not task_json and not json_filename:
raise Exception("Both task json and filename can't be none.")
if task_json and json_filename:
raise Exception("Both task json and filename can't be provided.")
if json_filename:
task_json = json.load(open(json_filename, 'r'))
r = self.gbdx_connection.post(self._base_url, json=task_json)
raise_for_status(r)
return r.text
|
Registers a new GBDX task.
Args:
task_json (dict): Dictionary representing task definition.
json_filename (str): A full path of a file with json representing the task definition.
Only one out of task_json and json_filename should be provided.
Returns:
Response (str).
|
juraj-google-style
|
def AtMaximumDepth(self, search_depth):
if self._key_path_segments is not None:
if search_depth >= self._number_of_key_path_segments:
return True
return False
|
Determines if the find specification is at maximum depth.
Args:
search_depth (int): number of key path segments to compare.
Returns:
bool: True if at maximum depth, False if not.
|
juraj-google-style
|
def binomial_coefficient(n, k):
if ((not isinstance(k, int)) or (not isinstance(n, int))):
raise TypeError('Expecting positive integers')
if (k > n):
raise ValueError('k must be lower or equal than n')
if ((k < 0) or (n < 0)):
raise ValueError('Expecting positive integers')
return (factorial(n) // (factorial(k) * factorial(n - k)))
|
Calculate the binomial coefficient indexed by n and k.
Args:
n (int): positive integer
k (int): positive integer
Returns:
The binomial coefficient indexed by n and k
Raises:
TypeError: If either n or k is not an integer
ValueError: If either n or k is negative, or if k is strictly greater than n
|
codesearchnet
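A quick self-contained check of the reconstructed formula against a known value (C(5, 2) = 10), using math.factorial:
from math import factorial

def binomial_coefficient(n, k):
    # n! / (k! * (n - k)!) with integer division, since the result is always an integer.
    return factorial(n) // (factorial(k) * factorial(n - k))

print(binomial_coefficient(5, 2))  # 10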
|
async def get_jsone_context_and_template(chain, parent_link, decision_link, tasks_for):
if (tasks_for == 'action'):
(jsone_context, tmpl) = (await get_action_context_and_template(chain, parent_link, decision_link))
else:
tmpl = (await get_in_tree_template(decision_link))
jsone_context = (await populate_jsone_context(chain, parent_link, decision_link, tasks_for))
return (jsone_context, tmpl)
|
Get the appropriate json-e context and template for any parent task.
Args:
chain (ChainOfTrust): the chain of trust.
parent_link (LinkOfTrust): the parent link to test.
decision_link (LinkOfTrust): the parent link's decision task link.
tasks_for (str): the reason the parent link was created (cron,
hg-push, action)
Returns:
(dict, dict): the json-e context and template.
|
codesearchnet
|
def topdown(cls):
return tuple(unique_everseen((r for r in cls._instances.values() if (r.direction == 'topdown'))))
|
Get all topdown `Relationship` instances.
Returns:
:obj:`generator`
Example:
>>> from pronto import Relationship
>>> for r in Relationship.topdown():
... print(r)
Relationship('can_be')
Relationship('has_part')
|
codesearchnet
|
def target_code_to_name(code):
TARGET_NAMES = {v: k for (k, v) in TARGET_CODES.items()}
return TARGET_NAMES[code]
|
Converts an int target code to a target name
Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup
to get the more readable name.
Args:
code: Value from self.TARGET_CODES
Returns:
String target name corresponding to the given code.
|
codesearchnet
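The reverse lookup in isolation, with a hypothetical TARGET_CODES table (the real codes live on the class):
TARGET_CODES = {"flash": 1, "ram": 2}  # hypothetical example values
TARGET_NAMES = {v: k for k, v in TARGET_CODES.items()}
print(TARGET_NAMES[2])  # 'ram'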
|
def cluster_info(cpu, cfg):
cpus = cpu.cpu_count
pods_per_core = cfg.doc.find('pods-per-core')
pods_per_core_int = (int(pods_per_core.value) if pods_per_core else PODS_PER_CORE)
cfg_max_pods = cfg.doc.find('max-pods')
cfg_max_pods_int = (int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS)
calc_max_pods = (cpus * pods_per_core_int)
return {'cpu_count': cpus, 'pods_per_core': pods_per_core_int, 'pods_per_core_customized': bool(pods_per_core), 'max_pods': min(cfg_max_pods_int, calc_max_pods), 'max_pods_customized': bool(cfg_max_pods)}
|
Collects facts for each host
Collects the cpu and node configuration facts to be used by the rule.
Arguments:
cpu (CpuInfo): Parser object for the cpu info.
cfg (NodeConfig): Parser object for the node configuration.
Returns:
dict: Dictionary of fact information including the keys
``cpu_count``, ``pods_per_core``, ``pods_per_core_customized``,
``max_pods``, and ``max_pods_customized``.
|
codesearchnet
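A worked example of the max_pods arithmetic with assumed numbers (the values below are placeholders, not necessarily the module's PODS_PER_CORE/MAX_PODS constants):
cpus = 8
pods_per_core = 10            # assumed default
cfg_max_pods = 250            # assumed default
calc_max_pods = cpus * pods_per_core           # 80
max_pods = min(cfg_max_pods, calc_max_pods)    # 80: the per-core limit wins
print(max_pods)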
|
def decode_from_file(estimator, vocabulary, model_type, batch_size, sequence_length, checkpoint_path='', input_filename=gin.REQUIRED, output_filename=gin.REQUIRED, eos_id=1):
with tf.gfile.Open(input_filename) as f:
text = f.read()
records = text.split('\n')
inputs = [record.strip() for record in records]
if (not inputs[(- 1)]):
inputs.pop()
n = len(inputs)
all_input_ids = []
for line in inputs:
ids = inputs_vocabulary(vocabulary).encode(line.strip())
if (model_type != 'lm'):
ids += [eos_id]
if (len(ids) > sequence_length):
ids = ids[:sequence_length]
else:
ids.extend(([0] * (sequence_length - len(ids))))
all_input_ids.append(ids)
all_input_ids.extend(([all_input_ids[0]] * ((- n) % batch_size)))
padded_n = len(all_input_ids)
all_input_ids = np.array(all_input_ids, dtype=np.int32)
def input_fn(params):
del params
dataset = tf.data.Dataset.from_tensor_slices({'inputs': all_input_ids})
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
vocab_size = targets_vocabulary(vocabulary).vocab_size
decodes = []
for (i, result) in enumerate(result_iter):
output_ids = clean_decodes(list(result['outputs']), vocab_size)
output_string = targets_vocabulary(vocabulary).decode([int(x) for x in output_ids])
decodes.append(output_string)
if ((i & (i - 1)) == 0):
if (i < len(inputs)):
tf.logging.info(('decode %d input = %s' % (i, inputs[i])))
tf.logging.info((' output = %s' % output_string))
if (len(decodes) == padded_n):
tf.logging.info('number of decodes matches number of inputs')
elif ((len(decodes) % padded_n) == 0):
num_cores = (len(decodes) // padded_n)
tf.logging.info('output is repeated num_cores times - removing extras')
def keep(i):
return ((i % (batch_size * num_cores)) < batch_size)
decodes = [d for (i, d) in enumerate(decodes) if keep(i)]
else:
raise ValueError('unexpected number of outputs')
output_file = tf.gfile.Open(output_filename, 'w')
decodes = decodes[:n]
for d in decodes:
output_file.write(d)
output_file.write('\n')
output_file.close()
|
Decode from a text file.
Args:
estimator: a TPUEstimator
vocabulary: a mtf.transformer.vocabulary.Vocabulary
model_type: a string
batch_size: an integer
sequence_length: an integer (maximum decode length)
checkpoint_path: an optional string
input_filename: a string
output_filename: a string
eos_id: EOS id
|
codesearchnet
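The padding step in isolation, as a self-contained sketch: the input list is extended with copies of its first element until its length is a multiple of batch_size, so the final batch is full.
batch_size = 8
all_input_ids = [[1, 2], [3, 4], [5, 6]]          # n = 3 real inputs
n = len(all_input_ids)
# (-n) % batch_size is the number of filler rows needed to reach a full batch.
all_input_ids.extend([all_input_ids[0]] * ((-n) % batch_size))
print(len(all_input_ids))  # 8, i.e. padded up to one full batch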
|
def multi_frontier_two_objective_reward(example):
int_val = int(example * 10)
if int_val >= 0 and int_val < 3:
return [int_val, 10 - int_val]
elif int_val >= 3 and int_val < 7:
return [int_val * 10, 100 - int_val * 10]
else:
return [int_val, 10 - int_val]
|
Reward for the trivial search space.
The reward (i.e. fitness) is a 2-element list. The goal of the search,
therefore, is to find the pareto frontier in
multi_frontier_two_objective_pareto function.
Args:
example: a materialized value.
Returns:
A 2-element list.
|
github-repos
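Because the reward is a pure function, it can be sampled directly; a few example evaluations (the function is restated here so the snippet runs standalone):
def multi_frontier_two_objective_reward(example):
    int_val = int(example * 10)
    if 0 <= int_val < 3:
        return [int_val, 10 - int_val]
    elif 3 <= int_val < 7:
        return [int_val * 10, 100 - int_val * 10]
    else:
        return [int_val, 10 - int_val]

print(multi_frontier_two_objective_reward(0.2))  # [2, 8]
print(multi_frontier_two_objective_reward(0.5))  # [50, 50]
print(multi_frontier_two_objective_reward(0.9))  # [9, 1]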
|
def add_chunk(self, chunk: Union[message.Message, bytes], field_tags: util.FieldTypes, index=None) -> None:
if self._parent_splitter is not None:
self._parent_splitter.add_chunk(chunk, self._fields_in_parent + field_tags, index)
else:
assert self._chunks is not None
assert self._chunked_message is not None
field = self._chunked_message.chunked_fields.add(field_tag=util.get_field_tag(self._proto, field_tags))
new_chunk_index = len(self._chunks)
field.message.chunk_index = new_chunk_index
self._add_chunk_order.append(id(chunk))
if index is None:
self._chunks.append(chunk)
else:
self._chunks.insert(index, chunk)
self._fix_chunk_order = True
|
Adds a new chunk and updates the ChunkedMessage proto.
Args:
chunk: Proto message or bytes.
field_tags: Field information about the placement of the chunked data
within self._proto.
index: Optional index at which to insert the chunk. The chunk ordering is
important for merging.
|
github-repos
|
def acquire(self, constructor_fn: Callable[[], Any], tag: Any=None) -> Any:
with self._lock:
if self._ref is None or self._ref() is None or self._tag != tag:
result = constructor_fn()
if result is None:
return None
self._ref = weakref.ref(result)
self._tag = tag
else:
result = self._ref()
return result
|
Acquire a reference to the object this shared control block manages.
Args:
constructor_fn: function that initialises / constructs the object if not
present in the cache. This function should take no arguments. It should
return an initialised object, or None if the object could not be
initialised / constructed.
tag: an optional identifier to store with the cached object. If
subsequent calls to acquire use different tags, the object will be
reloaded rather than returned from cache.
Returns:
An initialised object, either from a previous initialisation, or
newly-constructed.
|
github-repos
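A simplified, self-contained sketch of the weakref-plus-tag caching pattern (threading and weakref are standard library; Resource is a placeholder class):
import threading
import weakref

class Resource:
    def __init__(self, label):
        self.label = label

class SharedHandle:
    # Rebuild the object if the weak reference is dead or the tag changed.
    def __init__(self):
        self._lock = threading.Lock()
        self._ref = None
        self._tag = None

    def acquire(self, constructor_fn, tag=None):
        with self._lock:
            if self._ref is None or self._ref() is None or self._tag != tag:
                result = constructor_fn()
                if result is None:
                    return None
                self._ref = weakref.ref(result)
                self._tag = tag
            else:
                result = self._ref()
            return result

handle = SharedHandle()
first = handle.acquire(lambda: Resource("expensive"), tag="v1")
second = handle.acquire(lambda: Resource("reused"), tag="v1")
print(first is second)  # True while 'first' keeps the cached object alive
third = handle.acquire(lambda: Resource("new tag"), tag="v2")
print(third.label)      # 'new tag': a different tag forces reconstruction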
|
def activate_async(fn, _engine):
@coroutine
@functools.wraps(fn)
def wrapper(*args, **kw):
_engine.activate()
try:
if iscoroutinefunction(fn):
(yield from fn(*args, **kw))
else:
fn(*args, **kw)
finally:
_engine.disable()
return wrapper
|
Async version of activate decorator
Arguments:
fn (function): the function to be wrapped by the decorator.
_engine (Engine): pook engine instance
Returns:
function: decorator wrapper function.
|
codesearchnet
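A self-contained sketch of the same wrapping pattern, rewritten with native async/await and a hypothetical Engine stand-in that only provides activate()/disable():
import asyncio
import functools

class Engine:
    # Placeholder with the two hooks the decorator expects.
    def __init__(self):
        self.active = False
    def activate(self):
        self.active = True
    def disable(self):
        self.active = False

def activate_async(fn, engine):
    @functools.wraps(fn)
    async def wrapper(*args, **kwargs):
        engine.activate()
        try:
            if asyncio.iscoroutinefunction(fn):
                await fn(*args, **kwargs)
            else:
                fn(*args, **kwargs)
        finally:
            engine.disable()
    return wrapper

engine = Engine()

async def test():
    assert engine.active  # active only inside the wrapped call

asyncio.run(activate_async(test, engine)())
print(engine.active)  # False: disabled again after the call returns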
|
def create_game(self, map_name):
map_inst = maps.get(map_name)
map_data = map_inst.data(self._run_config)
if (map_name not in self._saved_maps):
for controller in self._controllers:
controller.save_map(map_inst.path, map_data)
self._saved_maps.add(map_name)
create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap(map_path=map_inst.path), disable_fog=False)
for _ in range(self._num_agents):
create.player_setup.add(type=sc_pb.Participant)
self._controllers[0].create_game(create)
|
Create a game for the agents to join.
Args:
map_name: The map to use.
|
codesearchnet
|
def from_primitive(cls, primitive: message.Message, context: Context) -> 'PrimitiveWrapper':
result = cls(primitive, context)
result.validate_wrapped()
return result
|
Instantiates a new version of PrimitiveWrapper wrapping primitive.
Args:
primitive: The FHIR primitive message to wrap and validate.
context: Related primitive information to use for printing/parsing a
wrapped primitive.
Returns:
An instance of PrimitiveWrapper.
|
github-repos
|
def apply_sync(fn: StreamFn, content: Iterable[_T]) -> list[_T]:
async def run_with_context():
async with context.context():
as_async = streams.stream_content(content)
return await streams.gather_stream(fn(as_async))
return asyncio.run(run_with_context())
|
Applies a part function synchronously.
Args:
fn: the part function to apply to the content.
content: a collection of inputs/parts on which to apply the function.
Returns:
the content, with the function `fn` applied to each input/part.
|
github-repos
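A self-contained sketch of the same shape, omitting the context manager: stream a plain iterable through an async transformation and collect the results synchronously with asyncio.run():
import asyncio

async def _stream(content):
    # Turn a plain iterable into an async iterator.
    for item in content:
        yield item

async def _double(stream):
    return [item * 2 async for item in stream]

def apply_sync(fn, content):
    async def run():
        return await fn(_stream(content))
    return asyncio.run(run())

print(apply_sync(_double, [1, 2, 3]))  # [2, 4, 6]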
|