| code | docstring |
|---|---|
def info(filepath):
info_dictionary = {
'channels': channels(filepath),
'sample_rate': sample_rate(filepath),
'bitrate': bitrate(filepath),
'duration': duration(filepath),
'num_samples': num_samples(filepath),
'encoding': encoding(filepath),
'silent': silent(filepath)
}
return info_dictionary
|
Get a dictionary of file information
Parameters
----------
filepath : str
File path.
Returns
-------
info_dictionary : dict
Dictionary of file information. Fields are:
* channels
* sample_rate
* bitrate
* duration
* num_samples
* encoding
* silent
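A hypothetical usage sketch (assuming the helper functions referenced above live in the same module and 'audio.wav' is a placeholder path to an existing file):
>>> meta = info('audio.wav')
>>> sorted(meta.keys())
['bitrate', 'channels', 'duration', 'encoding', 'num_samples', 'sample_rate', 'silent']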
|
def local_renderer(self):
if not self._local_renderer:
r = self.create_local_renderer()
self._local_renderer = r
return self._local_renderer
|
Retrieves the cached local renderer.
|
def run(self, inputRecord):
predictionNumber = self._numPredictions
self._numPredictions += 1
result = opf_utils.ModelResult(predictionNumber=predictionNumber,
rawInput=inputRecord)
return result
|
Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
A ModelResult namedtuple. The contents of ModelResult.inferences
depend on the specific inference type of this model, which
can be queried by :meth:`.getInferenceType`.
|
def update_memo(self, task_id, task, r):
if not self.memoize or not task['memoize']:
return
if task['hashsum'] in self.memo_lookup_table:
logger.info('Updating appCache entry with latest %s:%s call' %
(task['func_name'], task_id))
self.memo_lookup_table[task['hashsum']] = r
else:
self.memo_lookup_table[task['hashsum']] = r
|
Updates the memoization lookup table with the result from a task.
Args:
- task_id (int): Integer task id
- task (dict) : A task dict from dfk.tasks
- r (Result future): Result future
A warning is issued when a hash collision occurs during the update.
This is not likely.
|
def get_arena_image(self, obj: BaseAttrDict):
badge_id = obj.arena.id
for i in self.constants.arenas:
if i.id == badge_id:
return 'https://royaleapi.github.io/cr-api-assets/arenas/arena{}.png'.format(i.arena_id)
|
Get the arena image URL
Parameters
----------
obj: official_api.models.BaseAttrDict
An object that has the arena ID in ``.arena.id``
Can be ``Profile`` for example.
Returns None or str
|
def request_roster(self, version = None):
processor = self.stanza_processor
request = Iq(stanza_type = "get")
request.set_payload(RosterPayload(version = version))
processor.set_response_handlers(request,
self._get_success, self._get_error)
processor.send(request)
|
Request roster from server.
:Parameters:
- `version`: if not `None` versioned roster will be requested
for given local version. Use "" to request full roster.
:Types:
- `version`: `unicode`
|
def create(self, stylename, **kwargs):
if stylename == "default":
self[stylename] = style(stylename, self._ctx, **kwargs)
return self[stylename]
k = kwargs.get("template", "default")
s = self[stylename] = self[k].copy(stylename)
for attr in kwargs:
if s.__dict__.has_key(attr):
s.__dict__[attr] = kwargs[attr]
return s
|
Creates a new style which inherits from the default style,
or from any other style whose name is supplied via the optional template parameter.
|
def _create_element_list(self):
element_set = stoich.elements(self.compounds)
return sorted(list(element_set))
|
Extract an alphabetically sorted list of elements from the
material's compounds.
:returns: Alphabetically sorted list of elements.
|
def python_value(self, value):
value = coerce_to_bytes(value)
obj = HashValue(value)
obj.field = self
return obj
|
Convert the database value to a pythonic value.
|
def encoding(self):
if self.redirect is not None:
return self.redirect.encoding
else:
return super(TeeStringIO, self).encoding
|
Gets the encoding of the `redirect` IO object
Doctest:
>>> redirect = io.StringIO()
>>> assert TeeStringIO(redirect).encoding is None
>>> assert TeeStringIO(None).encoding is None
>>> assert TeeStringIO(sys.stdout).encoding is sys.stdout.encoding
>>> redirect = io.TextIOWrapper(io.StringIO())
>>> assert TeeStringIO(redirect).encoding is redirect.encoding
|
def get_items(self):
ret=[]
l=self.xpath_ctxt.xpathEval("d:item")
if l is not None:
for i in l:
ret.append(DiscoItem(self, i))
return ret
|
Get the items contained in `self`.
:return: the items contained.
:returntype: `list` of `DiscoItem`
|
def dist(self, src, tar, weights='exponential', max_length=8):
return self.dist_abs(src, tar, weights, max_length, True)
|
Return normalized distance between the Eudex hashes of two terms.
This is Eudex distance normalized to [0, 1].
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
weights : str, iterable, or generator function
The weights or weights generator function
max_length : int
The number of characters to encode as a eudex hash
Returns
-------
float
The normalized Eudex Hamming distance
Examples
--------
>>> cmp = Eudex()
>>> round(cmp.dist('cat', 'hat'), 12)
0.062745098039
>>> round(cmp.dist('Niall', 'Neil'), 12)
0.000980392157
>>> round(cmp.dist('Colin', 'Cuilen'), 12)
0.004901960784
>>> round(cmp.dist('ATCG', 'TAGC'), 12)
0.197549019608
|
def _getClassInstance(path, args=None):
if not path.endswith(".py"):
return None
if args is None:
args = {}
classname = AtomShieldsScanner._getClassName(path)
basename = os.path.basename(path).replace(".py", "")
sys.path.append(os.path.dirname(path))
try:
mod = __import__(basename, globals(), locals(), [classname], -1)
class_ = getattr(mod, classname)
instance = class_(**args)
except Exception as e:
AtomShieldsScanner._debug("[!] %s" % e)
return None
finally:
sys.path.remove(os.path.dirname(path))
return instance
|
Returns a class instance from a .py file.
Args:
path (str): Absolute path to .py file
args (dict): Arguments passed via class constructor
Returns:
object: Class instance or None
|
def draw_buffers(self, near, far):
self.ctx.disable(moderngl.DEPTH_TEST)
helper.draw(self.gbuffer.color_attachments[0], pos=(0.0, 0.0), scale=(0.25, 0.25))
helper.draw(self.gbuffer.color_attachments[1], pos=(0.5, 0.0), scale=(0.25, 0.25))
helper.draw_depth(self.gbuffer.depth_attachment, near, far, pos=(1.0, 0.0), scale=(0.25, 0.25))
helper.draw(self.lightbuffer.color_attachments[0], pos=(1.5, 0.0), scale=(0.25, 0.25))
|
Draw framebuffers for debug purposes.
We need to supply near and far plane so the depth buffer can be linearized when visualizing.
:param near: Projection near value
:param far: Projection far value
|
def money(min=0, max=10):
value = random.choice(range(min * 100, max * 100))
return "%1.2f" % (float(value) / 100)
|
Return a str of a decimal number with two digits after the decimal mark.
|
def _tz(self, z):
return (z-self.param_dict['psf-zslab'])*self.param_dict[self.zscale]
|
Transform z to real-space coordinates from tile coordinates
|
def set_action(self,action):
if action is None:
if self.xmlnode.hasProp("action"):
self.xmlnode.unsetProp("action")
return
if action not in ("remove","update"):
raise ValueError("Action must be 'update' or 'remove'")
action = unicode(action)
self.xmlnode.setProp("action", action.encode("utf-8"))
|
Set the action of the item.
:Parameters:
- `action`: the new action or `None`.
:Types:
- `action`: `unicode`
|
def parallel_tfa_lcdir(lcdir,
templateinfo,
lcfileglob=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
interp='nearest',
sigclip=5.0,
mintemplatedist_arcmin=10.0,
nworkers=NCPUS,
maxworkertasks=1000):
if isinstance(templateinfo,str) and os.path.exists(templateinfo):
with open(templateinfo,'rb') as infd:
templateinfo = pickle.load(infd)
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if lcfileglob is None:
lcfileglob = dfileglob
lclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))
return parallel_tfa_lclist(
lclist,
templateinfo,
timecols=timecols,
magcols=magcols,
errcols=errcols,
lcformat=lcformat,
lcformatdir=None,
interp=interp,
sigclip=sigclip,
mintemplatedist_arcmin=mintemplatedist_arcmin,
nworkers=nworkers,
maxworkertasks=maxworkertasks
)
|
This applies TFA in parallel to all LCs in a directory.
Parameters
----------
lcdir : str
This is the directory containing the light curve files to process.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
lcfileglob : str or None
The UNIX file glob to use when searching for light curve files in
`lcdir`. If None, the default file glob associated with the registered LC
format is used.
timecols : list of str or None
The timecol keys to use from the lcdict in applying TFA corrections.
magcols : list of str or None
The magcol keys to use from the lcdict in applying TFA corrections.
errcols : list of str or None
The errcol keys to use from the lcdict in applying TFA corrections.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming the light curves to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to the light curves before running TFA
on them.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
nworkers : int
The number of parallel workers to launch
maxworkertasks : int
The maximum number of tasks per worker allowed before it's replaced by a
fresh one.
Returns
-------
dict
Contains the input file names and output TFA light curve filenames per
input file organized by each `magcol` in `magcols`.
|
def build_message(self, data):
if not data:
return None
return Message(
id=data['message']['mid'],
platform=self.platform,
text=data['message']['text'],
user=data['sender']['id'],
timestamp=data['timestamp'],
raw=data,
chat=None,
)
|
Return a Message instance according to the data received from
Facebook Messenger API.
|
def _handle_array(toks):
if len(toks) == 5 and toks[1] == '{' and toks[4] == '}':
subtree = toks[2:4]
signature = ''.join(s for (_, s) in subtree)
[key_func, value_func] = [f for (f, _) in subtree]
def the_dict_func(a_dict, variant=0):
elements = \
[(key_func(x), value_func(y)) for (x, y) in a_dict.items()]
level = 0 if elements == [] \
else max(max(x, y) for ((_, x), (_, y)) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Dictionary(
((x, y) for ((x, _), (y, _)) in elements),
signature=signature,
variant_level=obj_level), func_level)
return (the_dict_func, 'a{' + signature + '}')
if len(toks) == 2:
(func, sig) = toks[1]
def the_array_func(a_list, variant=0):
if isinstance(a_list, dict):
raise IntoDPValueError(a_list, "a_list",
"is a dict, must be an array")
elements = [func(x) for x in a_list]
level = 0 if elements == [] else max(x for (_, x) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Array(
(x for (x, _) in elements),
signature=sig,
variant_level=obj_level), func_level)
return (the_array_func, 'a' + sig)
raise IntoDPValueError(toks, "toks",
"unexpected tokens")
|
Generate the correct function for an array signature.
:param toks: the list of parsed tokens
:returns: function that returns an Array or Dictionary value
:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str
|
def outer_right_join(self, join_streamlet, window_config, join_function):
from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt
join_streamlet_result = JoinStreamlet(JoinBolt.OUTER_RIGHT, window_config,
join_function, self, join_streamlet)
self._add_child(join_streamlet_result)
join_streamlet._add_child(join_streamlet_result)
return join_streamlet_result
|
Return a new Streamlet formed by an outer right join of join_streamlet with this streamlet.
|
def new(ABF,forceNewFigure=False,title=None,xlabel=None,ylabel=None):
if len(pylab.get_fignums()) and forceNewFigure==False:
return
pylab.figure(figsize=(8,6))
pylab.grid(alpha=.5)
pylab.title(ABF.ID)
pylab.ylabel(ABF.units)
pylab.xlabel("seconds")
if xlabel:
pylab.xlabel(xlabel)
if ylabel:
pylab.ylabel(ylabel)
if title:
pylab.title(title)
annotate(ABF)
|
Makes a new matplotlib figure with default dimensions and DPI.
Also labels it with pA or mV depending on the ABF units.
|
def _getExperimentDescriptionSchema():
installPath = os.path.dirname(os.path.abspath(__file__))
schemaFilePath = os.path.join(installPath, "experimentDescriptionSchema.json")
return json.loads(open(schemaFilePath, 'r').read())
|
Returns the experiment description schema. This implementation loads it in
from file experimentDescriptionSchema.json.
Parameters:
--------------------------------------------------------------------------
Returns: returns a dict representing the experiment description schema.
|
def generate_dirlist_html(FS, filepath):
yield '<table class="dirlist">'
if filepath == '/':
filepath = ''
for name in FS.listdir(filepath):
full_path = pathjoin(filepath, name)
if FS.isdir(full_path):
full_path = full_path + '/'
yield u'<tr><td><a href="{0}">{0}</a></td></tr>'.format(
cgi.escape(full_path))
yield '</table>'
|
Generate directory listing HTML
Arguments:
FS (FS): filesystem object to read files from
filepath (str): path to generate directory listings for
Yields:
str: lines of an HTML table
|
def _random_token(self, bits=128):
alphabet = string.ascii_letters + string.digits + '-_'
num_letters = int(math.ceil(bits / 6.0))
return ''.join(random.choice(alphabet) for i in range(num_letters))
|
Generates a random token, using the url-safe base64 alphabet.
The "bits" argument specifies the bits of randomness to use.
|
def run(self, data, results=None, mask=None, positions=None):
model_image = results.last.unmasked_model_image
galaxy_tuples = results.last.constant.name_instance_tuples_for_class(g.Galaxy)
results_copy = copy.copy(results.last)
for name, galaxy in galaxy_tuples:
optimizer = self.optimizer.copy_with_name_extension(name)
optimizer.variable.hyper_galaxy = g.HyperGalaxy
galaxy_image = results.last.unmasked_image_for_galaxy(galaxy)
optimizer.fit(self.__class__.Analysis(data, model_image, galaxy_image))
getattr(results_copy.variable, name).hyper_galaxy = optimizer.variable.hyper_galaxy
getattr(results_copy.constant, name).hyper_galaxy = optimizer.constant.hyper_galaxy
return results_copy
|
Run a fit for each galaxy from the previous phase.
Parameters
----------
data: LensData
results: ResultsCollection
Results from all previous phases
mask: Mask
The mask
positions
Returns
-------
results: HyperGalaxyResults
A collection of results, with one item per galaxy
|
def set_share_path(self, share_path):
assert share_path == "" or share_path.startswith("/")
if share_path == "/":
share_path = ""
assert share_path in ("", "/") or not share_path.endswith("/")
self.share_path = share_path
|
Set application location for this resource provider.
@param share_path: a UTF-8 encoded, unquoted byte string.
|
def new_comment(self, string, start, end, line):
prefix = line[:start[1]]
if prefix.strip():
self.current_block.add(string, start, end, line)
else:
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
|
Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
|
def make_sub_element(parent, tag, nsmap=None):
if use_lxml:
return etree.SubElement(parent, tag, nsmap=nsmap)
return etree.SubElement(parent, tag)
|
Wrapper for etree.SubElement, that takes care of unsupported nsmap option.
|
def open(self):
if self.seed_url:
self.driver_adapter.open(self.seed_url)
self.wait_for_page_to_load()
return self
raise UsageError("Set a base URL or URL_TEMPLATE to open this page.")
|
Open the page.
Navigates to :py:attr:`seed_url` and calls :py:func:`wait_for_page_to_load`.
:return: The current page object.
:rtype: :py:class:`Page`
:raises: UsageError
|
def get_request_date(cls, req):
date = None
for header in ['x-amz-date', 'date']:
if header not in req.headers:
continue
try:
date_str = cls.parse_date(req.headers[header])
except DateFormatError:
continue
try:
date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
except ValueError:
continue
else:
break
return date
|
Try to pull a date from the request by looking first at the
x-amz-date header, and if that's not present then the Date header.
Return a datetime.date object, or None if no date header
is found in a recognisable format.
req -- a requests PreparedRequest object
|
def _prepare_pending(self):
if not self._unprepared_pending:
return
for handler in list(self._unprepared_pending):
self._configure_io_handler(handler)
self.check_events()
|
Prepare pending handlers.
|
def get_parm(self, key):
if key in self.__parm.keys():
return self.__parm[key]
return None
|
Get parameter of FIO
|
def getdevice_by_uuid(uuid):
with settings(hide('running', 'warnings', 'stdout'), warn_only=True):
res = run_as_root('blkid -U %s' % uuid)
if not res.succeeded:
return None
return res
|
Get an HDD device by UUID
Example::
from burlap.disk import getdevice_by_uuid
device = getdevice_by_uuid("356fafdc-21d5-408e-a3e9-2b3f32cb2a8c")
if device:
mount(device,'/mountpoint')
|
def build_machine_type(cls, min_cores, min_ram):
min_cores = min_cores or job_model.DEFAULT_MIN_CORES
min_ram = min_ram or job_model.DEFAULT_MIN_RAM
min_ram *= GoogleV2CustomMachine._MB_PER_GB
cores = cls._validate_cores(min_cores)
ram = cls._validate_ram(min_ram)
memory_to_cpu_ratio = ram / cores
if memory_to_cpu_ratio < GoogleV2CustomMachine._MIN_MEMORY_PER_CPU:
adjusted_ram = GoogleV2CustomMachine._MIN_MEMORY_PER_CPU * cores
ram = cls._validate_ram(adjusted_ram)
elif memory_to_cpu_ratio > GoogleV2CustomMachine._MAX_MEMORY_PER_CPU:
adjusted_cores = math.ceil(
ram / GoogleV2CustomMachine._MAX_MEMORY_PER_CPU)
cores = cls._validate_cores(adjusted_cores)
else:
pass
return 'custom-{}-{}'.format(int(cores), int(ram))
|
Returns a custom machine type string.
|
def merge_dicts(d1, d2, _path=None):
if _path is None:
_path = ()
if isinstance(d1, dict) and isinstance(d2, dict):
for k, v in d2.items():
if isinstance(v, MissingValue) and v.name is None:
v.name = '.'.join(_path + (k,))
if isinstance(v, DeletedValue):
d1.pop(k, None)
elif k not in d1:
if isinstance(v, dict):
d1[k] = merge_dicts({}, v, _path + (k,))
else:
d1[k] = v
else:
if isinstance(d1[k], dict) and isinstance(v, dict):
d1[k] = merge_dicts(d1[k], v, _path + (k,))
elif isinstance(d1[k], list) and isinstance(v, list):
d1[k] += v
elif isinstance(d1[k], MissingValue):
d1[k] = v
elif d1[k] is None:
d1[k] = v
elif type(d1[k]) == type(v):
d1[k] = v
else:
raise TypeError('Refusing to replace a %s with a %s'
% (type(d1[k]), type(v)))
else:
raise TypeError('Cannot merge a %s with a %s' % (type(d1), type(d2)))
return d1
|
Merge dictionary d2 into d1, overriding entries in d1 with values from d2.
d1 is mutated.
_path is for internal, recursive use.
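A usage sketch (assuming merge_dicts and its MissingValue/DeletedValue helpers are importable from the defining module):
>>> d1 = {'a': 1, 'b': {'c': 2}}
>>> merge_dicts(d1, {'b': {'d': 3}, 'e': 4})
{'a': 1, 'b': {'c': 2, 'd': 3}, 'e': 4}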
|
def cur_space(self, name=None):
if name is None:
return self._impl.model.currentspace.interface
else:
self._impl.model.currentspace = self._impl.spaces[name]
return self.cur_space()
|
Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned.
|
def _find_ancestor_from_name(self, name):
if self.parent is None:
return None
if self.parent.get_name() == name:
return self.parent
return self.parent._find_ancestor_from_name(name)
|
Returns the ancestor that has a task with the given name assigned.
Returns None if no such ancestor was found.
:type name: str
:param name: The name of the wanted task.
:rtype: Task
:returns: The ancestor.
|
def _apply_unique_checks(self, i, r, unique_sets,
summarize=False,
context=None):
for key, code, message in self._unique_checks:
value = None
values = unique_sets[key]
if isinstance(key, basestring):
fi = self._field_names.index(key)
if fi >= len(r):
continue
value = r[fi]
else:
value = []
for f in key:
fi = self._field_names.index(f)
if fi >= len(r):
break
value.append(r[fi])
value = tuple(value)
if value in values:
p = {'code': code}
if not summarize:
p['message'] = message
p['row'] = i + 1
p['record'] = r
p['key'] = key
p['value'] = value
if context is not None: p['context'] = context
yield p
values.add(value)
|
Apply unique checks on `r`.
|
def read(self, filenames):
for fn in filenames:
try:
self.configs[fn] = ordered_json.load(fn)
except IOError:
self.configs[fn] = OrderedDict()
except Exception as e:
self.configs[fn] = OrderedDict()
logging.warning(
"Failed to read settings file %s, it will be ignored. The error was: %s",
fn, e
)
|
Read a list of files. Their configuration values are merged, with
preference to values from files earlier in the list.
|
def rm(self, filename):
try:
self._ftp.delete(filename)
except error_perm:
try:
current_folder = self._ftp.pwd()
self.cd(filename)
except error_perm:
print('550 Delete operation failed %s '
'does not exist!' % (filename,))
else:
self.cd(current_folder)
print('550 Delete operation failed %s '
'is a folder. Use rmdir function '
'to delete it.' % (filename,))
|
Delete a file from the server.
:param filename: the file to be deleted.
:type filename: string
|
def update(ctx, name, description, tags, private):
user, project_name = get_project_or_local(ctx.obj.get('project'))
update_dict = {}
if name:
update_dict['name'] = name
if description:
update_dict['description'] = description
if private is not None:
update_dict['is_public'] = not private
tags = validate_tags(tags)
if tags:
update_dict['tags'] = tags
if not update_dict:
Printer.print_warning('No argument was provided to update the project.')
sys.exit(1)
try:
response = PolyaxonClient().project.update_project(user, project_name, update_dict)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not update project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Project updated.")
get_project_details(response)
|
Update project.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon update foobar --description="Image Classification with DL using TensorFlow"
```
\b
```bash
$ polyaxon update mike1/foobar --description="Image Classification with DL using TensorFlow"
```
\b
```bash
$ polyaxon update --tags="foo, bar"
```
|
def open(self):
self.path = self._prepare_dir(self.topdir)
self._copy_executable(area_path=self.path)
self._save_logging_levels(area_path=self.path)
self._put_python_modules(modules=self.python_modules, area_path=self.path)
|
Open the working area
Returns
-------
None
|
def do_internal_run(self, initial_count=0, subblock=None, update_derr=True):
self._inner_run_counter = initial_count; good_step = True
n_good_steps = 0
CLOG.debug('Running...')
_last_residuals = self.calc_residuals().copy()
while ((self._inner_run_counter < self.run_length) & good_step &
(not self.check_terminate())):
if self.check_Broyden_J() and self._inner_run_counter != 0:
self.update_Broyden_J()
if self.check_update_eig_J() and self._inner_run_counter != 0:
self.update_eig_J()
er0 = 1*self.error
delta_vals = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False, subblock=subblock)
er1 = self.update_function(self.param_vals + delta_vals)
good_step = er1 < er0
if good_step:
n_good_steps += 1
CLOG.debug('%f\t%f' % (er0, er1))
self.update_param_vals(delta_vals, incremental=True)
self._last_residuals = _last_residuals.copy()
if update_derr:
self._last_error = er0
self.error = er1
_last_residuals = self.calc_residuals().copy()
else:
er0_0 = self.update_function(self.param_vals)
CLOG.debug('Bad step!')
if np.abs(er0 - er0_0) > 1e-6:
raise RuntimeError('Function updates are not exact.')
self._inner_run_counter += 1
return n_good_steps
|
Takes more steps without calculating J again.
Given a fixed damping, J, JTJ, iterates calculating steps, with
optional Broyden or eigendirection updates. Iterates either until
a bad step is taken or for self.run_length times.
Called internally by do_run_2() but is also useful on its own.
Parameters
----------
initial_count : Int, optional
The initial count of the run. Default is 0. Increasing from
0 effectively temporarily decreases run_length.
subblock : None or np.ndarray of bools, optional
If not None, a boolean mask which determines which sub-
block of parameters to run over. Default is None, i.e.
all the parameters.
update_derr : Bool, optional
Set to False to not update the variable that determines
delta_err, preventing premature termination through errtol.
Notes
-----
It might be good to do something similar to update_derr with the
parameter values, but this is trickier because of Broyden updates
and _fresh_J.
|
def matlab_formatter(level, vertices, codes=None):
vertices = numpy_formatter(level, vertices, codes)
if codes is not None:
level = level[0]
headers = np.vstack((
[v.shape[0] for v in vertices],
[level]*len(vertices))).T
vertices = np.vstack(
list(it.__next__() for it in
itertools.cycle((iter(headers), iter(vertices)))))
return vertices
|
`MATLAB`_ style contour formatter.
Contours are returned as a single Nx2, `MATLAB`_ style, contour array.
There are two types of rows in this format:
* Header: The first element of a header row is the number of vertices
(to follow) belonging to this contour line and the second element is the
level of the contour (the lower level for filled contours).
* Vertex: x,y coordinate pairs of the vertex.
A header row is always followed by the corresponding number of vertices.
Another header row may follow if there are more contour lines.
For filled contours the direction of vertices matters:
* CCW (ACW): The vertices give the exterior of a contour polygon.
* CW: The vertices give a hole of a contour polygon. This hole will
always be inside the exterior of the last contour exterior.
For further explanation of this format see the `Mathworks documentation
<https://www.mathworks.com/help/matlab/ref/contour-properties.html#prop_ContourMatrix>`_
noting that the MATLAB format used in the `contours` package is the
transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major
and `NumPy`_ is row-major by default).
.. _NumPy: http://www.numpy.org
.. _MATLAB: https://www.mathworks.com/products/matlab.html
|
def _stream_data_chunked(self, environ, block_size):
if "Darwin" in environ.get("HTTP_USER_AGENT", "") and environ.get(
"HTTP_X_EXPECTED_ENTITY_LENGTH"
):
WORKAROUND_CHUNK_LENGTH = True
buf = environ.get("HTTP_X_EXPECTED_ENTITY_LENGTH", "0")
length = int(buf)
else:
WORKAROUND_CHUNK_LENGTH = False
buf = environ["wsgi.input"].readline()
environ["wsgidav.some_input_read"] = 1
if buf == compat.b_empty:
length = 0
else:
length = int(buf, 16)
while length > 0:
buf = environ["wsgi.input"].read(block_size)
yield buf
if WORKAROUND_CHUNK_LENGTH:
environ["wsgidav.some_input_read"] = 1
if buf == compat.b_empty:
length = 0
else:
length -= len(buf)
else:
environ["wsgi.input"].readline()
buf = environ["wsgi.input"].readline()
if buf == compat.b_empty:
length = 0
else:
length = int(buf, 16)
environ["wsgidav.all_input_read"] = 1
|
Get the data from a chunked transfer.
|
def norm_and_check(source_tree, requested):
if os.path.isabs(requested):
raise ValueError("paths must be relative")
abs_source = os.path.abspath(source_tree)
abs_requested = os.path.normpath(os.path.join(abs_source, requested))
norm_source = os.path.normcase(abs_source)
norm_requested = os.path.normcase(abs_requested)
if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
raise ValueError("paths must be inside source tree")
return abs_requested
|
Normalise and check a backend path.
Ensure that the requested backend path is specified as a relative path,
and resolves to a location under the given source tree.
Return an absolute version of the requested path.
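A usage sketch on a POSIX filesystem (paths are illustrative):
>>> norm_and_check('/srv/project', 'backend/build.py')
'/srv/project/backend/build.py'
>>> norm_and_check('/srv/project', '../escape.py')
Traceback (most recent call last):
...
ValueError: paths must be inside source tree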
|
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
|
year, month -> number of days in that month in that year.
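The same figure can be cross-checked with the standard library (2000 is a leap year):
>>> import calendar
>>> calendar.monthrange(2000, 2)[1]
29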
|
def _transmit_create(self, channel_metadata_item_map):
for chunk in chunks(channel_metadata_item_map, self.enterprise_configuration.transmission_chunk_size):
serialized_chunk = self._serialize_items(list(chunk.values()))
try:
self.client.create_content_metadata(serialized_chunk)
except ClientError as exc:
LOGGER.error(
'Failed to update [%s] content metadata items for integrated channel [%s] [%s]',
len(chunk),
self.enterprise_configuration.enterprise_customer.name,
self.enterprise_configuration.channel_code,
)
LOGGER.error(exc)
else:
self._create_transmissions(chunk)
|
Transmit content metadata creation to integrated channel.
|
def str_from_text(text):
REGEX = re.compile('<text>((.|\n)+)</text>', re.UNICODE)
match = REGEX.match(text)
if match:
return match.group(1)
else:
return None
|
Return content of a free form text block as a string.
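For example:
>>> str_from_text('<text>Any free form text.</text>')
'Any free form text.'
>>> str_from_text('no wrapper') is None
True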
|
def embed(self, url, **kwargs):
try:
provider = self.provider_for_url(url)
except OEmbedMissingEndpoint:
raise
else:
try:
stored_match = StoredOEmbed.objects.filter(
match=url,
maxwidth=kwargs.get('maxwidth', None),
maxheight=kwargs.get('maxheight', None),
date_expires__gte=datetime.datetime.now())[0]
return OEmbedResource.create_json(stored_match.response_json)
except IndexError:
params = dict([(k, v) for k, v in kwargs.items() if v])
resource = provider.request_resource(url, **params)
try:
cache_age = int(resource.cache_age)
if cache_age < MIN_OEMBED_TTL:
cache_age = MIN_OEMBED_TTL
except:
cache_age = DEFAULT_OEMBED_TTL
date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age)
stored_oembed, created = StoredOEmbed.objects.get_or_create(
match=url,
maxwidth=kwargs.get('maxwidth', None),
maxheight=kwargs.get('maxheight', None))
stored_oembed.response_json = resource.json
stored_oembed.resource_type = resource.type
stored_oembed.date_expires = date_expires
if resource.content_object:
stored_oembed.content_object = resource.content_object
stored_oembed.save()
return resource
|
The heart of the matter: return a cached OEmbed resource for the URL if an
unexpired one is stored, otherwise fetch it from the provider and cache the response.
|
def gen_drawdown_table(returns, top=10):
df_cum = ep.cum_returns(returns, 1.0)
drawdown_periods = get_top_drawdowns(returns, top=top)
df_drawdowns = pd.DataFrame(index=list(range(top)),
columns=['Net drawdown in %',
'Peak date',
'Valley date',
'Recovery date',
'Duration'])
for i, (peak, valley, recovery) in enumerate(drawdown_periods):
if pd.isnull(recovery):
df_drawdowns.loc[i, 'Duration'] = np.nan
else:
df_drawdowns.loc[i, 'Duration'] = len(pd.date_range(peak,
recovery,
freq='B'))
df_drawdowns.loc[i, 'Peak date'] = (peak.to_pydatetime()
.strftime('%Y-%m-%d'))
df_drawdowns.loc[i, 'Valley date'] = (valley.to_pydatetime()
.strftime('%Y-%m-%d'))
if isinstance(recovery, float):
df_drawdowns.loc[i, 'Recovery date'] = recovery
else:
df_drawdowns.loc[i, 'Recovery date'] = (recovery.to_pydatetime()
.strftime('%Y-%m-%d'))
df_drawdowns.loc[i, 'Net drawdown in %'] = (
(df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * 100
df_drawdowns['Peak date'] = pd.to_datetime(df_drawdowns['Peak date'])
df_drawdowns['Valley date'] = pd.to_datetime(df_drawdowns['Valley date'])
df_drawdowns['Recovery date'] = pd.to_datetime(
df_drawdowns['Recovery date'])
return df_drawdowns
|
Places top drawdowns in a table.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
The number of top drawdowns to find (default 10).
Returns
-------
df_drawdowns : pd.DataFrame
Information about top drawdowns.
|
def _run_supervisor(self):
import time
still_supervising = lambda: (
multiprocessing.active_children()
or not self.log_queue.empty()
or not self.exception_queue.empty())
try:
while still_supervising():
try:
record = self.log_queue.get_nowait()
logger = logging.getLogger(record.name)
logger.handle(record)
except queue.Empty:
pass
try:
exception = self.exception_queue.get_nowait()
except queue.Empty:
pass
else:
raise exception
time.sleep(1/self.frame_rate)
self.elapsed_time += 1/self.frame_rate
if self.time_limit and self.elapsed_time > self.time_limit:
raise RuntimeError("timeout")
finally:
for process in multiprocessing.active_children():
process.terminate()
|
Poll the queues that the worker can use to communicate with the
supervisor, until all the workers are done and all the queues are
empty. Handle messages as they appear.
|
def rename_file(self, relativePath, name, newName, replace=False, verbose=True):
relativePath = os.path.normpath(relativePath)
if relativePath == '.':
relativePath = ''
dirInfoDict, errorMessage = self.get_directory_info(relativePath)
assert dirInfoDict is not None, errorMessage
assert name in dict.__getitem__(dirInfoDict, "files"), "file '%s' is not found in repository relative path '%s'"%(name, relativePath)
realPath = os.path.join(self.__path, relativePath, name)
assert os.path.isfile(realPath), "file '%s' is not found in system"%realPath
assert newName not in dict.__getitem__(dirInfoDict, "files"), "file '%s' already exists in repository relative path '%s'"%(newName, relativePath)
newRealPath = os.path.join(self.__path, relativePath, newName)
if os.path.isfile( newRealPath ):
if replace:
os.remove(newRealPath)
if verbose:
warnings.warn( "file '%s' already exists found in system, it is now replaced by '%s' because 'replace' flag is True."%(newRealPath,realPath) )
else:
raise Exception( "file '%s' already exists in system but not registered in repository."%newRealPath )
os.rename(realPath, newRealPath)
dict.__setitem__( dict.__getitem__(dirInfoDict, "files"),
newName,
dict.__getitem__(dirInfoDict, "files").pop(name) )
self.save()
|
Rename a file in the repository. It ensures the file is also renamed in the system.
:Parameters:
#. relativePath (string): The path, relative to the repository, of the directory where the file is located.
#. name (string): The current file name.
#. newName (string): The new file name.
#. replace (boolean): Whether to force renaming when the new file name already exists in the system.
It fails when the new file name is already registered in the repository.
#. verbose (boolean): Whether to warn and inform about any abnormalities.
|
def comments_load(self):
self.comment_times,self.comment_sweeps,self.comment_tags=[],[],[]
self.comments=0
self.comment_text=""
try:
self.comment_tags = list(self.ABFblock.segments[0].eventarrays[0].annotations['comments'])
self.comment_times = list(self.ABFblock.segments[0].eventarrays[0].times/self.trace.itemsize)
self.comment_sweeps = list(self.comment_times)
except:
for events in self.ABFblock.segments[0].events:
self.comment_tags = events.annotations['comments'].tolist()
self.comment_times = np.array(events.times.magnitude/self.trace.itemsize)
self.comment_sweeps = self.comment_times/self.sweepInterval
for i,c in enumerate(self.comment_tags):
self.comment_tags[i]=c.decode("utf-8")
|
read the header and populate self with information about comments
|
def _createSegment(cls, connections, lastUsedIterationForSegment, cell,
iteration, maxSegmentsPerCell):
while connections.numSegments(cell) >= maxSegmentsPerCell:
leastRecentlyUsedSegment = min(
connections.segmentsForCell(cell),
key=lambda segment : lastUsedIterationForSegment[segment.flatIdx])
connections.destroySegment(leastRecentlyUsedSegment)
segment = connections.createSegment(cell)
if segment.flatIdx == len(lastUsedIterationForSegment):
lastUsedIterationForSegment.append(iteration)
elif segment.flatIdx < len(lastUsedIterationForSegment):
lastUsedIterationForSegment[segment.flatIdx] = iteration
else:
raise AssertionError(
"All segments should be created with the TM createSegment method.")
return segment
|
Create a segment on the connections, enforcing the maxSegmentsPerCell
parameter.
|
def loadJsonValueFromFile(inputFilePath):
with open(inputFilePath) as fileObj:
value = json.load(fileObj)
return value
|
Loads a json value from a file and converts it to the corresponding python
object.
inputFilePath:
Path of the json file;
Returns:
python value that represents the loaded json value
|
def handle_move(self, dest_path):
if "/by_tag/" not in self.path:
raise DAVError(HTTP_FORBIDDEN)
if "/by_tag/" not in dest_path:
raise DAVError(HTTP_FORBIDDEN)
catType, tag, _rest = util.save_split(self.path.strip("/"), "/", 2)
assert catType == "by_tag"
assert tag in self.data["tags"]
self.data["tags"].remove(tag)
catType, tag, _rest = util.save_split(dest_path.strip("/"), "/", 2)
assert catType == "by_tag"
if tag not in self.data["tags"]:
self.data["tags"].append(tag)
return True
|
Change semantic of MOVE to change resource tags.
|
def load_document(self, id):
fields = self.redis.hgetall(id)
if six.PY3:
f2 = {to_string(k): to_string(v) for k, v in fields.items()}
fields = f2
try:
del fields['id']
except KeyError:
pass
return Document(id=id, **fields)
|
Load a single document by id
|
def _toStringSubclass(self, text, subclass):
self.endData()
self.handle_data(text)
self.endData(subclass)
|
Adds a certain piece of text to the tree as a NavigableString
subclass.
|
def Loc(kind, loc=None):
@llrule(loc, lambda parser: [kind])
def rule(parser):
result = parser._accept(kind)
if result is unmatched:
return result
return result.loc
return rule
|
A rule that accepts a token of kind ``kind`` and returns its location, or returns None.
|
def add(self, *args, **kwargs):
if 'question' in kwargs and isinstance(kwargs['question'], Question):
question = kwargs['question']
else:
question = Question(*args, **kwargs)
self.questions.setdefault(question.key, []).append(question)
return question
|
Add a Question instance to the questions dict. Each key points
to a list of Question instances with that key. Use the `question`
kwarg to pass a Question instance if you want, or pass in the same
args you would pass to instantiate a question.
|
def detect_color_support(env):
if env.get('COLORFUL_DISABLE', '0') == '1':
return NO_COLORS
if env.get('COLORFUL_FORCE_8_COLORS', '0') == '1':
return ANSI_8_COLORS
if env.get('COLORFUL_FORCE_16_COLORS', '0') == '1':
return ANSI_16_COLORS
if env.get('COLORFUL_FORCE_256_COLORS', '0') == '1':
return ANSI_256_COLORS
if env.get('COLORFUL_FORCE_TRUE_COLORS', '0') == '1':
return TRUE_COLORS
if not sys.stdout.isatty():
return NO_COLORS
colorterm_env = env.get('COLORTERM')
if colorterm_env:
if colorterm_env in {'truecolor', '24bit'}:
return TRUE_COLORS
if colorterm_env in {'8bit'}:
return ANSI_256_COLORS
termprog_env = env.get('TERM_PROGRAM')
if termprog_env:
if termprog_env in {'iTerm.app', 'Hyper'}:
return TRUE_COLORS
if termprog_env in {'Apple_Terminal'}:
return ANSI_256_COLORS
term_env = env.get('TERM')
if term_env:
if term_env in {'screen-256', 'screen-256color', 'xterm-256', 'xterm-256color'}:
return ANSI_256_COLORS
if term_env in {'screen', 'xterm', 'vt100', 'color', 'ansi', 'cygwin', 'linux'}:
return ANSI_16_COLORS
if colorterm_env:
return ANSI_16_COLORS
return ANSI_8_COLORS
|
Detect what color palettes are supported.
It'll return a valid color mode to use
with colorful.
:param dict env: the environment dict like returned by ``os.environ``
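A quick sketch of the override behaviour (the COLORFUL_* variables are checked before any terminal detection; NO_COLORS and ANSI_256_COLORS are the module-level mode constants returned by this function):
>>> detect_color_support({'COLORFUL_DISABLE': '1'}) == NO_COLORS
True
>>> detect_color_support({'COLORFUL_FORCE_256_COLORS': '1'}) == ANSI_256_COLORS
True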
|
def _getAttrMap(self):
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
|
Initializes a map representation of this tag's attributes,
if not already initialized.
|
def rollaxis(a, axis, start=0):
if isinstance(a, np.ndarray):
return np.rollaxis(a, axis, start)
if axis not in range(a.ndim):
raise ValueError(
'rollaxis: axis (%d) must be >=0 and < %d' % (axis, a.ndim))
if start not in range(a.ndim + 1):
raise ValueError(
'rollaxis: start (%d) must be >=0 and < %d' % (axis, a.ndim+1))
axes = list(range(a.ndim))
axes.remove(axis)
axes.insert(start, axis)
return transpose(a, axes)
|
Roll the specified axis backwards, until it lies in a given position.
Args:
a (array_like): Input array.
axis (int): The axis to roll backwards. The positions of the other axes
do not change relative to one another.
start (int, optional): The axis is rolled until it lies before this
position. The default, 0, results in a "complete" roll.
Returns:
res (ndarray)
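Example (for ndarray inputs this simply defers to np.rollaxis):
>>> import numpy as np
>>> a = np.zeros((3, 4, 5))
>>> rollaxis(a, 2).shape
(5, 3, 4)
>>> rollaxis(a, 1, start=3).shape
(3, 5, 4)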
|
def dist(
self,
src,
tar,
word_approx_min=0.3,
char_approx_min=0.73,
tests=2 ** 12 - 1,
):
return (
synoname(src, tar, word_approx_min, char_approx_min, tests, False)
/ 14
)
|
Return the normalized Synoname distance between two words.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx'
match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test
names to perform (defaults to performing all tests)
Returns
-------
float
Normalized Synoname distance
|
def _gzip_sqlitecurve(sqlitecurve, force=False):
if force:
cmd = 'gzip -k -f %s' % sqlitecurve
else:
cmd = 'gzip -k %s' % sqlitecurve
try:
outfile = '%s.gz' % sqlitecurve
if os.path.exists(outfile) and not force:
os.remove(sqlitecurve)
return outfile
else:
subprocess.check_output(cmd, shell=True)
if os.path.exists(outfile):
return outfile
else:
return None
except subprocess.CalledProcessError:
return None
|
This just compresses the sqlitecurve in gzip format.
FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
|
def hash(self, id):
h = md5(id).hexdigest()
return os.path.join(self.path, h+self.type)
|
Creates a unique filename in the cache for the id.
|
def update(self, key, value):
if key not in self.value:
self.value[key] = ReducedMetric(self.reducer)
self.value[key].update(value)
|
Updates the value of a given key and applies the reduction
|
def connected_channel(self):
if not self.channel_id:
return None
return self._lavalink.bot.get_channel(int(self.channel_id))
|
Returns the voice channel the player is connected to.
|
def gmv(a, b):
return np.exp(np.square(np.log(a) - np.log(b)).mean())
|
Geometric mean variance
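That is, it computes exp(mean((ln a - ln b)**2)), so identical inputs give exactly 1.0. A small sanity check (assuming numpy is imported as np, as in the function body):
>>> import numpy as np
>>> float(gmv(np.array([1.0, 2.0, 4.0]), np.array([1.0, 2.0, 4.0])))
1.0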
|
def intersection(tiles, *args):
tiles = listify(tiles) + listify(args)
if len(tiles) < 2:
return tiles[0]
tile = tiles[0]
l, r = tile.l.copy(), tile.r.copy()
for tile in tiles[1:]:
l = amax(l, tile.l)
r = amin(r, tile.r)
return Tile(l, r, dtype=l.dtype)
|
Intersection of tiles, returned as a tile
>>> Tile.intersection(Tile([0, 1], [5, 4]), Tile([1, 0], [4, 5]))
Tile [1, 1] -> [4, 4] ([3, 3])
|
def has_credentials(self):
if not self.credentials:
return False
elif (self.credentials.access_token_expired and
not self.credentials.refresh_token):
return False
else:
return True
|
Returns True if there are valid credentials for the current user.
|
def _do_action_left(self, state):
reward = 0
for row in range(4):
merge_candidate = -1
merged = np.zeros((4,), dtype=np.bool)
for col in range(4):
if state[row, col] == 0:
continue
if (merge_candidate != -1 and
not merged[merge_candidate] and
state[row, merge_candidate] == state[row, col]):
state[row, col] = 0
merged[merge_candidate] = True
state[row, merge_candidate] += 1
reward += 2 ** state[row, merge_candidate]
else:
merge_candidate += 1
if col != merge_candidate:
state[row, merge_candidate] = state[row, col]
state[row, col] = 0
return reward
|
Executes action 'Left'.
|
def run(self, next_task):
self.event.wait()
self.task()
self.event.clear()
next_task.event.set()
|
Wait for the event, run the task, trigger the next task.
|
def save(self, msg, args):
self.send_message(msg.channel, "Saving current state...")
self._bot.plugins.save_state()
self.send_message(msg.channel, "Done.")
|
Causes the bot to write its current state to backend.
|
def add_link(cls, attr, title='', display=''):
global klass_count
klass_count += 1
fn_name = 'dyn_fn_%d' % klass_count
cls.list_display.append(fn_name)
if not title:
title = attr.capitalize()
_display = display
def _link(self, obj):
field_obj = admin_obj_attr(obj, attr)
if not field_obj:
return ''
text = _obj_display(field_obj, _display)
return admin_obj_link(field_obj, text)
_link.short_description = title
_link.allow_tags = True
_link.admin_order_field = attr
setattr(cls, fn_name, _link)
|
Adds a ``list_display`` attribute that appears as a link to the
django admin change page for the type of object being shown. Supports
double underscore attribute name dereferencing.
:param attr:
Name of the attribute to dereference from the corresponding
object, i.e. what will be linked to. This name supports double
underscore object link referencing for ``models.ForeignKey``
members.
:param title:
Title for the column of the django admin table. If not given it
defaults to a capitalized version of ``attr``
:param display:
What to display as the text for the link being shown. If not
given it defaults to the string representation of the object for
the row: ``str(obj)`` . This parameter supports django
templating, the context for which contains a dictionary key named
"obj" with the value being the object for the row.
Example usage:
.. code-block:: python
# ---- admin.py file ----
base = fancy_modeladmin('id')
base.add_link('author', 'Our Authors',
'{{obj.name}} (id={{obj.id}})')
@admin.register(Book)
class BookAdmin(base):
pass
The django admin change page for the Book class would have a column
for "id" and another titled "Our Authors". The "Our Authors" column
would have a link for each Author object referenced by "book.author".
The link would go to the Author django admin change listing. The
display of the link would be the name of the author with the id in
brackets, e.g. "Douglas Adams (id=42)"
|
def install_apt(self, fn=None, package_name=None, update=0, list_only=0):
r = self.local_renderer
assert self.genv[ROLE]
apt_req_fqfn = fn or (self.env.apt_requirments_fn and self.find_template(self.env.apt_requirments_fn))
if not apt_req_fqfn:
return []
assert os.path.isfile(apt_req_fqfn)
lines = list(self.env.apt_packages or [])
for _ in open(apt_req_fqfn).readlines():
if _.strip() and not _.strip().startswith('#') \
and (not package_name or _.strip() == package_name):
lines.extend(_pkg.strip() for _pkg in _.split(' ') if _pkg.strip())
if list_only:
return lines
tmp_fn = r.write_temp_file('\n'.join(lines))
apt_req_fqfn = tmp_fn
if not self.genv.is_local:
r.put(local_path=tmp_fn, remote_path=tmp_fn)
apt_req_fqfn = self.genv.put_remote_path
r.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq update --fix-missing')
r.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq install `cat "%s" | tr "\\n" " "`' % apt_req_fqfn)
|
Installs system packages listed in apt-requirements.txt.
|
def refresh_collections(self, accept=MEDIA_TYPE_TAXII_V20):
url = self.url + "collections/"
response = self._conn.get(url, headers={"Accept": accept})
self._collections = []
for item in response.get("collections", []):
collection_url = url + item["id"] + "/"
collection = Collection(collection_url, conn=self._conn,
collection_info=item)
self._collections.append(collection)
self._loaded_collections = True
|
Update the list of Collections contained by this API Root.
This invokes the ``Get Collections`` endpoint.
|
def parent_callback(self, executor_fu):
with self._update_lock:
if not executor_fu.done():
raise ValueError("done callback called, despite future not reporting itself as done")
if executor_fu != self.parent:
if executor_fu.exception() is None and not isinstance(executor_fu.result(), RemoteExceptionWrapper):
raise ValueError("internal consistency error: AppFuture done callback called without an exception, but parent has been changed since then")
try:
res = executor_fu.result()
if isinstance(res, RemoteExceptionWrapper):
res.reraise()
super().set_result(executor_fu.result())
except Exception as e:
if executor_fu.retries_left > 0:
pass
else:
super().set_exception(e)
|
Callback from a parent future to update the AppFuture.
Used internally by AppFuture, and should not be called by code using AppFuture.
Args:
- executor_fu (Future): Future returned by the executor along with callback.
This may not be the current parent future, as the parent future may have
already been updated to point to a retrying execution, and in that case,
this is logged.
In the case that a new parent has been attached, we must immediately discard
this result no matter what it contains (although it might be interesting
to log if it was successful...)
Returns:
- None
Updates the super() with the result() or exception()
|
def stat(self, input_filepath, scale=None, rms=False):
effect_args = ['channels', '1', 'stat']
if scale is not None:
if not is_number(scale) or scale <= 0:
raise ValueError("scale must be a positive number.")
effect_args.extend(['-s', '{:f}'.format(scale)])
if rms:
effect_args.append('-rms')
_, _, stat_output = self.build(
input_filepath, None, extra_args=effect_args, return_output=True
)
stat_dict = {}
lines = stat_output.split('\n')
for line in lines:
split_line = line.split()
if len(split_line) == 0:
continue
value = split_line[-1]
key = ' '.join(split_line[:-1])
stat_dict[key.strip(':')] = value
return stat_dict
|
Display time and frequency domain statistical information about the
audio. Audio is passed unmodified through the SoX processing chain.
Unlike other Transformer methods, this does not modify the transformer
effects chain. Instead it computes statistics on the output file that
would be created if the build command were invoked.
Note: The file is downmixed to mono prior to computation.
Parameters
----------
input_filepath : str
Path to input file to compute stats on.
scale : float or None, default=None
If not None, scales the input by the given scale factor.
rms : bool, default=False
If True, scales all values by the average rms amplitude.
Returns
-------
stat_dict : dict
Dictionary of statistics.
See Also
--------
stats, power_spectrum, sox.file_info
|
def _synoname_strip_punct(self, word):
stripped = ''
for char in word:
if char not in set(',-./:;"&\'()!{|}?$%*+<=>[\\]^_`~'):
stripped += char
return stripped.strip()
|
Return a word with punctuation stripped out.
Parameters
----------
word : str
A word to strip punctuation from
Returns
-------
str
The word stripped of punctuation
Examples
--------
>>> pe = Synoname()
>>> pe._synoname_strip_punct('AB;CD EF-GH$IJ')
'ABCD EFGHIJ'
|
def _updateBoostFactorsGlobal(self):
if (self._localAreaDensity > 0):
targetDensity = self._localAreaDensity
else:
inhibitionArea = ((2 * self._inhibitionRadius + 1)
** self._columnDimensions.size)
inhibitionArea = min(self._numColumns, inhibitionArea)
targetDensity = float(self._numActiveColumnsPerInhArea) / inhibitionArea
targetDensity = min(targetDensity, 0.5)
self._boostFactors = numpy.exp(
(targetDensity - self._activeDutyCycles) * self._boostStrength)
|
Update boost factors when global inhibition is used
|
def _hook_write_mem(self, uc, access, address, size, value, data):
self._mem_delta[address] = (value, size)
return True
|
Captures memory written by Unicorn
|
def _get_flow_for_token(csrf_token):
flow_pickle = session.pop(
_FLOW_KEY.format(csrf_token), None)
if flow_pickle is None:
return None
else:
return pickle.loads(flow_pickle)
|
Retrieves the flow instance associated with a given CSRF token from
the Flask session.
|
def upload_bel_namespace(self, update: bool = False) -> Namespace:
if not self.is_populated():
self.populate()
namespace = self._get_default_namespace()
if namespace is None:
log.info('making namespace for %s', self._get_namespace_name())
return self._make_namespace()
if update:
self._update_namespace(namespace)
return namespace
|
Upload the namespace to the PyBEL database.
:param update: Should the namespace be updated first?
|
def outputs(ctx):
user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
try:
PolyaxonClient().job.download_outputs(user, project_name, _job)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not download outputs for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success('Files downloaded.')
|
Download outputs for job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job -j 1 outputs
```
|
def _build_stat(self, idx):
nameordered = self.samples.keys()
nameordered.sort()
newdat = pd.DataFrame([self.samples[i].stats_dfs[idx] \
for i in nameordered], index=nameordered)\
.dropna(axis=1, how='all')
return newdat
|
Returns a data frame with Sample stats for each step
|
def csi_wrap(self, value, capname, *args):
if isinstance(value, str):
value = value.encode('utf-8')
return b''.join([
self.csi(capname, *args),
value,
self.csi('sgr0'),
])
|
Return a value wrapped in the selected CSI sequence, followed by a reset.
|
def _extract_links(self):
extracted = dict()
try:
for key, value in self.request.links.items():
parsed = urlparse(value["url"])
fragment = "{path}?{query}".format(path=parsed[2], query=parsed[4])
extracted[key] = fragment
parsed = list(urlparse(self.self_link))
stripped = "&".join(
[
"%s=%s" % (p[0], p[1])
for p in parse_qsl(parsed[4])
if p[0] != "format"
]
)
extracted["self"] = urlunparse(
[parsed[0], parsed[1], parsed[2], parsed[3], stripped, parsed[5]]
)
return extracted
except KeyError:
return None
|
Extract self, first, next, last links from a request response
|
def remove_dcm2nii_underprocessed(filepaths):
cln_flist = []
len_sorted = sorted(filepaths, key=len)
for idx, fpath in enumerate(len_sorted):
remove = False
fname = op.basename(fpath)
rest = len_sorted[idx+1:]
for rest_fpath in rest:
rest_file = op.basename(rest_fpath)
if rest_file.endswith(fname):
remove = True
break
if not remove:
cln_flist.append(fpath)
return cln_flist
|
Return a subset of `filepaths`. Keep only the files whose basename is longer than the
others with the same suffix.
This relies on the fact that dcm2nii prepends a prefix character for each processing
step it performs automatically in the DICOM to NIfTI conversion.
Parameters
----------
filepaths: iterable of str
Returns
-------
cleaned_paths: iterable of str
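A worked example with hypothetical dcm2nii outputs, where each processing step prepends one character; only the most-processed file survives:
>>> remove_dcm2nii_underprocessed(['/data/im.nii', '/data/cim.nii', '/data/ocim.nii'])
['/data/ocim.nii']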
|
def platform_detect():
pi = pi_version()
if pi is not None:
return RASPBERRY_PI
plat = platform.platform()
if plat.lower().find('armv7l-with-debian') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('armv7l-with-ubuntu') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('armv7l-with-glibc2.4') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('tegra-aarch64-with-ubuntu') > -1:
return JETSON_NANO
try:
import mraa
if mraa.getPlatformName()=='MinnowBoard MAX':
return MINNOWBOARD
except ImportError:
pass
return UNKNOWN
|
Detect the platform this code is running on and return the platform type. Will
return RASPBERRY_PI, BEAGLEBONE_BLACK, JETSON_NANO, MINNOWBOARD, or UNKNOWN.
|
def height(self):
if len(self.coords) <= 1:
return 0
return np.max(self.yy) - np.min(self.yy)
|
Get the height of a bounding box encapsulating the line.
|
def pushd(path):
saved = os.getcwd()
os.chdir(path)
try:
yield saved
finally:
os.chdir(saved)
|
A context that enters a given directory and restores the old state on exit.
The original directory is returned as the context variable.
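A usage sketch (the yield implies the function is wrapped with contextlib.contextmanager):
>>> import os, tempfile
>>> with pushd(tempfile.gettempdir()) as previous:
...     _ = os.getcwd()
>>> os.getcwd() == previous
True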
|
def getLabels(self, start=None, end=None):
if len(self._recordsCache) == 0:
return {
'isProcessing': False,
'recordLabels': []
}
try:
start = int(start)
except Exception:
start = 0
try:
end = int(end)
except Exception:
end = self._recordsCache[-1].ROWID
if end <= start:
raise HTMPredictionModelInvalidRangeError("Invalid supplied range for 'getLabels'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'numRecordsStored': len(self._recordsCache)
})
results = {
'isProcessing': False,
'recordLabels': []
}
ROWIDX = numpy.array(
self._knnclassifier.getParameter('categoryRecencyList'))
validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
categories = self._knnclassifier.getCategoryList()
for idx in validIdx:
row = dict(
ROWID=int(ROWIDX[idx]),
labels=self._categoryToLabelList(categories[idx]))
results['recordLabels'].append(row)
return results
|
Get the labels on classified points within range start to end. Not inclusive
of end.
:returns: (dict) with format:
::
{
'isProcessing': boolean,
'recordLabels': list of results
}
``isProcessing`` - currently always false as recalculation blocks; used if
reprocessing of records is still being performed;
Each item in ``recordLabels`` is of format:
::
{
'ROWID': id of the row,
'labels': list of strings
}
|
def to_representation(self, instance):
updated_course = copy.deepcopy(instance)
enterprise_customer_catalog = self.context['enterprise_customer_catalog']
updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(
updated_course['key']
)
for course_run in updated_course['course_runs']:
course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(
course_run['key']
)
return updated_course
|
Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data.
|
def Match(pattern, s):
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
|
Matches the string with the pattern, caching the compiled regexp.
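A usage sketch (assuming the module-level _regexp_compile_cache dict exists, as in cpplint):
>>> bool(Match(r'\s*#\s*endif', '  # endif  // header guard'))
True
>>> Match(r'\d+', 'abc123') is None
True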
|
def end(self):
results = self.communicationChannel.receive()
if self.nruns != len(results):
import logging
logger = logging.getLogger(__name__)
logger.warning(
'too few results received: {} results received, {} expected'.format(
len(results),
self.nruns
))
return results
|
Wait until all event loops end and return the results.
|
def Negative(other_param, mode="invert", reroll_count_max=2):
return ForceSign(
other_param=other_param,
positive=False,
mode=mode,
reroll_count_max=reroll_count_max
)
|
Converts another parameter's results to negative values.
Parameters
----------
other_param : imgaug.parameters.StochasticParameter
Other parameter whose sampled values are to be
modified.
mode : {'invert', 'reroll'}, optional
How to change the signs. Valid values are ``invert`` and ``reroll``.
``invert`` means that wrong signs are simply flipped.
``reroll`` means that all samples with wrong signs are sampled again,
optionally many times, until they randomly end up having the correct
sign.
reroll_count_max : int, optional
If `mode` is set to ``reroll``, this determines how often values may
be rerolled before giving up and simply flipping the sign (as in
``mode="invert"``). This shouldn't be set too high, as rerolling is
expensive.
Examples
--------
>>> param = Negative(Normal(0, 1), mode="reroll")
Generates a normal distribution that has only negative values.
|
def IsErrorSuppressedByNolint(category, linenum):
return (_global_error_suppressions.get(category, False) or
linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
|
Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment or
global suppression.
|