| code | docstring |
|---|---|
def missing_info(**kwargs) -> str:
func = kwargs.pop('func', 'unknown')
if 'ticker' in kwargs: kwargs['ticker'] = kwargs['ticker'].replace('/', '_')
info = utils.to_str(kwargs, fmt='{value}', sep='/')[1:-1]
return f'{func}/{info}'
|
Full information for the missing query
|
def env():
if cij.ssh.env():
cij.err("cij.lnvm.env: invalid SSH environment")
return 1
lnvm = cij.env_to_dict(PREFIX, REQUIRED)
nvme = cij.env_to_dict("NVME", ["DEV_NAME"])
if "BGN" not in lnvm.keys():
cij.err("cij.lnvm.env: invalid LNVM_BGN")
return 1
if "END" not in lnvm.keys():
cij.err("cij.lnvm.env: invalid LNVM_END")
return 1
if "DEV_TYPE" not in lnvm.keys():
cij.err("cij.lnvm.env: invalid LNVM_DEV_TYPE")
return 1
lnvm["DEV_NAME"] = "%sb%03de%03d" % (nvme["DEV_NAME"], int(lnvm["BGN"]), int(lnvm["END"]))
lnvm["DEV_PATH"] = "/dev/%s" % lnvm["DEV_NAME"]
cij.env_export(PREFIX, EXPORTED, lnvm)
return 0
|
Verify LNVM variables and construct exported variables
|
def _sort(self, concepts, sort=None, language='any', reverse=False):
sorted = copy.copy(concepts)
if sort:
sorted.sort(key=methodcaller('_sortkey', sort, language), reverse=reverse)
return sorted
|
Returns a sorted version of a list of concepts. The original
list is left unchanged.
:param list concepts: A list of concepts and collections.
:param string sort: What to sort on: `id`, `label` or `sortlabel`
:param string language: Language to use when sorting on `label` or
`sortlabel`.
:param boolean reverse: Reverse the sort order?
:rtype: list
|
def _CCompiler_spawn_silent(cmd, dry_run=None):
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if proc.returncode:
raise DistutilsExecError(err)
|
Spawn a process, and eat the stdio.
|
def hue(self, img1, img2):
import colorsys
p1 = list(img1.getdata())
p2 = list(img2.getdata())
for i in range(len(p1)):
r1, g1, b1, a1 = p1[i]
r1 = r1 / 255.0
g1 = g1 / 255.0
b1 = b1 / 255.0
h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)
r2, g2, b2, a2 = p2[i]
r2 = r2 / 255.0
g2 = g2 / 255.0
b2 = b2 / 255.0
h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)
r3, g3, b3 = colorsys.hsv_to_rgb(h2, s1, v1)
r3 = int(r3*255)
g3 = int(g3*255)
b3 = int(b3*255)
p1[i] = (r3, g3, b3, a1)
img = Image.new("RGBA", img1.size, 255)
img.putdata(p1)
return img
|
Applies the hue blend mode.
Hues image img1 with image img2.
The hue filter replaces the hues of pixels in img1
with the hues of pixels in img2.
Returns a composite image with the alpha channel retained.
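A per-pixel sketch of what the blend does (standard-library colorsys only;
the sample RGB values are made up):
>>> import colorsys
>>> h1, s1, v1 = colorsys.rgb_to_hsv(0.8, 0.2, 0.2)  # pixel from img1
>>> h2, s2, v2 = colorsys.rgb_to_hsv(0.2, 0.2, 0.8)  # pixel from img2
>>> [round(c, 2) for c in colorsys.hsv_to_rgb(h2, s1, v1)]  # img2's hue, img1's s/v
[0.2, 0.2, 0.8]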
|
def adsSyncWriteControlReqEx(
port, address, ads_state, device_state, data, plc_data_type
):
sync_write_control_request = _adsDLL.AdsSyncWriteControlReqEx
ams_address_pointer = ctypes.pointer(address.amsAddrStruct())
ads_state_c = ctypes.c_ulong(ads_state)
device_state_c = ctypes.c_ulong(device_state)
if plc_data_type == PLCTYPE_STRING:
data = ctypes.c_char_p(data.encode("utf-8"))
data_pointer = data
data_length = len(data_pointer.value) + 1
else:
data = plc_data_type(data)
data_pointer = ctypes.pointer(data)
data_length = ctypes.sizeof(data)
error_code = sync_write_control_request(
port,
ams_address_pointer,
ads_state_c,
device_state_c,
data_length,
data_pointer,
)
if error_code:
raise ADSError(error_code)
|
Change the ADS state and the machine-state of the ADS-server.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param int ads_state: new ADS-state, according to ADSTATE constants
:param int device_state: new machine-state
:param data: additional data
:param int plc_data_type: plc datatype, according to PLCTYPE constants
|
def size_attachments(self):
total_size = 0
for attachment in self.fs_cleansed_attachments:
total_size += stat(attachment).st_size
return total_size
|
returns the number of bytes that the cleansed attachments take up on disk
|
def progress(length, **kwargs):
quiet = False
progress_class = kwargs.pop("progress_class", Progress)
kwargs["write_method"] = istdout.info
kwargs["width"] = kwargs.get("width", globals()["WIDTH"])
kwargs["length"] = length
pbar = progress_class(**kwargs)
pbar.update(0)
yield pbar
pbar.update(length)
br()
|
display a progress bar that can update in place
example --
total_length = 1000
with echo.progress(total_length) as p:
for x in range(total_length):
# do something crazy
p.update(x)
length -- int -- the total size of what you will be updating progress on
|
def _validate_license(model):
license_mapping = obtain_licenses()
try:
license_url = model.metadata['license_url']
except KeyError:
raise exceptions.MissingRequiredMetadata('license_url')
try:
license = license_mapping[license_url]
except KeyError:
raise exceptions.InvalidLicense(license_url)
if not license['is_valid_for_publication']:
raise exceptions.InvalidLicense(license_url)
|
Given the model, check the license is one valid for publication.
|
def tetrad(clr, angle=90):
clr = color(clr)
colors = colorlist(clr)
c = clr.rotate_ryb(angle)
if clr.brightness < 0.5:
c.brightness += 0.2
else:
c.brightness -= 0.2
colors.append(c)
c = clr.rotate_ryb(angle * 2)
if clr.brightness < 0.5:
c.brightness += 0.1
else:
c.brightness -= 0.1
colors.append(c)
colors.append(clr.rotate_ryb(angle * 3).lighten(0.1))
return colors
|
Returns a tetrad of colors.
The tetrad is made up of this color and three other colors
that together make up a cross on the artistic color wheel.
|
def setup(self, pin, mode):
self._setup_pin(pin, mode)
self.mpsse_write_gpio()
|
Set the input or output mode for a specified pin. Mode should be
either OUT or IN.
|
def _create_or_reuse_folder(local_folder, parent_folder_id,
reuse_existing=False):
local_folder_name = os.path.basename(local_folder)
folder_id = None
if reuse_existing:
children = session.communicator.folder_children(
session.token, parent_folder_id)
folders = children['folders']
for folder in folders:
if folder['name'] == local_folder_name:
folder_id = folder['folder_id']
break
if folder_id is None:
new_folder = session.communicator.create_folder(session.token,
local_folder_name,
parent_folder_id)
folder_id = new_folder['folder_id']
return folder_id
|
Create a folder from the local file in the midas folder corresponding to
the parent folder id.
:param local_folder: full path to a directory on the local file system
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the folder will be added
:type parent_folder_id: int | long
:param reuse_existing: (optional) whether to accept an existing folder of
the same name in the same location, or create a new one instead
:type reuse_existing: bool
|
def handler(self, conn, *args):
self.shell.stdout.write(self.shell.prompt)
line = self.shell.stdin.readline()
if not len(line):
line = 'EOF'
return False
else:
line = line.rstrip('\r\n')
line = self.shell.precmd(line)
stop = self.shell.onecmd(line)
stop = self.shell.postcmd(stop, line)
self.shell.stdout.flush()
self.shell.postloop()
if stop:
self.shell = None
conn.close()
return not stop
|
Asynchronous connection handler. Processes each line from the socket.
|
def icc_img_to_zscore(icc, center_image=False):
vol = read_img(icc).get_data()
v2 = vol[vol != 0]
if center_image:
v2 = detrend(v2, axis=0)
vstd = np.linalg.norm(v2, ord=2) / np.sqrt(np.prod(v2.shape) - 1)
eps = np.finfo(vstd.dtype).eps
vol /= (eps + vstd)
return vol
|
Return a z-scored version of `icc`.
This function is based on GIFT `icatb_convertImageToZScores` function.
|
def xformers(sig):
return \
[(_wrapper(f), l) for (f, l) in \
_XFORMER.PARSER.parseString(sig, parseAll=True)]
|
Get the list of xformer functions for the given signature.
:param str sig: a signature
:returns: a list of xformer functions for the given signature.
:rtype: list of tuple of a function * str
Each function catches all TypeErrors it encounters and raises
corresponding IntoDPValueError exceptions.
|
def get_all_values(self, constraints, expression, maxcnt=None, silent=False):
if not isinstance(expression, Expression):
return [expression]
assert isinstance(constraints, ConstraintSet)
assert isinstance(expression, Expression)
expression = simplify(expression)
if maxcnt is None:
maxcnt = consts.maxsolutions
with constraints as temp_cs:
if isinstance(expression, Bool):
var = temp_cs.new_bool()
elif isinstance(expression, BitVec):
var = temp_cs.new_bitvec(expression.size)
elif isinstance(expression, Array):
var = temp_cs.new_array(index_max=expression.index_max, value_bits=expression.value_bits, taint=expression.taint).array
else:
raise NotImplementedError(f"get_all_values only implemented for {type(expression)} expression type.")
temp_cs.add(var == expression)
self._reset(temp_cs.to_string(related_to=var))
result = []
while self._is_sat():
value = self._getvalue(var)
result.append(value)
self._assert(var != value)
if len(result) >= maxcnt:
if silent:
break
else:
raise TooManySolutions(result)
return result
|
Returns a list with all the possible values for the given expression
|
def prompt(question, choices=None):
if not re.search(r"\s$", question):
question = "{}: ".format(question)
while True:
if sys.version_info[0] > 2:
answer = input(question)
else:
answer = raw_input(question)
if not choices or answer in choices:
break
return answer
|
echo a prompt to the user and wait for an answer
question -- string -- the prompt for the user
choices -- list -- if given, only exit when prompt matches one of the choices
return -- string -- the answer that was given by the user
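A usage sketch (the question text and choices are made up):
>>> answer = prompt("Overwrite existing file?", choices=["y", "n"])
>>> # re-asks until the user enters "y" or "n", then returns that string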
|
def function_namespace_inclusion_builder(func: str, namespace: Strings) -> NodePredicate:
if isinstance(namespace, str):
def function_namespaces_filter(_: BELGraph, node: BaseEntity) -> bool:
if func != node[FUNCTION]:
return False
return NAMESPACE in node and node[NAMESPACE] == namespace
elif isinstance(namespace, Iterable):
namespaces = set(namespace)
def function_namespaces_filter(_: BELGraph, node: BaseEntity) -> bool:
if func != node[FUNCTION]:
return False
return NAMESPACE in node and node[NAMESPACE] in namespaces
else:
raise ValueError('Invalid type for argument: {}'.format(namespace))
return function_namespaces_filter
|
Build a filter function for matching the given BEL function with the given namespace or namespaces.
:param func: A BEL function
:param namespace: The namespace to search by
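A usage sketch (assumes the PROTEIN constant from pybel.constants; the graph
and node are whatever you are filtering):
>>> is_hgnc_protein = function_namespace_inclusion_builder(PROTEIN, 'HGNC')
>>> keep = is_hgnc_protein(graph, node)  # True only for HGNC protein nodes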
|
def get_attached_devices_2(self):
_LOGGER.info("Get attached devices 2")
success, response = self._make_request(SERVICE_DEVICE_INFO,
"GetAttachDevice2")
if not success:
return None
success, devices_node = _find_node(
response.text,
".//GetAttachDevice2Response/NewAttachDevice")
if not success:
return None
xml_devices = devices_node.findall("Device")
devices = []
for d in xml_devices:
ip = _xml_get(d, 'IP')
name = _xml_get(d, 'Name')
mac = _xml_get(d, 'MAC')
signal = _convert(_xml_get(d, 'SignalStrength'), int)
link_type = _xml_get(d, 'ConnectionType')
link_rate = _xml_get(d, 'Linkspeed')
allow_or_block = _xml_get(d, 'AllowOrBlock')
device_type = _convert(_xml_get(d, 'DeviceType'), int)
device_model = _xml_get(d, 'DeviceModel')
ssid = _xml_get(d, 'SSID')
conn_ap_mac = _xml_get(d, 'ConnAPMAC')
devices.append(Device(name, ip, mac, link_type, signal, link_rate,
allow_or_block, device_type, device_model,
ssid, conn_ap_mac))
return devices
|
Return a list of devices connected to the router, with details.
This call is slower and probably heavier on the router load.
Returns None if error occurred.
|
def validateOpfJsonValue(value, opfJsonSchemaFilename):
jsonSchemaPath = os.path.join(os.path.dirname(__file__),
"jsonschema",
opfJsonSchemaFilename)
jsonhelpers.validate(value, schemaPath=jsonSchemaPath)
return
|
Validate a python object against an OPF json schema file
:param value: target python object to validate (typically a dictionary)
:param opfJsonSchemaFilename: (string) OPF json schema filename containing the
json schema object. (e.g., opfTaskControlSchema.json)
:raises: jsonhelpers.ValidationError when value fails json validation
|
def parse(self, path_to_xml=None):
if not path_to_xml:
if not self.path:
self.logger.error("No path defined!")
return
path_to_xml = self.path
root = self._clean_xml(path_to_xml)
if root.tag.lower() == 'collection':
tree = ET.ElementTree(root)
self.records = element_tree_collection_to_records(tree)
elif root.tag.lower() == 'record':
new_root = ET.Element('collection')
new_root.append(root)
tree = ET.ElementTree(new_root)
self.records = element_tree_collection_to_records(tree)
else:
header_subs = get_request_subfields(root)
records = root.find('ListRecords')
if records is None:
records = root.find('GetRecord')
if records is None:
raise ValueError("Cannot find ListRecords or GetRecord!")
tree = ET.ElementTree(records)
for record, is_deleted in element_tree_oai_records(tree, header_subs):
if is_deleted:
self.deleted_records.append(
self.create_deleted_record(record)
)
else:
self.records.append(record)
|
Parse an XML document and clean any namespaces.
|
def pylint_color(score):
score_cutoffs = (10, 9.5, 8.5, 7.5, 5)
for i in range(len(score_cutoffs)):
if score >= score_cutoffs[i]:
return BADGE_COLORS[i]
return BADGE_COLORS[-1]
|
Return Pylint badge color.
Parameters
----------
score : float
A Pylint score
Returns
-------
str
Badge color
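Example of how the cutoffs map to colors (BADGE_COLORS is assumed to be the
module-level color sequence ordered best to worst):
>>> pylint_color(9.7) == BADGE_COLORS[1]   # >= 9.5 but < 10
True
>>> pylint_color(3.0) == BADGE_COLORS[-1]  # below every cutoff
True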
|
def to_svg(self, zoom):
def on_dump():
knitting_pattern = self.patterns.at(0)
layout = GridLayout(knitting_pattern)
instruction_to_svg = default_instruction_svg_cache()
builder = SVGBuilder()
kp_to_svg = KnittingPatternToSVG(knitting_pattern, layout,
instruction_to_svg, builder, zoom)
return kp_to_svg.build_SVG_dict()
return XMLDumper(on_dump)
|
Create an SVG from the knitting pattern set.
:param float zoom: the height and width of a knit instruction
:return: a dumper to save the svg to
:rtype: knittingpattern.Dumper.XMLDumper
Example:
.. code:: python
>>> knitting_pattern_set.to_svg(25).temporary_path(".svg")
"/the/path/to/the/file.svg"
|
def download_file_from_google_drive(ID, destination):
def save_response_content(response, destination, chunk_size=32 * 1024):
total_size = int(response.headers.get('content-length', 0))
with open(destination, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True,
desc=destination):
if chunk:
f.write(chunk)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': ID}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': ID, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
|
Download file from Google Drive.
See ``tl.files.load_celebA_dataset`` for example.
Parameters
--------------
ID : str
The Google Drive file ID.
destination : str
The destination for save file.
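A usage sketch (the Drive ID below is a placeholder, not a real file):
>>> download_file_from_google_drive('0B_EXAMPLE_FILE_ID', 'data/celeba.zip')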
|
def schedules(self, schedules):
url = PATHS['UPDATE_SCHEDULES'] % self.id
data_format = "schedules[0][%s][]=%s&"
post_data = ""
for format_type, values in schedules.iteritems():
for value in values:
post_data += data_format % (format_type, value)
self.api.post(url=url, data=post_data)
|
Set the posting schedules for the specified social media profile.
|
def image_path(instance, filename):
filename, ext = os.path.splitext(filename.lower())
instance_id_hash = hashlib.md5(str(instance.id)).hexdigest()
filename_hash = ''.join(random.sample(hashlib.md5(filename.encode('utf-8')).hexdigest(), 8))
return '{}/{}{}'.format(instance_id_hash, filename_hash, ext)
|
Generates likely unique image path using md5 hashes
|
def convert_text(content, from_fmt, to_fmt, deparagraph=False, mathjax=False,
smart=True, extra_args=None):
logger = logging.getLogger(__name__)
if extra_args is not None:
extra_args = list(extra_args)
else:
extra_args = []
if mathjax:
extra_args.append('--mathjax')
if smart:
extra_args.append('--smart')
if deparagraph:
extra_args.append('--filter=lsstprojectmeta-deparagraph')
extra_args.append('--wrap=none')
extra_args = set(extra_args)
logger.debug('Running pandoc from %s to %s with extra_args %s',
from_fmt, to_fmt, extra_args)
output = pypandoc.convert_text(content, to_fmt, format=from_fmt,
extra_args=extra_args)
return output
|
Convert text from one markup format to another using pandoc.
This function is a thin wrapper around `pypandoc.convert_text`.
Parameters
----------
content : `str`
Original content.
from_fmt : `str`
Format of the original ``content``. Format identifier must be one of
those known by Pandoc. See https://pandoc.org/MANUAL.html for details.
to_fmt : `str`
Output format for the content.
deparagraph : `bool`, optional
If `True`, then the
`lsstprojectmeta.pandoc.filters.deparagraph.deparagraph` filter is
used to remove paragraph (``<p>``, for example) tags around a single
paragraph of content. That filter does not affect content that
consists of multiple blocks (several paragraphs, or lists, for
example). Default is `False`.
For example, **without** this filter Pandoc will convert
the string ``"Title text"`` to ``"<p>Title text</p>"`` in HTML. The
paragraph tags aren't useful if you intend to wrap the converted
content in different tags, like ``<h1>``, using your own templating
system.
**With** this filter, Pandoc will convert the string ``"Title text"``
to ``"Title text"`` in HTML.
mathjax : `bool`, optional
If `True` then Pandoc will markup output content to work with MathJax.
Default is False.
smart : `bool`, optional
If `True` (default) then ascii characters will be converted to unicode
characters like smart quotes and em dashes.
extra_args : `list`, optional
Sequence of extra Pandoc command line arguments (such as
``'--normalize'``). The ``deparagraph``, ``mathjax``, and ``smart``
arguments are convenience arguments that are equivalent to items
in ``extra_args``.
Returns
-------
output : `str`
Content in the output (``to_fmt``) format.
Notes
-----
This function will automatically install Pandoc if it is not available.
See `ensure_pandoc`.
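A usage sketch (assumes Pandoc is available, or installable via `ensure_pandoc`):
>>> html = convert_text('The *title*', 'markdown', 'html', deparagraph=True)
>>> # `html` holds the converted markup without a wrapping <p> tag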
|
def add_subtract(st, max_iter=7, max_npart='calc', max_mem=2e8,
always_check_remove=False, **kwargs):
if max_npart == 'calc':
max_npart = 0.05 * st.obj_get_positions().shape[0]
total_changed = 0
_change_since_opt = 0
removed_poses = []
added_poses0 = []
added_poses = []
nr = 1
for _ in range(max_iter):
if (nr != 0) or (always_check_remove):
nr, rposes = remove_bad_particles(st, **kwargs)
na, aposes = add_missing_particles(st, **kwargs)
current_changed = na + nr
removed_poses.extend(rposes)
added_poses0.extend(aposes)
total_changed += current_changed
_change_since_opt += current_changed
if current_changed == 0:
break
elif _change_since_opt > max_npart:
_change_since_opt *= 0
CLOG.info('Start add_subtract optimization.')
opt.do_levmarq(st, opt.name_globals(st, remove_params=st.get(
'psf').params), max_iter=1, run_length=4, num_eig_dirs=3,
max_mem=max_mem, eig_update_frequency=2, rz_order=0,
use_accel=True)
CLOG.info('After optimization:\t{:.6}'.format(st.error))
for p in added_poses0:
i = st.obj_closest_particle(p)
opt.do_levmarq_particles(st, np.array([i]), max_iter=2, damping=0.3)
added_poses.append(st.obj_get_positions()[i])
return total_changed, np.array(removed_poses), np.array(added_poses)
|
Automatically adds and subtracts missing & extra particles.
Operates by removing bad particles then adding missing particles on
repeat, until either no particles are added/removed or after `max_iter`
attempts.
Parameters
----------
st: :class:`peri.states.State`
The state to add and subtract particles to.
max_iter : Int, optional
The maximum number of add-subtract loops to use. Default is 7.
Terminates after either max_iter loops or when nothing has changed.
max_npart : Int or 'calc', optional
The maximum number of particles to add before optimizing the non-psf
globals. Default is ``'calc'``, which uses 5% of the initial number
of particles.
max_mem : Int, optional
The maximum memory to use for optimization after adding max_npart
particles. Default is 2e8.
always_check_remove : Bool, optional
Set to True to always check whether to remove particles. If ``False``,
only checks for removal while particles were removed on the previous
attempt. Default is False.
Other Parameters
----------------
invert : Bool, optional
``True`` if the particles are dark on a bright background, ``False``
if they are bright on a dark background. Default is ``True``.
min_rad : Float, optional
Particles with radius below ``min_rad`` are automatically deleted.
Default is ``'calc'`` = median rad - 25* radius std.
max_rad : Float, optional
Particles with radius above ``max_rad`` are automatically deleted.
Default is ``'calc'`` = median rad + 15* radius std, but you should
change this for your particle sizes.
min_edge_dist : Float, optional
Particles closer to the edge of the padded image than this are
automatically deleted. Default is 2.0.
check_rad_cutoff : 2-element float list.
Particles with ``radii < check_rad_cutoff[0]`` or ``> check...[1]``
are checked if they should be deleted (not automatic). Default is
``[3.5, 15]``.
check_outside_im : Bool, optional
Set to True to check whether to delete particles whose positions are
outside the un-padded image.
rad : Float, optional
The initial radius for added particles; added particles radii are
not fit until the end of ``add_subtract``. Default is ``'calc'``,
which uses the median radii of active particles.
tries : Int, optional
The number of particles to attempt to remove or add, per iteration.
Default is 50.
im_change_frac : Float, optional
How good the change in error needs to be relative to the change in
the difference image. Default is 0.2; i.e. if the error does not
decrease by 20% of the change in the difference image, do not add
the particle.
min_derr : Float, optional
The minimum change in the state's error to keep a particle in the
image. Default is ``'3sig'`` which uses ``3*st.sigma``.
do_opt : Bool, optional
Set to False to avoid optimizing particle positions after adding.
minmass : Float, optional
The minimum mass for a particle to be identified as a feature,
as used by trackpy. Defaults to a decent guess.
use_tp : Bool, optional
Set to True to use trackpy to find missing particles inside the
image. Not recommended since trackpy deliberately cuts out particles
at the edge of the image. Default is ``False``.
Returns
-------
total_changed : Int
The total number of adds and subtracts done on the data. Not the
same as ``changed_inds.size`` since the same particle or particle
index can be added/subtracted multiple times.
added_positions : [N_added,3] numpy.ndarray
The positions of particles that have been added at any point in the
add-subtract cycle.
removed_positions : [N_added,3] numpy.ndarray
The positions of particles that have been removed at any point in
the add-subtract cycle.
Notes
------
Occasionally after the initial featuring a cluster of particles is
featured as 1 big particle. To fix these mistakes, it helps to set
max_rad to a physical value. This removes the big particle and allows
it to be re-featured by (several passes of) the adds.
The added/removed positions returned are whether or not the position
has been added or removed ever. It's possible that a position is
added, then removed during a later iteration.
|
def GetTopLevelContainingType(self):
desc = self
while desc.containing_type is not None:
desc = desc.containing_type
return desc
|
Returns the root if this is a nested type, or itself if it is the root.
|
def get_network(context, id, fields=None):
LOG.info("get_network %s for tenant %s fields %s" %
(id, context.tenant_id, fields))
network = db_api.network_find(context=context, limit=None, sorts=['id'],
marker=None, page_reverse=False,
id=id, join_subnets=True, scope=db_api.ONE)
if not network:
raise n_exc.NetworkNotFound(net_id=id)
return v._make_network_dict(network, fields=fields)
|
Retrieve a network.
: param context: neutron api request context
: param id: UUID representing the network to fetch.
: param fields: a list of strings that are valid keys in a
network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
|
def contains_content_items(self, request, pk, course_run_ids, program_uuids):
enterprise_customer_catalog = self.get_object()
course_run_ids = [unquote(quote_plus(course_run_id)) for course_run_id in course_run_ids]
contains_content_items = True
if course_run_ids:
contains_content_items = enterprise_customer_catalog.contains_courses(course_run_ids)
if program_uuids:
contains_content_items = (
contains_content_items and
enterprise_customer_catalog.contains_programs(program_uuids)
)
return Response({'contains_content_items': contains_content_items})
|
Return whether or not the EnterpriseCustomerCatalog contains the specified content.
Multiple course_run_ids and/or program_uuids query parameters can be sent to this view to check
for their existence in the EnterpriseCustomerCatalog. At least one course run key
or program UUID value must be included in the request.
|
def copy(self, clr=None, d=0.0):
cr = ColorRange()
cr.name = self.name
cr.h = deepcopy(self.h)
cr.s = deepcopy(self.s)
cr.b = deepcopy(self.b)
cr.a = deepcopy(self.a)
cr.grayscale = self.grayscale
if not self.grayscale:
cr.black = self.black.copy()
cr.white = self.white.copy()
if clr != None:
cr.h, cr.a = clr.h + d * (random() * 2 - 1), clr.a
return cr
|
Returns a copy of the range.
Optionally, supply a color to get a range copy
limited to the hue of that color.
|
def clickMouseButtonRightWithMods(self, coord, modifiers):
modFlags = self._pressModifiers(modifiers)
self._queueMouseButton(coord, Quartz.kCGMouseButtonRight, modFlags)
self._releaseModifiers(modifiers)
self._postQueuedEvents()
|
Click the right mouse button with modifiers pressed.
Parameters: coordinates to click; modifiers (list)
Returns: None
|
def sim_typo(
src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'
):
return Typo().sim(src, tar, metric, cost, layout)
|
Return the normalized typo similarity between two strings.
This is a wrapper for :py:meth:`Typo.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and shift, respectively (by default:
(1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless a
log metric is used.
layout : str
Name of the keyboard layout to use (Currently supported:
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)
Returns
-------
float
Normalized typo similarity
Examples
--------
>>> round(sim_typo('cat', 'hat'), 12)
0.472953716914
>>> round(sim_typo('Niall', 'Neil'), 12)
0.434971857071
>>> round(sim_typo('Colin', 'Cuilen'), 12)
0.430964390437
>>> sim_typo('ATCG', 'TAGC')
0.375
|
def _user_headers(self, headers=None):
h = self.copy()
if headers is not None:
keys = set(headers.keys())
if h.get('Authorization', False):
keys -= {'Authorization'}
for key in keys:
h[key] = headers[key]
return h
|
Make sure the user doesn't override the Authorization header
|
def create_vocab(sentences, word_counts_output_file, min_word_count=1):
tl.logging.info("Creating vocabulary.")
counter = Counter()
for c in sentences:
counter.update(c)
tl.logging.info(" Total words: %d" % len(counter))
word_counts = [x for x in counter.items() if x[1] >= min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
word_counts = [("<PAD>", 0)] + word_counts
tl.logging.info(" Words in vocabulary: %d" % len(word_counts))
with tf.gfile.FastGFile(word_counts_output_file, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
tl.logging.info(" Wrote vocabulary file: %s" % word_counts_output_file)
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = SimpleVocabulary(vocab_dict, unk_id)
return vocab
|
Creates the vocabulary of word to word_id.
See ``tutorial_tfrecord3.py``.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Parameters
------------
sentences : list of list of str
All sentences for creating the vocabulary.
word_counts_output_file : str
The file name.
min_word_count : int
Minimum number of occurrences for a word.
Returns
--------
:class:`SimpleVocabulary`
The simple vocabulary object, see :class:`Vocabulary` for more.
Examples
--------
Pre-process sentences
>>> captions = ["one two , three", "four five five"]
>>> processed_capts = []
>>> for c in captions:
>>> c = tl.nlp.process_sentence(c, start_word="<S>", end_word="</S>")
>>> processed_capts.append(c)
>>> print(processed_capts)
...[['<S>', 'one', 'two', ',', 'three', '</S>'], ['<S>', 'four', 'five', 'five', '</S>']]
Create vocabulary
>>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1)
Creating vocabulary.
Total words: 8
Words in vocabulary: 8
Wrote vocabulary file: vocab.txt
Get vocabulary object
>>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="<S>", end_word="</S>", unk_word="<UNK>")
INFO:tensorflow:Initializing vocabulary from file: vocab.txt
[TL] Vocabulary from vocab.txt : <S> </S> <UNK>
vocabulary with 10 words (includes start_word, end_word, unk_word)
start_id: 2
end_id: 3
unk_id: 9
pad_id: 0
|
def getTableOfContents(self):
self.directory_size = self.getDirectorySize()
if self.directory_size > 65536:
self.directory_size += 2
self.requestContentDirectory()
directory_start = unpack("i", self.raw_bytes[self.directory_end + 16: self.directory_end + 20])[0]
self.raw_bytes = self.raw_bytes
current_start = directory_start - self.start
filestart = 0
compressedsize = 0
tableOfContents = []
try:
while True:
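# Central-directory file header layout (ZIP spec): compressed/uncompressed
# sizes at offsets 20/24, filename/extra/comment lengths at 28/30/32,
# local-header offset at 42, and the filename itself starting at offset 46.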
zip_n = unpack("H", self.raw_bytes[current_start + 28: current_start + 28 + 2])[0]
zip_m = unpack("H", self.raw_bytes[current_start + 30: current_start + 30 + 2])[0]
zip_k = unpack("H", self.raw_bytes[current_start + 32: current_start + 32 + 2])[0]
filename = self.raw_bytes[current_start + 46: current_start + 46 + zip_n]
filestart = unpack("I", self.raw_bytes[current_start + 42: current_start + 42 + 4])[0]
compressedsize = unpack("I", self.raw_bytes[current_start + 20: current_start + 20 + 4])[0]
uncompressedsize = unpack("I", self.raw_bytes[current_start + 24: current_start + 24 + 4])[0]
tableItem = {
'filename': filename,
'compressedsize': compressedsize,
'uncompressedsize': uncompressedsize,
'filestart': filestart
}
tableOfContents.append(tableItem)
current_start = current_start + 46 + zip_n + zip_m + zip_k
except:
pass
self.tableOfContents = tableOfContents
return tableOfContents
|
This function populates the internal tableOfContents list with the contents
of the zip file TOC. If the server does not support ranged requests, this will raise
an exception. It will also throw an exception if the TOC cannot be found.
|
def validate_response(expected_responses):
def internal_decorator(function):
@wraps(function)
async def wrapper(*args, **kwargs):
response = await function(*args, **kwargs)
for expected_response in expected_responses:
if response.startswith(expected_response):
return response
raise QRTCommandException(
"Expected %s but got %s" % (expected_responses, response)
)
return wrapper
return internal_decorator
|
Decorator to validate responses from QTM
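A usage sketch (the decorated coroutine below is hypothetical):
>>> @validate_response([b'Connected', b'Version is'])
... async def version(connection):
...     return await connection.send_command('Version')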
|
def html_to_text(cls, html):
s = cls()
s.feed(html)
unescaped_data = s.unescape(s.get_data())
return escape_for_xml(unescaped_data, tags_to_keep=s.mathml_elements)
|
Return stripped HTML, keeping only MathML.
|
def create_milestone_payment(session, project_id, bidder_id, amount,
reason, description):
milestone_data = {
'project_id': project_id,
'bidder_id': bidder_id,
'amount': amount,
'reason': reason,
'description': description
}
response = make_post_request(session, 'milestones',
json_data=milestone_data)
json_data = response.json()
if response.status_code == 200:
milestone_data = json_data['result']
return Milestone(milestone_data)
else:
raise MilestoneNotCreatedException(message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id'])
|
Create a milestone payment
|
def calc_pts_hg(npts=20):
pts_hg, wts_hg = np.polynomial.hermite.hermgauss(npts*2)
pts_hg = pts_hg[npts:]
wts_hg = wts_hg[npts:] * np.exp(pts_hg*pts_hg)
return pts_hg, wts_hg
|
Returns Hermite-Gauss quadrature points for even functions
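A quick sanity check (assumes numpy is imported as np): the returned weights
already include the exp(x**2) factor, so they integrate an even function over
[0, inf); for exp(-x**2) the result is sqrt(pi)/2.
>>> pts, wts = calc_pts_hg(20)
>>> round(float(np.sum(wts * np.exp(-pts**2))), 6)
0.886227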
|
def save_image(tensor, filename, nrow=8, padding=2, pad_value=0):
from PIL import Image
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)
im = Image.fromarray(pre_pillow_float_img_process(grid))
im.save(filename)
|
Save a given Tensor into an image file.
Args:
tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
saves the tensor as a grid of images by calling ``make_grid``.
**kwargs: Other arguments are documented in ``make_grid``.
|
def authorization_url(self, url, state=None, **kwargs):
state = state or self.new_state()
return (
self._client.prepare_request_uri(
url,
redirect_uri=self.redirect_uri,
scope=self.scope,
state=state,
**kwargs
),
state,
)
|
Form an authorization URL.
:param url: Authorization endpoint url, must be HTTPS.
:param state: An optional state string for CSRF protection. If not
given it will be generated for you.
:param kwargs: Extra parameters to include.
:return: authorization_url, state
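A usage sketch (`client` stands for the object this method belongs to; the
endpoint URL is a placeholder):
>>> url, state = client.authorization_url('https://provider.example/oauth/authorize')
>>> # `url` now carries the client_id, redirect_uri, scope and state query parameters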
|
def filter_objlist(olist, fieldname, fieldval):
return [x for x in olist if getattr(x, fieldname) == fieldval]
|
Returns a list of the objects in olist whose fieldname attribute equals fieldval
Parameters
----------
olist: list of objects
fieldname: string
fieldval: anything
Returns
-------
list of objects
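Example (using a throwaway namedtuple purely for illustration):
>>> from collections import namedtuple
>>> Obj = namedtuple('Obj', 'name kind')
>>> items = [Obj('a', 'img'), Obj('b', 'txt'), Obj('c', 'img')]
>>> [o.name for o in filter_objlist(items, 'kind', 'img')]
['a', 'c']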
|
def set_search_enviroment(cls, **kwargs):
initializer = _load_class(getattr(settings, "SEARCH_INITIALIZER", None), cls)()
return initializer.initialize(**kwargs)
|
Called from within the search handler.
Finds the desired subclass and calls its initialize method.
|
def get_ISBNs(self):
invalid_isbns = set(self.get_invalid_ISBNs())
valid_isbns = [
self._clean_isbn(isbn)
for isbn in self["020a"]
if self._clean_isbn(isbn) not in invalid_isbns
]
if valid_isbns:
return valid_isbns
return [
self._clean_isbn(isbn)
for isbn in self["901i"]
]
|
Get list of VALID ISBN.
Returns:
list: List with *valid* ISBN strings.
|
def _get_param_names(cls):
init = cls.__init__
args, varargs = inspect.getargspec(init)[:2]
if varargs is not None:
raise RuntimeError('BaseTransformer objects cannot have varargs')
args.pop(0)
args.sort()
return args
|
Get the list of parameter names for the object
|
def execute_reliabledictionary(client, application_name, service_name, input_file):
cluster = Cluster.from_sfclient(client)
service = cluster.get_application(application_name).get_service(service_name)
with open(input_file) as json_file:
json_data = json.load(json_file)
service.execute(json_data)
return
|
Execute create, update, and delete operations on existing reliable dictionaries
for the given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
:param input_file: input file with a list of JSON objects describing the operations to perform on the reliable dictionaries.
|
def configure_modsecurity(self):
r = self.local_renderer
if r.env.modsecurity_enabled and not self.last_manifest.modsecurity_enabled:
self.install_packages()
fn = self.render_to_file('apache/apache_modsecurity.template.conf')
r.put(local_path=fn, remote_path='/etc/modsecurity/modsecurity.conf', use_sudo=True)
r.env.modsecurity_download_filename = '/tmp/owasp-modsecurity-crs.tar.gz'
r.sudo('cd /tmp; wget --output-document={apache_modsecurity_download_filename} {apache_modsecurity_download_url}')
r.env.modsecurity_download_top = r.sudo(
"cd /tmp; "
"tar tzf %(apache_modsecurity_download_filename)s | sed -e 's@/.*@@' | uniq" % self.genv)
r.sudo('cd /tmp; tar -zxvf %(apache_modsecurity_download_filename)s' % self.genv)
r.sudo('cd /tmp; cp -R %(apache_modsecurity_download_top)s/* /etc/modsecurity/' % self.genv)
r.sudo('mv /etc/modsecurity/modsecurity_crs_10_setup.conf.example /etc/modsecurity/modsecurity_crs_10_setup.conf')
r.sudo('rm -f /etc/modsecurity/activated_rules/*')
r.sudo('cd /etc/modsecurity/base_rules; '
'for f in * ; do ln -s /etc/modsecurity/base_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.sudo('cd /etc/modsecurity/optional_rules; '
'for f in * ; do ln -s /etc/modsecurity/optional_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.env.httpd_conf_append.append('Include "/etc/modsecurity/activated_rules/*.conf"')
self.enable_mod('evasive')
self.enable_mod('headers')
elif not self.env.modsecurity_enabled and self.last_manifest.modsecurity_enabled:
self.disable_mod('modsecurity')
|
Installs the mod-security Apache module.
https://www.modsecurity.org
|
def get(self, ID, index='vector-web-s'):
url = self.get_url % index
r = self.gbdx_connection.get(url + ID)
r.raise_for_status()
return r.json()
|
Retrieves a vector. Not usually necessary because searching is the best way to find & get stuff.
Args:
ID (str): ID of the vector object
index (str): Optional. Index the object lives in. defaults to 'vector-web-s'
Returns:
record (dict): A dict object identical to the json representation of the catalog record
|
def create(self, workflow_id, email_id, data):
self.workflow_id = workflow_id
self.email_id = email_id
if 'email_address' not in data:
raise KeyError('The automation email queue must have an email_address')
check_email(data['email_address'])
response = self._mc_client._post(
url=self._build_path(workflow_id, 'emails', email_id, 'queue'),
data=data
)
if response is not None:
self.subscriber_hash = response['id']
else:
self.subscriber_hash = None
return response
|
Manually add a subscriber to a workflow, bypassing the default trigger
settings. You can also use this endpoint to trigger a series of
automated emails in an API 3.0 workflow type or add subscribers to an
automated email queue that uses the API request delay type.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"email_address": string*
}
|
def numpy_array_2d_from_fits(file_path, hdu):
hdu_list = fits.open(file_path)
return np.flipud(np.array(hdu_list[hdu].data))
|
Read a 2D NumPy array from a .fits file.
After loading the NumPy array, the array is flipped upside-down using np.flipud. This is so that the arrays \
appear the same orientation as .fits files loaded in DS9.
Parameters
----------
file_path : str
The full path of the file that is loaded, including the file name and '.fits' extension.
hdu : int
The HDU extension of the array that is loaded from the .fits file.
Returns
-------
ndarray
The NumPy array that is loaded from the .fits file.
Examples
--------
array_2d = numpy_array_2d_from_fits(file_path='/path/to/file/filename.fits', hdu=0)
|
def search(self, searchAreaWkt=None, filters=None, startDate=None, endDate=None, types=None):
if not types:
types = ['Acquisition']
if startDate:
startDateTime = datetime.datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S.%fZ')
if endDate:
endDateTime = datetime.datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S.%fZ')
if startDate and endDate:
diff = endDateTime - startDateTime
if diff.days < 0:
raise Exception("startDate must come before endDate.")
postdata = {
"searchAreaWkt": searchAreaWkt,
"types": types,
"startDate": startDate,
"endDate": endDate,
}
if filters:
postdata['filters'] = filters
if searchAreaWkt:
postdata['searchAreaWkt'] = searchAreaWkt
url = '%(base_url)s/search' % {
'base_url': self.base_url
}
headers = {'Content-Type':'application/json'}
r = self.gbdx_connection.post(url, headers=headers, data=json.dumps(postdata))
r.raise_for_status()
results = r.json()['results']
return results
|
Perform a catalog search
Args:
searchAreaWkt: WKT Polygon of area to search. Optional.
filters: Array of filters. Optional. Example:
[
"(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')",
"cloudCover < 10",
"offNadirAngle < 10"
]
startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
types: Array of types to search for. Optional. Example (and default): ["Acquisition"]
Returns:
catalog search resultset
|
def skip(course, num=1):
sel = None
try:
sel = Exercise.get_selected()
if sel.course.tid != course.tid:
sel = None
except NoExerciseSelected:
pass
if sel is None:
sel = course.exercises.first()
else:
try:
sel = Exercise.get(Exercise.id == sel.id + num)
except peewee.DoesNotExist:
print("There are no more exercises in this course.")
return False
sel.set_select()
list_all(single=sel)
|
Go to the next exercise.
|
def from_string(cls, key_pem, is_x509_cert):
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)
cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
else:
pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
return cls(pubkey)
|
Construct an RsaVerifier instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
RsaVerifier instance.
Raises:
ValueError: if the key_pem can't be parsed. In either case, error
will begin with 'No PEM start marker'. If
``is_x509_cert`` is True, will fail to find the
"-----BEGIN CERTIFICATE-----" error, otherwise fails
to find "-----BEGIN RSA PUBLIC KEY-----".
|
def matched_file_count(self, dataset_id, glob=".", is_dir=False):
list_result = self.list_files(dataset_id, glob, is_dir)
return len(list_result)
|
Returns the number of files matching a pattern in a dataset.
:param dataset_id: The ID of the dataset to search for files.
:type dataset_id: int
:param glob: A pattern which will be matched against files in the dataset.
:type glob: str
:param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.
:type is_dir: bool
:return: The number of matching files
:rtype: int
|
def get_object(cls, api_token):
acct = cls(token=api_token)
acct.load()
return acct
|
Class method that will return an Account object.
|
def parallel_starfeatures(lclist,
outdir,
lc_catalog_pickle,
neighbor_radius_arcsec,
maxobjects=None,
deredden=True,
custom_bandpasses=None,
lcformat='hat-sql',
lcformatdir=None,
nworkers=NCPUS):
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if not os.path.exists(outdir):
os.makedirs(outdir)
if maxobjects:
lclist = lclist[:maxobjects]
with open(lc_catalog_pickle, 'rb') as infd:
kdt_dict = pickle.load(infd)
kdt = kdt_dict['kdtree']
objlist = kdt_dict['objects']['objectid']
objlcfl = kdt_dict['objects']['lcfname']
tasks = [(x, outdir, kdt, objlist, objlcfl,
neighbor_radius_arcsec,
deredden, custom_bandpasses, lcformat) for x in lclist]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
resultfutures = executor.map(_starfeatures_worker, tasks)
results = [x for x in resultfutures]
resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)}
return resdict
|
This runs `get_starfeatures` in parallel for all light curves in `lclist`.
Parameters
----------
lclist : list of str
The list of light curve file names to process.
outdir : str
The output directory where the results will be placed.
lc_catalog_pickle : str
The path to a catalog containing at least a dict with:
- an object ID array accessible with `dict['objects']['objectid']`
- an LC filename array accessible with `dict['objects']['lcfname']`
- a `scipy.spatial.KDTree` or `cKDTree` object to use for finding
neighbors for each object accessible with `dict['kdtree']`
A catalog pickle of the form needed can be produced using
:py:func:`astrobase.lcproc.catalogs.make_lclist` or
:py:func:`astrobase.lcproc.catalogs.filter_lclist`.
neighbor_radius_arcsec : float
This indicates the radius in arcsec to search for neighbors for this
object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,
and in GAIA.
maxobjects : int
The number of objects to process from `lclist`.
deredden : bool
This controls if the colors and any color classifications will be
dereddened using 2MASS DUST.
custom_bandpasses : dict or None
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. this list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the first item is the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
nworkers : int
The number of parallel workers to launch.
Returns
-------
dict
A dict with key:val pairs of the input light curve filename and the
output star features pickle for each LC processed.
|
def status(self, pk=None, detail=False, **kwargs):
job = self.last_job_data(pk, **kwargs)
if detail:
return job
return {
'elapsed': job['elapsed'],
'failed': job['failed'],
'status': job['status'],
}
|
Print the status of the most recent update.
=====API DOCS=====
Print the status of the most recent update.
:param pk: Primary key of the resource to retrieve status from.
:type pk: int
:param detail: Flag that if set, return the full JSON of the job resource rather than a status summary.
:type detail: bool
:param `**kwargs`: Keyword arguments used to look up resource object to retrieve status from if ``pk``
is not provided.
:returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimmed JSON containing
only "elapsed", "failed" and "status" fields of the unified job if ``detail`` flag is off.
:rtype: dict
=====API DOCS=====
|
def query_by_names(cls, names):
assert isinstance(names, list)
return cls.query.filter(cls.name.in_(names))
|
Query group by a list of group names.
:param list names: List of the group names.
:returns: Query object.
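A usage sketch (assumes a configured SQLAlchemy session and that this model
is named Group):
>>> groups = Group.query_by_names(['admins', 'editors']).all()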
|
def read(cls, data,
protocol=None,
fallback_protocol=TBinaryProtocol,
finagle_thrift=False,
max_fields=MAX_FIELDS,
max_list_size=MAX_LIST_SIZE,
max_map_size=MAX_MAP_SIZE,
max_set_size=MAX_SET_SIZE,
read_values=False):
if len(data) < cls.MIN_MESSAGE_SIZE:
raise ValueError('not enough data')
if protocol is None:
protocol = cls.detect_protocol(data, fallback_protocol)
trans = TTransport.TMemoryBuffer(data)
proto = protocol(trans)
header = None
if finagle_thrift:
try:
header = ThriftStruct.read(
proto,
max_fields,
max_list_size,
max_map_size,
max_set_size,
read_values)
except:
trans = TTransport.TMemoryBuffer(data)
proto = protocol(trans)
method, mtype, seqid = proto.readMessageBegin()
mtype = cls.message_type_to_str(mtype)
if len(method) == 0 or method.isspace() or method.startswith(' '):
raise ValueError('no method name')
if len(method) > cls.MAX_METHOD_LENGTH:
raise ValueError('method name too long')
valid = range(33, 127)
if any(ord(char) not in valid for char in method):
raise ValueError('invalid method name: %s' % method)
args = ThriftStruct.read(
proto,
max_fields,
max_list_size,
max_map_size,
max_set_size,
read_values)
proto.readMessageEnd()
msglen = trans._buffer.tell()
return cls(method, mtype, seqid, args, header, msglen), msglen
|
tries to deserialize a message, might fail if data is missing
|
def swap_buffers(self):
self.frames += 1
glfw.swap_buffers(self.window)
self.poll_events()
|
Swaps buffers, increments the framecounter and polls events.
|
def mousePressEvent(self, e):
super(PyInteractiveConsole, self).mousePressEvent(e)
cursor = self.cursorForPosition(e.pos())
p = cursor.positionInBlock()
usd = cursor.block().userData()
if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:
if e.button() == QtCore.Qt.LeftButton:
self.open_file_requested.emit(usd.filename, usd.line)
|
Emits open_file_requested if the press event occurred over
a file location string.
|
def write_document(document, out, validate=True):
messages = []
messages = document.validate(messages)
if validate and messages:
raise InvalidDocumentError(messages)
out.write('# Document Information\n\n')
write_value('SPDXVersion', str(document.version), out)
write_value('DataLicense', document.data_license.identifier, out)
write_value('DocumentName', document.name, out)
write_value('SPDXID', 'SPDXRef-DOCUMENT', out)
write_value('DocumentNamespace', document.namespace, out)
if document.has_comment:
write_text_value('DocumentComment', document.comment, out)
for doc_ref in document.ext_document_references:
doc_ref_str = ' '.join([doc_ref.external_document_id,
doc_ref.spdx_document_uri,
doc_ref.check_sum.identifier + ':' +
doc_ref.check_sum.value])
write_value('ExternalDocumentRef', doc_ref_str, out)
write_separators(out)
write_creation_info(document.creation_info, out)
write_separators(out)
for review in sorted(document.reviews):
write_review(review, out)
write_separators(out)
for annotation in sorted(document.annotations):
write_annotation(annotation, out)
write_separators(out)
write_package(document.package, out)
write_separators(out)
out.write('# Extracted Licenses\n\n')
for lic in sorted(document.extracted_licenses):
write_extracted_licenses(lic, out)
write_separators(out)
|
Write an SPDX tag value document.
- document - spdx.document instance.
- out - file like object that will be written to.
Optionally `validate` the document before writing and raise
InvalidDocumentError if document.validate returns False.
|
def nodes_by_eigenvalue(self, treshold=0.0):
nodes = [(n.eigenvalue, n) for n in self.nodes if n.eigenvalue > treshold]
nodes.sort(); nodes.reverse()
return [n for w, n in nodes]
|
Returns nodes sorted by eigenvector centrality.
Nodes with a lot of incoming traffic will be at the front of the list
|
def _deactivate(self):
self.cache.remove_fetcher(self)
if self.active:
self._deactivated()
|
Remove the fetcher from cache and mark it not active.
|
def store_all(self):
with h5py.File(self.database.input, 'a') as io5:
fillsets = io5["quartets"]
qiter = itertools.combinations(xrange(len(self.samples)), 4)
i = 0
while i < self.params.nquartets:
dat = np.array(list(itertools.islice(qiter, self._chunksize)))
end = min(self.params.nquartets, dat.shape[0]+i)
fillsets[i:end] = dat[:end-i]
i += self._chunksize
print(min(i, self.params.nquartets))
|
Populate array with all possible quartets. This allows us to
sample from the total, and also to continue from a checkpoint
|
def adapt_animation_layout(animation):
layout = animation.layout
required = getattr(animation, 'LAYOUT_CLASS', None)
if not required or isinstance(layout, required):
return
msg = LAYOUT_WARNING % (
type(animation).__name__, required.__name__, type(layout).__name__)
setter = layout.set
adaptor = None
if required is strip.Strip:
if isinstance(layout, matrix.Matrix):
width = layout.width
def adaptor(pixel, color=None):
y, x = divmod(pixel, width)
setter(x, y, color or BLACK)
elif isinstance(layout, cube.Cube):
lx, ly = layout.x, layout.y
def adaptor(pixel, color=None):
yz, x = divmod(pixel, lx)
z, y = divmod(yz, ly)
setter(x, y, z, color or BLACK)
elif isinstance(layout, circle.Circle):
def adaptor(pixel, color=None):
layout._set_base(pixel, color or BLACK)
elif required is matrix.Matrix:
if isinstance(layout, strip.Strip):
width = animation.width
def adaptor(x, y, color=None):
setter(x + y * width, color or BLACK)
if not adaptor:
raise ValueError(msg)
log.warning(msg)
animation.layout.set = adaptor
|
Adapt the setter in an animation's layout so that Strip animations can run
on a Matrix, Cube, or Circle layout, and Matrix or Cube animations can run
on a Strip layout.
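The adaptors are plain index arithmetic; e.g. for a Strip animation running
on an 8-wide Matrix layout, strip pixel 11 lands on matrix cell x=3, y=1:
>>> divmod(11, 8)   # (y, x) as computed by the Strip-on-Matrix adaptor
(1, 3)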
|
def find_device(cls, timeout_sec=TIMEOUT_SEC):
return get_provider().find_device(service_uuids=cls.ADVERTISED, timeout_sec=timeout_sec)
|
Find the first available device that supports this service and return
it, or None if no device is found. Will wait for up to timeout_sec
seconds to find the device.
|
def viterbi_segment(text, P):
n = len(text)
words = [''] + list(text)
best = [1.0] + [0.0] * n
for i in range(n+1):
for j in range(0, i):
w = text[j:i]
if P[w] * best[i - len(w)] >= best[i]:
best[i] = P[w] * best[i - len(w)]
words[i] = w
sequence = []; i = len(words)-1
while i > 0:
sequence[0:0] = [words[i]]
i = i - len(words[i])
return sequence, best[-1]
|
Find the best segmentation of the string of characters, given the
UnigramTextModel P.
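A toy run (the probability model below is a stand-in for UnigramTextModel;
any mapping that returns a probability for every candidate substring works):
>>> class Toy(dict):
...     def __missing__(self, w): return 1e-10
>>> words, p = viterbi_segment('helloworld', Toy({'hello': 0.3, 'world': 0.2}))
>>> words
['hello', 'world']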
|
def trim(self, start_time, end_time, strict=False):
if self.file_metadata.duration is None:
raise JamsError(
'Duration must be set (jam.file_metadata.duration) before '
'trimming can be performed.')
if not (0 <= start_time <= end_time <= float(
self.file_metadata.duration)):
raise ParameterError(
'start_time and end_time must be within the original file '
'duration ({:f}) and end_time cannot be smaller than '
'start_time.'.format(float(self.file_metadata.duration)))
jam_trimmed = JAMS(annotations=None,
file_metadata=self.file_metadata,
sandbox=self.sandbox)
jam_trimmed.annotations = self.annotations.trim(
start_time, end_time, strict=strict)
if 'trim' not in jam_trimmed.sandbox.keys():
jam_trimmed.sandbox.update(
trim=[{'start_time': start_time, 'end_time': end_time}])
else:
jam_trimmed.sandbox.trim.append(
{'start_time': start_time, 'end_time': end_time})
return jam_trimmed
|
Trim all the annotations inside the jam and return as a new `JAMS`
object.
See `Annotation.trim` for details about how the annotations
are trimmed.
This operation is also documented in the jam-level sandbox
with a list keyed by ``JAMS.sandbox.trim`` containing a tuple for each
jam-level trim of the form ``(start_time, end_time)``.
This function also copies over all of the file metadata from the
original jam.
Note: trimming does not affect the duration of the jam, i.e. the value
of ``JAMS.file_metadata.duration`` will be the same for the original
and trimmed jams.
Parameters
----------
start_time : float
The desired start time for the trimmed annotations in seconds.
end_time : float
The desired end time for trimmed annotations in seconds. Must be
greater than ``start_time``.
strict : bool
When ``False`` (default) observations that lie at the boundaries of
the trimming range (see `Annotation.trim` for details), will have
their time and/or duration adjusted such that only the part of the
observation that lies within the trim range is kept. When ``True``
such observations are discarded and not included in the trimmed
annotation.
Returns
-------
jam_trimmed : JAMS
The trimmed jam with trimmed annotations, returned as a new JAMS
object.
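A brief usage sketch; `jam` is assumed to be a loaded JAMS object with ``file_metadata.duration`` already set:
jam_trimmed = jam.trim(2.0, 8.0)
print(jam_trimmed.sandbox.trim)          # [{'start_time': 2.0, 'end_time': 8.0}]
print(jam_trimmed.file_metadata.duration == jam.file_metadata.duration)   # True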
|
def unique(list):
    unique = []
    for x in list:
        if x not in unique:
            unique.append(x)
    return unique
|
Returns a copy of the list without duplicates.
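For example:
print(unique([1, 2, 2, 3, 1]))   # [1, 2, 3]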
|
def check_ip(self, ip):
self._last_result = None
if is_valid_ipv4(ip):
key = None
if self._use_cache:
key = self._make_cache_key(ip)
self._last_result = self._cache.get(key, version=self._cache_version)
if self._last_result is None:
error, age, threat, type = self._request_httpbl(ip)
if error == 127 or error == 0:
self._last_result = {
'error': error,
'age': age,
'threat': threat,
'type': type
}
if self._use_cache:
self._cache.set(key, self._last_result, timeout=self._api_timeout, version=self._cache_version)
if self._last_result is not None and settings.CACHED_HTTPBL_USE_LOGGING:
logger.info(
'httpBL check ip: {0}; '
'httpBL result: error: {1}, age: {2}, threat: {3}, type: {4}'.format(ip,
self._last_result['error'],
self._last_result['age'],
self._last_result['threat'],
self._last_result['type']
)
)
return self._last_result
|
Check an IP address through the httpBL API.
:param ip: IPv4 address
:return: httpBL results, or None if an error occurred
|
def unusedoptions(self, sections):
unused = set([])
for section in _list(sections):
if not self.has_section(section):
continue
options = self.options(section)
raw_values = [self.get(section, option, raw=True) for option in options]
for option in options:
formatter = "%(" + option + ")s"
for raw_value in raw_values:
if formatter in raw_value:
break
else:
unused.add(option)
return list(unused)
|
Lists options that have not been used to format other values in
their sections.
Good for finding out if the user has misspelled any of the options.
|
def get_zoom_levels(process_zoom_levels=None, init_zoom_levels=None):
process_zoom_levels = _validate_zooms(process_zoom_levels)
if init_zoom_levels is None:
return process_zoom_levels
else:
init_zoom_levels = _validate_zooms(init_zoom_levels)
if not set(init_zoom_levels).issubset(set(process_zoom_levels)):
raise MapcheteConfigError(
"init zooms must be a subset of process zoom")
return init_zoom_levels
|
Validate and return zoom levels.
|
def as_dict(self):
self_as_dict = {'chrom': self.chrom,
'start': self.start,
'ref_allele': self.ref_allele,
'alt_alleles': self.alt_alleles,
'alleles': [x.as_dict() for x in self.alleles]}
try:
self_as_dict['info'] = self.info
except AttributeError:
pass
return self_as_dict
|
Dict representation of parsed VCF data
|
def _MessageToJsonObject(message, including_default_value_fields):
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
return _WrapperMessageToJsonObject(message)
if full_name in _WKTJSONMETHODS:
return _WKTJSONMETHODS[full_name][0](
message, including_default_value_fields)
js = {}
return _RegularMessageToJsonObject(
message, js, including_default_value_fields)
|
Converts message to an object according to Proto3 JSON Specification.
|
def intraday(ticker, dt, session='', **kwargs) -> pd.DataFrame:
from xbbg.core import intervals
cur_data = bdib(ticker=ticker, dt=dt, typ=kwargs.get('typ', 'TRADE'))
if cur_data.empty: return pd.DataFrame()
fmt = '%H:%M:%S'
ss = intervals.SessNA
ref = kwargs.get('ref', None)
exch = pd.Series() if ref is None else const.exch_info(ticker=ref)
if session: ss = intervals.get_interval(
ticker=kwargs.get('ref', ticker), session=session
)
start_time = kwargs.get('start_time', None)
end_time = kwargs.get('end_time', None)
if ss != intervals.SessNA:
start_time = pd.Timestamp(ss.start_time).strftime(fmt)
end_time = pd.Timestamp(ss.end_time).strftime(fmt)
if start_time and end_time:
kw = dict(start_time=start_time, end_time=end_time)
if not exch.empty:
cur_tz = cur_data.index.tz
res = cur_data.tz_convert(exch.tz).between_time(**kw)
if kwargs.get('keep_tz', False):
res = res.tz_convert(cur_tz)
return pd.DataFrame(res)
return pd.DataFrame(cur_data.between_time(**kw))
return cur_data
|
Bloomberg intraday bar data within market session
Args:
ticker: ticker
dt: date
session: examples include
day_open_30, am_normal_30_30, day_close_30, allday_exact_0930_1000
**kwargs:
ref: reference ticker or exchange for timezone
        keep_tz: whether to keep the original timezone if a reference ticker / exchange is given
start_time: start time
end_time: end time
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
Returns:
pd.DataFrame
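A hypothetical call, assuming a live Bloomberg connection and that intraday bars exist for the ticker and date used here:
df = intraday(ticker='SPY US Equity', dt='2019-01-10', session='day_open_30')
# bars restricted to the first 30 minutes of the trading session;
# an empty DataFrame is returned if no data could be loaded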
|
def create_execution_state(self, topologyName, executionState):
if not executionState or not executionState.IsInitialized():
raise_(StateException("Execution State protobuf not init properly",
StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])
path = self.get_execution_state_path(topologyName)
LOG.info("Adding topology: {0} to path: {1}".format(
topologyName, path))
executionStateString = executionState.SerializeToString()
try:
self.client.create(path, value=executionStateString, makepath=True)
return True
except NoNodeError:
raise_(StateException("NoNodeError while creating execution state",
StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
except NodeExistsError:
raise_(StateException("NodeExistsError while creating execution state",
StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])
except ZookeeperError:
raise_(StateException("Zookeeper while creating execution state",
StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
except Exception:
raise
|
Create the execution state for the given topology in ZooKeeper.
|
def _post_start(self):
flags = fcntl.fcntl(self._process.stdout, fcntl.F_GETFL)
fcntl.fcntl(self._process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
|
Set stdout to non-blocking.
VLC does not always return a newline when reading status, so in order to
be lazy and still use the read API without caring about how much output
there is, we switch stdout to non-blocking mode and just read a large
chunk of data.
|
def keypoint_random_crop(image, annos, mask=None, size=(368, 368)):
_target_height = size[0]
_target_width = size[1]
target_size = (_target_width, _target_height)
if len(np.shape(image)) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
height, width, _ = np.shape(image)
for _ in range(50):
x = random.randrange(0, width - target_size[0]) if width > target_size[0] else 0
y = random.randrange(0, height - target_size[1]) if height > target_size[1] else 0
        # retry until the crop contains at least one visible keypoint
        for joint in annos:
            if x <= joint[0][0] < x + target_size[0] and y <= joint[0][1] < y + target_size[1]:
                break
        else:
            continue
        break
def pose_crop(image, annos, mask, x, y, w, h):
target_size = (w, h)
img = image
resized = img[y:y + target_size[1], x:x + target_size[0], :]
resized_mask = mask[y:y + target_size[1], x:x + target_size[0]]
adjust_joint_list = []
for joint in annos:
adjust_joint = []
for point in joint:
if point[0] < -10 or point[1] < -10:
adjust_joint.append((-1000, -1000))
continue
new_x, new_y = point[0] - x, point[1] - y
if new_x > w - 1 or new_y > h - 1:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((new_x, new_y))
adjust_joint_list.append(adjust_joint)
return resized, adjust_joint_list, resized_mask
return pose_crop(image, annos, mask, x, y, target_size[0], target_size[1])
|
Randomly crop an image and the corresponding keypoints without changing their scale, e.g. on an image produced by ``keypoint_random_resize_shortestedge``.
Parameters
-----------
image : 3 channel image
The given image for augmentation.
annos : list of list of floats
The keypoints annotation of people.
mask : single channel image or None
The mask if available.
size : tuple of int
The size of returned image.
Returns
----------
preprocessed image, annotation, mask
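A minimal hedged usage sketch with synthetic data (shapes and keypoint values are assumptions, chosen only so the crop fits inside the image):
import numpy as np

img = np.zeros((400, 500, 3), dtype=np.uint8)
mask = np.zeros((400, 500), dtype=np.uint8)
annos = [[(120.0, 200.0), (130.0, 210.0)]]    # one person, two keypoints
out_img, out_annos, out_mask = keypoint_random_crop(img, annos, mask, size=(368, 368))
print(out_img.shape)   # (368, 368, 3)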
|
def sigma2fwhm(sigma):
sigma = np.asarray(sigma)
return np.sqrt(8 * np.log(2)) * sigma
|
Convert a sigma in a Gaussian kernel to a FWHM value.
Parameters
----------
sigma: float or numpy.array
sigma value or values
Returns
-------
fwhm: float or numpy.array
fwhm values corresponding to `sigma` values
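Quick numeric check of the relation FWHM = 2 * sqrt(2 * ln 2) * sigma ≈ 2.3548 * sigma:
print(sigma2fwhm(1.0))           # ~2.3548
print(sigma2fwhm([1.0, 2.0]))    # ~[2.3548 4.7096]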
|
def iter_chunksize(num_samples, chunksize):
last_chunksize = int(np.mod(num_samples, chunksize))
chunksize = int(chunksize)
for _ in range(int(num_samples) // chunksize):
yield chunksize
if last_chunksize > 0:
yield last_chunksize
|
Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns `chunksize` except for the last iteration.
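For example:
print(list(iter_chunksize(10, 4)))   # [4, 4, 2]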
|
def make_hash(self, task):
t = [serialize_object(task['func_name'])[0],
serialize_object(task['fn_hash'])[0],
serialize_object(task['args'])[0],
serialize_object(task['kwargs'])[0],
serialize_object(task['env'])[0]]
x = b''.join(t)
hashedsum = hashlib.md5(x).hexdigest()
return hashedsum
|
Create a hash of the task inputs.
This uses a serialization library borrowed from ipyparallel.
If this fails here, then all ipp calls are also likely to fail due to failure
at serialization.
Args:
- task (dict) : Task dictionary from dfk.tasks
Returns:
- hash (str) : A unique hash string
|
def url_is_valid(self, url):
if url.startswith("file://"):
url = url.replace("file://","")
return os.path.exists(url)
|
Check if a file:// URL or local path exists on the filesystem
|
def create_comparison_state(image, position, radius=5.0, snr=20,
method='constrained-cubic', extrapad=2, zscale=1.0):
image = common.pad(image, extrapad, 0)
s = init.create_single_particle_state(imsize=np.array(image.shape), sigma=1.0/snr,
radius=radius, psfargs={'params': np.array([2.0, 1.0, 3.0]), 'error': 1e-6, 'threads': 2},
objargs={'method': method}, stateargs={'sigmapad': False, 'pad': 4, 'zscale': zscale})
s.obj.pos[0] = position + s.pad + extrapad
s.reset()
s.model_to_true_image()
timage = 1-np.pad(image, s.pad, mode='constant', constant_values=0)
timage = s.psf.execute(timage)
return s, timage[s.inner]
|
Take a platonic image and position and create a state which we can
use to sample the error for peri. Also return the blurred platonic
image so we can vary the noise on it later
|
def workflow_all_aggregate(graph: BELGraph,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
runs: Optional[int] = None,
aggregator: Optional[Callable[[Iterable[float]], float]] = None,
):
results = {}
bioprocess_nodes = list(get_nodes_by_function(graph, BIOPROCESS))
for bioprocess_node in tqdm(bioprocess_nodes):
subgraph = generate_mechanism(graph, bioprocess_node, key=key)
try:
results[bioprocess_node] = workflow_aggregate(
graph=subgraph,
node=bioprocess_node,
key=key,
tag=tag,
default_score=default_score,
runs=runs,
aggregator=aggregator
)
except Exception:
            log.exception('could not run on %s', bioprocess_node)
return results
|
Run the heat diffusion workflow to get average score for every possible candidate mechanism.
1. Get all biological processes
2. Get candidate mechanism induced two level back from each biological process
3. Heat diffusion workflow on each candidate mechanism for multiple runs
4. Report average scores for each candidate mechanism
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`.
Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max`
:return: A dictionary of {biological process node: aggregated score}
|
def _get_storage_service(credentials):
if credentials is None:
credentials = oauth2client.client.GoogleCredentials.get_application_default(
)
return discovery.build('storage', 'v1', credentials=credentials)
|
Get a storage client using the provided credentials or defaults.
|
def settle_to_markers(self, frame_no=0, max_distance=0.05, max_iters=300,
states=None):
if states is not None:
self.skeleton.set_body_states(states)
dist = None
for _ in range(max_iters):
for _ in self._step_to_marker_frame(frame_no):
pass
dist = np.nanmean(abs(self.markers.distances()))
logging.info('settling to frame %d: marker distance %.3f', frame_no, dist)
if dist < max_distance:
return self.skeleton.get_body_states()
for b in self.skeleton.bodies:
b.linear_velocity = 0, 0, 0
b.angular_velocity = 0, 0, 0
return states
|
Settle the skeleton to our marker data at a specific frame.
Parameters
----------
frame_no : int, optional
Settle the skeleton to marker data at this frame. Defaults to 0.
max_distance : float, optional
The settling process will stop when the mean marker distance falls
below this threshold. Defaults to 0.05m (5cm). Setting this too
small prevents the settling process from finishing (it will loop
indefinitely), and setting it too large prevents the skeleton from
settling to a stable state near the markers.
max_iters : int, optional
Attempt to settle markers for at most this many iterations. Defaults
to 300.
states : list of body states, optional
If given, set the bodies in our skeleton to these kinematic states
before starting the settling process.
|
def get_2d_markers_linearized(
self, component_info=None, data=None, component_position=None, index=None
):
return self._get_2d_markers(
data, component_info, component_position, index=index
)
|
Get 2D linearized markers.
:param index: Specify which camera to get 2D from, will be returned as
first entry in the returned array.
|
def coordinates(self, x0, y0, distance, angle):
x = x0 + cos(radians(angle)) * distance
y = y0 + sin(radians(angle)) * distance
return Point(x, y)
|
Calculates the coordinates of a point from the origin.
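A standalone sketch of the same trigonometry (plain floats instead of the library's Point class):
from math import cos, sin, radians

x0, y0, distance, angle = 0.0, 0.0, 10.0, 60.0
x = x0 + cos(radians(angle)) * distance   # 5.0
y = y0 + sin(radians(angle)) * distance   # ~8.66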
|
def translate_value(document_field, form_value):
value = form_value
if isinstance(document_field, ReferenceField):
value = document_field.document_type.objects.get(id=form_value) if form_value else None
return value
|
Given a document_field and a form_value this will translate the value
to the correct result for mongo to use.
|
def __get_ml_configuration_status(self, job_id):
failure_message = "Get status on ml configuration failed"
response = self._get_success_json(self._get(
'v1/descriptors/builders/simple/default/' + job_id + '/status', None, failure_message=failure_message))[
'data']
return response
|
After invoking the create_ml_configuration async method, you can use this method to
check on the status of the builder job.
:param job_id: The identifier returned from create_ml_configuration
:return: Job status
|
def predict_peptides(self, peptides):
from mhcflurry.encodable_sequences import EncodableSequences
binding_predictions = []
encodable_sequences = EncodableSequences.create(peptides)
for allele in self.alleles:
predictions_df = self.predictor.predict_to_dataframe(
encodable_sequences, allele=allele)
for (_, row) in predictions_df.iterrows():
binding_prediction = BindingPrediction(
allele=allele,
peptide=row.peptide,
affinity=row.prediction,
percentile_rank=(
row.prediction_percentile
if 'prediction_percentile' in row else nan),
prediction_method_name="mhcflurry"
)
binding_predictions.append(binding_prediction)
return BindingPredictionCollection(binding_predictions)
|
Predict MHC affinity for peptides.
|
def create(self, data):
if 'recipients' not in data:
raise KeyError('The campaign must have recipients')
if 'list_id' not in data['recipients']:
raise KeyError('The campaign recipients must have a list_id')
if 'settings' not in data:
raise KeyError('The campaign must have settings')
if 'subject_line' not in data['settings']:
raise KeyError('The campaign settings must have a subject_line')
if 'from_name' not in data['settings']:
raise KeyError('The campaign settings must have a from_name')
if 'reply_to' not in data['settings']:
raise KeyError('The campaign settings must have a reply_to')
check_email(data['settings']['reply_to'])
if 'type' not in data:
raise KeyError('The campaign must have a type')
    if data['type'] not in ['regular', 'plaintext', 'rss', 'variate', 'absplit']:
raise ValueError('The campaign type must be one of "regular", "plaintext", "rss", or "variate"')
if data['type'] == 'variate':
if 'variate_settings' not in data:
raise KeyError('The variate campaign must have variate_settings')
if 'winner_criteria' not in data['variate_settings']:
raise KeyError('The campaign variate_settings must have a winner_criteria')
if data['variate_settings']['winner_criteria'] not in ['opens', 'clicks', 'total_revenue', 'manual']:
raise ValueError('The campaign variate_settings '
'winner_criteria must be one of "opens", "clicks", "total_revenue", or "manual"')
if data['type'] == 'rss':
if 'rss_opts' not in data:
raise KeyError('The rss campaign must have rss_opts')
if 'feed_url' not in data['rss_opts']:
raise KeyError('The campaign rss_opts must have a feed_url')
if not data['rss_opts']['frequency'] in ['daily', 'weekly', 'monthly']:
raise ValueError('The rss_opts frequency must be one of "daily", "weekly", or "monthly"')
response = self._mc_client._post(url=self._build_path(), data=data)
if response is not None:
self.campaign_id = response['id']
else:
self.campaign_id = None
return response
|
Create a new MailChimp campaign.
The ValueError raised by an invalid type in data does not mention
'absplit' as a potential value because the documentation indicates
that the absplit type has been deprecated.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"recipients": object*
{
"list_id": string*
},
"settings": object*
{
"subject_line": string*,
"from_name": string*,
"reply_to": string*
},
"variate_settings": object* (Required if type is "variate")
{
"winner_criteria": string* (Must be one of "opens", "clicks", "total_revenue", or "manual")
},
"rss_opts": object* (Required if type is "rss")
{
"feed_url": string*,
"frequency": string* (Must be one of "daily", "weekly", or "monthly")
},
"type": string* (Must be one of "regular", "plaintext", "rss", "variate", or "absplit")
}
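A minimal hedged payload for a regular campaign (the list id and addresses are placeholders, and the client wiring is assumed):
data = {
    'recipients': {'list_id': 'abc123def4'},
    'settings': {
        'subject_line': 'Monthly update',
        'from_name': 'Example Co',
        'reply_to': 'news@example.com',
    },
    'type': 'regular',
}
# client.campaigns.create(data)   # hypothetical MailChimp client instance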
|
def create_disjunction_node(self, disjunction):
node = BNode()
type_triple = (node, RDF.type, self.spdx_namespace.DisjunctiveLicenseSet)
self.graph.add(type_triple)
licenses = self.licenses_from_tree(disjunction)
for lic in licenses:
member_triple = (node, self.spdx_namespace.member, lic)
self.graph.add(member_triple)
return node
|
Return a node representing a disjunction of licenses.
|
def sort_by_distance(self, reversed=False):
if len(self) == 0: return ColorList()
root = self[0]
for clr in self[1:]:
if clr.brightness < root.brightness:
root = clr
stack = [clr for clr in self]
stack.remove(root)
sorted = [root]
while len(stack) > 1:
closest, distance = stack[0], stack[0].distance(sorted[-1])
for clr in stack[1:]:
d = clr.distance(sorted[-1])
if d < distance:
closest, distance = clr, d
stack.remove(closest)
sorted.append(closest)
sorted.append(stack[0])
if reversed: _list.reverse(sorted)
return ColorList(sorted)
|
Returns a list ordered so that the distance between neighboring colors stays small, using a greedy nearest-neighbor pass that starts from the darkest color.
Finding the truly optimal ordering has factorial complexity, so this greedy approximation may still run slowly on large lists.
|
def guess_name(clr):
clr = Color(clr)
if clr.is_transparent: return "transparent"
if clr.is_black: return "black"
if clr.is_white: return "white"
for name in named_colors:
try:
r, g, b = named_colors[name]
except:
continue
if r == clr.r and g == clr.g and b == clr.b:
return name
for shade in shades:
if clr in shade:
return shade.name + " " + clr.nearest_hue()
break
return clr.nearest_hue()
|
Guesses the shade and hue name of a color.
If the given color is named in the named_colors list, return that name.
Otherwise guess its nearest hue and shade range.
|
def write_iodir(self, iodir=None):
if iodir is not None:
self.iodir = iodir
self._device.writeList(self.IODIR, self.iodir)
|
Write the specified byte value to the IODIR register. If no value is
specified, the current buffered value will be written.
|
def _get_implicit_credentials(cls):
environ_checkers = [
cls._implicit_credentials_from_files,
cls._implicit_credentials_from_gae,
cls._implicit_credentials_from_gce,
]
for checker in environ_checkers:
credentials = checker()
if credentials is not None:
return credentials
raise ApplicationDefaultCredentialsError(ADC_HELP_MSG)
|
Gets credentials implicitly from the environment.
Checks environment in order of precedence:
- Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to
a file with stored credentials information.
- Stored "well known" file associated with `gcloud` command line tool.
- Google App Engine (production and testing)
- Google Compute Engine production environment.
Raises:
ApplicationDefaultCredentialsError: raised when the credentials
fail to be retrieved.
|