repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) |
---|---|---|---|---|---|---|---|---|
wiheto/teneto
|
teneto/networkmeasures/fluctuability.py
|
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/fluctuability.py#L8-L121
|
def fluctuability(netin, calc='global'):
r"""
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuability to calculate. Options: 'global'
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given a fixed number of edge observations, F is lower when those observations repeat among a smaller set of edges
and higher when they are distributed across more unique edges.
.. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix that is 1 if there is at least one t such that :math:`G_{i,j,t} = 1` (i.e. at least one temporal edge exists).
F is not normalized, which makes comparisons of F across very different networks difficult (normalization could be added).
Examples
--------
This example compares the fluctuability of two different networks with the same number of edges.
Below are two temporal networks, both with 3 nodes and 3 time-points.
Both have 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indices.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indices.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctuability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_]
"""
# Get input type (C or G)
netin, _ = process_input(netin, ['C', 'G', 'TN'])
netin[netin != 0] = 1
unique_edges = np.sum(netin, axis=2)
unique_edges[unique_edges > 0] = 1
unique_edges[unique_edges == 0] = 0
fluct = (np.sum(unique_edges)) / np.sum(netin)
return fluct
|
[
"def",
"fluctuability",
"(",
"netin",
",",
"calc",
"=",
"'global'",
")",
":",
"# Get input type (C or G)",
"netin",
",",
"_",
"=",
"process_input",
"(",
"netin",
",",
"[",
"'C'",
",",
"'G'",
",",
"'TN'",
"]",
")",
"netin",
"[",
"netin",
"!=",
"0",
"]",
"=",
"1",
"unique_edges",
"=",
"np",
".",
"sum",
"(",
"netin",
",",
"axis",
"=",
"2",
")",
"unique_edges",
"[",
"unique_edges",
">",
"0",
"]",
"=",
"1",
"unique_edges",
"[",
"unique_edges",
"==",
"0",
"]",
"=",
"0",
"fluct",
"=",
"(",
"np",
".",
"sum",
"(",
"unique_edges",
")",
")",
"/",
"np",
".",
"sum",
"(",
"netin",
")",
"return",
"fluct"
] |
r"""
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuability to calculate. Options: 'global'
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given a fixed number of edge observations, F is lower when those observations repeat among a smaller set of edges
and higher when they are distributed across more unique edges.
.. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix that is 1 if there is at least one t such that :math:`G_{i,j,t} = 1` (i.e. at least one temporal edge exists).
F is not normalized, which makes comparisons of F across very different networks difficult (normalization could be added).
Examples
--------
This example compares the fluctuability of two different networks with the same number of edges.
Below are two temporal networks, both with 3 nodes and 3 time-points.
Both have 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indices.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indices.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctuability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_]
|
[
"r",
"Fluctuability",
"of",
"temporal",
"networks",
".",
"This",
"is",
"the",
"variation",
"of",
"the",
"network",
"s",
"edges",
"over",
"time",
".",
"[",
"fluct",
"-",
"1",
"]",
"_",
"This",
"is",
"the",
"unique",
"number",
"of",
"edges",
"through",
"time",
"divided",
"by",
"the",
"overall",
"number",
"of",
"edges",
"."
] |
python
|
train
|
photo/openphoto-python
|
trovebox/api/api_album.py
|
https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/api/api_album.py#L24-L42
|
def cover_update(self, album, photo, **kwds):
"""
Endpoint: /album/<album_id>/cover/<photo_id>/update.json
Update the cover photo of an album.
Returns the updated album object.
"""
result = self._client.post("/album/%s/cover/%s/update.json" %
(self._extract_id(album),
self._extract_id(photo)),
**kwds)["result"]
# API currently doesn't return the updated album
# (frontend issue #1369)
if isinstance(result, bool): # pragma: no cover
result = self._client.get("/album/%s/view.json" %
self._extract_id(album))["result"]
return Album(self._client, result)
|
[
"def",
"cover_update",
"(",
"self",
",",
"album",
",",
"photo",
",",
"*",
"*",
"kwds",
")",
":",
"result",
"=",
"self",
".",
"_client",
".",
"post",
"(",
"\"/album/%s/cover/%s/update.json\"",
"%",
"(",
"self",
".",
"_extract_id",
"(",
"album",
")",
",",
"self",
".",
"_extract_id",
"(",
"photo",
")",
")",
",",
"*",
"*",
"kwds",
")",
"[",
"\"result\"",
"]",
"# API currently doesn't return the updated album",
"# (frontend issue #1369)",
"if",
"isinstance",
"(",
"result",
",",
"bool",
")",
":",
"# pragma: no cover",
"result",
"=",
"self",
".",
"_client",
".",
"get",
"(",
"\"/album/%s/view.json\"",
"%",
"self",
".",
"_extract_id",
"(",
"album",
")",
")",
"[",
"\"result\"",
"]",
"return",
"Album",
"(",
"self",
".",
"_client",
",",
"result",
")"
] |
Endpoint: /album/<album_id>/cover/<photo_id>/update.json
Update the cover photo of an album.
Returns the updated album object.
|
[
"Endpoint",
":",
"/",
"album",
"/",
"<album_id",
">",
"/",
"cover",
"/",
"<photo_id",
">",
"/",
"update",
".",
"json"
] |
python
|
train
|
KnowledgeLinks/rdfframework
|
rdfframework/rml/processor.py
|
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rml/processor.py#L397-L401
|
def add_to_triplestore(self, output):
"""Method attempts to add output to Blazegraph RDF Triplestore"""
if len(output) > 0:
result = self.ext_conn.load_data(data=output.serialize(),
datatype='rdf')
|
[
"def",
"add_to_triplestore",
"(",
"self",
",",
"output",
")",
":",
"if",
"len",
"(",
"output",
")",
">",
"0",
":",
"result",
"=",
"self",
".",
"ext_conn",
".",
"load_data",
"(",
"data",
"=",
"output",
".",
"serialize",
"(",
")",
",",
"datatype",
"=",
"'rdf'",
")"
] |
Method attempts to add output to Blazegraph RDF Triplestore
|
[
"Method",
"attempts",
"to",
"add",
"output",
"to",
"Blazegraph",
"RDF",
"Triplestore"
] |
python
|
train
|
OCR-D/core
|
ocrd_models/ocrd_models/ocrd_file.py
|
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd_models/ocrd_models/ocrd_file.py#L96-L104
|
def pageId(self, pageId):
"""
Set the ID of the physical page this file manifests.
"""
if pageId is None:
return
if self.mets is None:
raise Exception("OcrdFile %s has no member 'mets' pointing to parent OcrdMets" % self)
self.mets.set_physical_page_for_file(pageId, self)
|
[
"def",
"pageId",
"(",
"self",
",",
"pageId",
")",
":",
"if",
"pageId",
"is",
"None",
":",
"return",
"if",
"self",
".",
"mets",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"OcrdFile %s has no member 'mets' pointing to parent OcrdMets\"",
"%",
"self",
")",
"self",
".",
"mets",
".",
"set_physical_page_for_file",
"(",
"pageId",
",",
"self",
")"
] |
Set the ID of the physical page this file manifests.
|
[
"Set",
"the",
"ID",
"of",
"the",
"physical",
"page",
"this",
"file",
"manifests",
"."
] |
python
|
train
|
facundobatista/yaswfp
|
yaswfp/swfparser.py
|
https://github.com/facundobatista/yaswfp/blob/2a2cc6ca4c0b4d52bd2e658fb5f80fdc0db4924c/yaswfp/swfparser.py#L1383-L1391
|
def _get_struct_blurfilter(self):
"""Get the values for the BLURFILTER record."""
obj = _make_object("BlurFilter")
obj.BlurX = unpack_fixed16(self._src)
obj.BlurY = unpack_fixed16(self._src)
bc = BitConsumer(self._src)
obj.Passes = bc.u_get(5)
obj.Reserved = bc.u_get(3)
return obj
|
[
"def",
"_get_struct_blurfilter",
"(",
"self",
")",
":",
"obj",
"=",
"_make_object",
"(",
"\"BlurFilter\"",
")",
"obj",
".",
"BlurX",
"=",
"unpack_fixed16",
"(",
"self",
".",
"_src",
")",
"obj",
".",
"BlurY",
"=",
"unpack_fixed16",
"(",
"self",
".",
"_src",
")",
"bc",
"=",
"BitConsumer",
"(",
"self",
".",
"_src",
")",
"obj",
".",
"Passes",
"=",
"bc",
".",
"u_get",
"(",
"5",
")",
"obj",
".",
"Reserved",
"=",
"bc",
".",
"u_get",
"(",
"3",
")",
"return",
"obj"
] |
Get the values for the BLURFILTER record.
|
[
"Get",
"the",
"values",
"for",
"the",
"BLURFILTER",
"record",
"."
] |
python
|
train
|
ryan-roemer/django-cloud-browser
|
cloud_browser/cloud/google.py
|
https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/cloud/google.py#L56-L64
|
def is_prefix(cls, result):
"""Return ``True`` if result is a prefix object.
.. note::
Boto uses the S3 Prefix object for GS prefixes.
"""
from boto.s3.prefix import Prefix
return isinstance(result, Prefix) or cls._is_gs_folder(result)
|
[
"def",
"is_prefix",
"(",
"cls",
",",
"result",
")",
":",
"from",
"boto",
".",
"s3",
".",
"prefix",
"import",
"Prefix",
"return",
"isinstance",
"(",
"result",
",",
"Prefix",
")",
"or",
"cls",
".",
"_is_gs_folder",
"(",
"result",
")"
] |
Return ``True`` if result is a prefix object.
.. note::
Boto uses the S3 Prefix object for GS prefixes.
|
[
"Return",
"True",
"if",
"result",
"is",
"a",
"prefix",
"object",
"."
] |
python
|
train
|
googlemaps/google-maps-services-python
|
googlemaps/client.py
|
https://github.com/googlemaps/google-maps-services-python/blob/7ed40b4d8df63479794c46ce29d03ed6083071d7/googlemaps/client.py#L416-L430
|
def urlencode_params(params):
"""URL encodes the parameters.
:param params: The parameters
:type params: list of key/value tuples.
:rtype: string
"""
# urlencode does not handle unicode strings in Python 2.
# Firstly, normalize the values so they get encoded correctly.
params = [(key, normalize_for_urlencode(val)) for key, val in params]
# Secondly, unquote unreserved chars which are incorrectly quoted
# by urllib.urlencode, causing invalid auth signatures. See GH #72
# for more info.
return requests.utils.unquote_unreserved(urlencode(params))
|
[
"def",
"urlencode_params",
"(",
"params",
")",
":",
"# urlencode does not handle unicode strings in Python 2.",
"# Firstly, normalize the values so they get encoded correctly.",
"params",
"=",
"[",
"(",
"key",
",",
"normalize_for_urlencode",
"(",
"val",
")",
")",
"for",
"key",
",",
"val",
"in",
"params",
"]",
"# Secondly, unquote unreserved chars which are incorrectly quoted",
"# by urllib.urlencode, causing invalid auth signatures. See GH #72",
"# for more info.",
"return",
"requests",
".",
"utils",
".",
"unquote_unreserved",
"(",
"urlencode",
"(",
"params",
")",
")"
] |
URL encodes the parameters.
:param params: The parameters
:type params: list of key/value tuples.
:rtype: string
|
[
"URL",
"encodes",
"the",
"parameters",
"."
] |
python
|
train
|
paylogic/pip-accel
|
pip_accel/__init__.py
|
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/__init__.py#L323-L363
|
def decorate_arguments(self, arguments):
"""
Change pathnames of local files into ``file://`` URLs with ``#md5=...`` fragments.
:param arguments: The command line arguments to ``pip install ...`` (a
list of strings).
:returns: A copy of the command line arguments with pathnames of local
files rewritten to ``file://`` URLs.
When pip-accel calls pip to download missing distribution archives and
the user specified the pathname of a local distribution archive on the
command line, pip will (by default) *not* copy the archive into the
download directory if an archive for the same package name and
version is already present.
This can lead to the confusing situation where the user specifies a
local distribution archive to install, a different (older) archive for
the same package and version is present in the download directory and
`pip-accel` installs the older archive instead of the newer archive.
To avoid this confusing behavior, the :func:`decorate_arguments()`
method rewrites the command line arguments given to ``pip install`` so
that pathnames of local archives are changed into ``file://`` URLs that
include a fragment with the hash of the file's contents. Here's an
example:
- Local pathname: ``/tmp/pep8-1.6.3a0.tar.gz``
- File URL: ``file:///tmp/pep8-1.6.3a0.tar.gz#md5=19cbf0b633498ead63fb3c66e5f1caf6``
When pip fills the download directory and encounters a previously
cached distribution archive it will check the hash, realize the
contents have changed and replace the archive in the download
directory.
"""
arguments = list(arguments)
for i, value in enumerate(arguments):
is_constraint_file = (i >= 1 and match_option(arguments[i - 1], '-c', '--constraint'))
is_requirement_file = (i >= 1 and match_option(arguments[i - 1], '-r', '--requirement'))
if not is_constraint_file and not is_requirement_file and os.path.isfile(value):
arguments[i] = '%s#md5=%s' % (create_file_url(value), hash_files('md5', value))
return arguments
|
[
"def",
"decorate_arguments",
"(",
"self",
",",
"arguments",
")",
":",
"arguments",
"=",
"list",
"(",
"arguments",
")",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"arguments",
")",
":",
"is_constraint_file",
"=",
"(",
"i",
">=",
"1",
"and",
"match_option",
"(",
"arguments",
"[",
"i",
"-",
"1",
"]",
",",
"'-c'",
",",
"'--constraint'",
")",
")",
"is_requirement_file",
"=",
"(",
"i",
">=",
"1",
"and",
"match_option",
"(",
"arguments",
"[",
"i",
"-",
"1",
"]",
",",
"'-r'",
",",
"'--requirement'",
")",
")",
"if",
"not",
"is_constraint_file",
"and",
"not",
"is_requirement_file",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"value",
")",
":",
"arguments",
"[",
"i",
"]",
"=",
"'%s#md5=%s'",
"%",
"(",
"create_file_url",
"(",
"value",
")",
",",
"hash_files",
"(",
"'md5'",
",",
"value",
")",
")",
"return",
"arguments"
] |
Change pathnames of local files into ``file://`` URLs with ``#md5=...`` fragments.
:param arguments: The command line arguments to ``pip install ...`` (a
list of strings).
:returns: A copy of the command line arguments with pathnames of local
files rewritten to ``file://`` URLs.
When pip-accel calls pip to download missing distribution archives and
the user specified the pathname of a local distribution archive on the
command line, pip will (by default) *not* copy the archive into the
download directory if an archive for the same package name and
version is already present.
This can lead to the confusing situation where the user specifies a
local distribution archive to install, a different (older) archive for
the same package and version is present in the download directory and
`pip-accel` installs the older archive instead of the newer archive.
To avoid this confusing behavior, the :func:`decorate_arguments()`
method rewrites the command line arguments given to ``pip install`` so
that pathnames of local archives are changed into ``file://`` URLs that
include a fragment with the hash of the file's contents. Here's an
example:
- Local pathname: ``/tmp/pep8-1.6.3a0.tar.gz``
- File URL: ``file:///tmp/pep8-1.6.3a0.tar.gz#md5=19cbf0b633498ead63fb3c66e5f1caf6``
When pip fills the download directory and encounters a previously
cached distribution archive it will check the hash, realize the
contents have changed and replace the archive in the download
directory.
|
[
"Change",
"pathnames",
"of",
"local",
"files",
"into",
"file",
":",
"//",
"URLs",
"with",
"#md5",
"=",
"...",
"fragments",
"."
] |
python
|
train
|
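The decorate_arguments docstring above describes turning local archive paths into ``file://`` URLs carrying an ``#md5=...`` fragment. A minimal standalone sketch of that rewrite, using only the standard library; create_file_url and hash_files mirror pip-accel's helpers and are re-implemented here purely for illustration:

import hashlib
import os
from urllib.request import pathname2url

def create_file_url(path):
    # Turn a local path into a file:// URL (stand-in for pip-accel's helper).
    return 'file://' + pathname2url(os.path.abspath(path))

def hash_files(method, path):
    # Hash the file contents with the named algorithm, e.g. 'md5'.
    digest = hashlib.new(method)
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()

def decorate(path):
    # '/tmp/pep8-1.6.3a0.tar.gz' -> 'file:///tmp/pep8-1.6.3a0.tar.gz#md5=...'
    return '%s#md5=%s' % (create_file_url(path), hash_files('md5', path))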
tadashi-aikawa/owlmixin
|
owlmixin/owlcollections.py
|
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/owlcollections.py#L477-L488
|
def reject(self, func):
"""
:param func:
:type func: (K, T) -> bool
:rtype: TList[T]
Usage:
>>> TDict(k1=1, k2=2, k3=3).reject(lambda k, v: v < 3)
[3]
"""
return TList([v for k, v in self.items() if not func(k, v)])
|
[
"def",
"reject",
"(",
"self",
",",
"func",
")",
":",
"return",
"TList",
"(",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"if",
"not",
"func",
"(",
"k",
",",
"v",
")",
"]",
")"
] |
:param func:
:type func: (K, T) -> bool
:rtype: TList[T]
Usage:
>>> TDict(k1=1, k2=2, k3=3).reject(lambda k, v: v < 3)
[3]
|
[
":",
"param",
"func",
":",
":",
"type",
"func",
":",
"(",
"K",
"T",
")",
"-",
">",
"bool",
":",
"rtype",
":",
"TList",
"[",
"T",
"]"
] |
python
|
train
|
SmBe19/praw-OAuth2Util
|
OAuth2Util/OAuth2Util.py
|
https://github.com/SmBe19/praw-OAuth2Util/blob/ca0a2d4d7eefcc681aac92c9cd4b83cd9ea6c5fe/OAuth2Util/OAuth2Util.py#L208-L218
|
def _migrate_config(self, oldname=DEFAULT_CONFIG, newname=DEFAULT_CONFIG):
"""
Migrates the old config file format to the new one
"""
self._log("Your OAuth2Util config file is in an old format and needs "
"to be changed. I tried as best as I could to migrate it.", logging.WARNING)
with open(oldname, "r") as old:
with open(newname, "w") as new:
new.write("[app]\n")
new.write(old.read())
|
[
"def",
"_migrate_config",
"(",
"self",
",",
"oldname",
"=",
"DEFAULT_CONFIG",
",",
"newname",
"=",
"DEFAULT_CONFIG",
")",
":",
"self",
".",
"_log",
"(",
"\"Your OAuth2Util config file is in an old format and needs \"",
"\"to be changed. I tried as best as I could to migrate it.\"",
",",
"logging",
".",
"WARNING",
")",
"with",
"open",
"(",
"oldname",
",",
"\"r\"",
")",
"as",
"old",
":",
"with",
"open",
"(",
"newname",
",",
"\"w\"",
")",
"as",
"new",
":",
"new",
".",
"write",
"(",
"\"[app]\\n\"",
")",
"new",
".",
"write",
"(",
"old",
".",
"read",
"(",
")",
")"
] |
Migrates the old config file format to the new one
|
[
"Migrates",
"the",
"old",
"config",
"file",
"format",
"to",
"the",
"new",
"one"
] |
python
|
test
|
Clinical-Genomics/scout
|
scout/utils/acmg.py
|
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/utils/acmg.py#L2-L55
|
def is_pathogenic(pvs, ps_terms, pm_terms, pp_terms):
"""Check if the criterias for Pathogenic is fullfilled
The following are descriptions of Pathogenic clasification from ACMG paper:
Pathogenic
(i) 1 Very strong (PVS1) AND
(a) ≥1 Strong (PS1–PS4) OR
(b) ≥2 Moderate (PM1–PM6) OR
(c) 1 Moderate (PM1–PM6) and 1 supporting (PP1–PP5) OR
(d) ≥2 Supporting (PP1–PP5)
(ii) ≥2 Strong (PS1–PS4) OR
(iii) 1 Strong (PS1–PS4) AND
(a)≥3 Moderate (PM1–PM6) OR
(b)2 Moderate (PM1–PM6) AND ≥2 Supporting (PP1–PP5) OR
(c)1 Moderate (PM1–PM6) AND ≥4 supporting (PP1–PP5)
Args:
pvs(bool): Pathogenic Very Strong
ps_terms(list(str)): Pathogenic Strong terms
pm_terms(list(str)): Pathogenic Moderate terms
pp_terms(list(str)): Pathogenic Supporting terms
Returns:
bool: if classification indicates Pathogenic level
"""
if pvs:
# Pathogenic (i)(a):
if ps_terms:
return True
if pm_terms:
# Pathogenic (i)(c):
if pp_terms:
return True
# Pathogenic (i)(b):
if len(pm_terms) >= 2:
return True
# Pathogenic (i)(d):
if len(pp_terms) >= 2:
return True
if ps_terms:
# Pathogenic (ii):
if len(ps_terms) >= 2:
return True
# Pathogenic (iii)(a):
if pm_terms:
if len(pm_terms) >= 3:
return True
elif len(pm_terms) >= 2:
if len(pp_terms) >= 2:
return True
elif len(pp_terms) >= 4:
return True
return False
|
[
"def",
"is_pathogenic",
"(",
"pvs",
",",
"ps_terms",
",",
"pm_terms",
",",
"pp_terms",
")",
":",
"if",
"pvs",
":",
"# Pathogenic (i)(a):",
"if",
"ps_terms",
":",
"return",
"True",
"if",
"pm_terms",
":",
"# Pathogenic (i)(c):",
"if",
"pp_terms",
":",
"return",
"True",
"# Pathogenic (i)(b):",
"if",
"len",
"(",
"pm_terms",
")",
">=",
"2",
":",
"return",
"True",
"# Pathogenic (i)(d):",
"if",
"len",
"(",
"pp_terms",
")",
">=",
"2",
":",
"return",
"True",
"if",
"ps_terms",
":",
"# Pathogenic (ii):",
"if",
"len",
"(",
"ps_terms",
")",
">=",
"2",
":",
"return",
"True",
"# Pathogenic (iii)(a):",
"if",
"pm_terms",
":",
"if",
"len",
"(",
"pm_terms",
")",
">=",
"3",
":",
"return",
"True",
"elif",
"len",
"(",
"pm_terms",
")",
">=",
"2",
":",
"if",
"len",
"(",
"pp_terms",
")",
">=",
"2",
":",
"return",
"True",
"elif",
"len",
"(",
"pp_terms",
")",
">=",
"4",
":",
"return",
"True",
"return",
"False"
] |
Check if the criteria for Pathogenic are fulfilled
The following are descriptions of the Pathogenic classification from the ACMG paper:
Pathogenic
(i) 1 Very strong (PVS1) AND
(a) ≥1 Strong (PS1–PS4) OR
(b) ≥2 Moderate (PM1–PM6) OR
(c) 1 Moderate (PM1–PM6) and 1 supporting (PP1–PP5) OR
(d) ≥2 Supporting (PP1–PP5)
(ii) ≥2 Strong (PS1–PS4) OR
(iii) 1 Strong (PS1–PS4) AND
(a)≥3 Moderate (PM1–PM6) OR
(b)2 Moderate (PM1–PM6) AND ≥2 Supporting (PP1–PP5) OR
(c)1 Moderate (PM1–PM6) AND ≥4 supporting (PP1–PP5)
Args:
pvs(bool): Pathogenic Very Strong
ps_terms(list(str)): Pathogenic Strong terms
pm_terms(list(str)): Pathogenic Moderate terms
pp_terms(list(str)): Pathogenic Supporting terms
Returns:
bool: if classification indicates Pathogenic level
|
[
"Check",
"if",
"the",
"criterias",
"for",
"Pathogenic",
"is",
"fullfilled"
] |
python
|
test
|
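The criteria combinations in the is_pathogenic docstring above can be exercised directly; a hedged usage sketch with hypothetical evidence terms (the import path is assumed from the row's module path):

from scout.utils.acmg import is_pathogenic

# (i)(c): PVS1 plus one Moderate and one Supporting term -> Pathogenic
assert is_pathogenic(True, [], ['PM3'], ['PP2'])
# (ii): two Strong terms without PVS1 -> Pathogenic
assert is_pathogenic(False, ['PS1', 'PS3'], [], [])
# One Strong plus a single Moderate term does not satisfy (iii)
assert not is_pathogenic(False, ['PS1'], ['PM1'], [])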
jobovy/galpy
|
galpy/potential/FerrersPotential.py
|
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/FerrersPotential.py#L467-L474
|
def _FracInt(x,y,z,a,b,c,tau,n):
"""Returns
1 x^2 y^2 z^2
-------------------------- (1 - ------- - ------- - -------)^n
sqrt(tau+a)(tau+b)(tau+c)) tau+a tau+b tau+c
"""
denom = np.sqrt((a + tau)*(b + tau)*(c + tau))
return (1. - x**2/(a + tau) - y**2/(b + tau) - z**2/(c + tau))**n / denom
|
[
"def",
"_FracInt",
"(",
"x",
",",
"y",
",",
"z",
",",
"a",
",",
"b",
",",
"c",
",",
"tau",
",",
"n",
")",
":",
"denom",
"=",
"np",
".",
"sqrt",
"(",
"(",
"a",
"+",
"tau",
")",
"*",
"(",
"b",
"+",
"tau",
")",
"*",
"(",
"c",
"+",
"tau",
")",
")",
"return",
"(",
"1.",
"-",
"x",
"**",
"2",
"/",
"(",
"a",
"+",
"tau",
")",
"-",
"y",
"**",
"2",
"/",
"(",
"b",
"+",
"tau",
")",
"-",
"z",
"**",
"2",
"/",
"(",
"c",
"+",
"tau",
")",
")",
"**",
"n",
"/",
"denom"
] |
Returns
(1 - x^2/(tau+a) - y^2/(tau+b) - z^2/(tau+c))^n / sqrt((tau+a)(tau+b)(tau+c))
|
[
"Returns",
"1",
"x^2",
"y^2",
"z^2",
"--------------------------",
"(",
"1",
"-",
"-------",
"-",
"-------",
"-",
"-------",
")",
"^n",
"sqrt",
"(",
"tau",
"+",
"a",
")",
"(",
"tau",
"+",
"b",
")",
"(",
"tau",
"+",
"c",
"))",
"tau",
"+",
"a",
"tau",
"+",
"b",
"tau",
"+",
"c"
] |
python
|
train
|
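A short numerical check of the expression above (re-implemented locally, since _FracInt is a private helper); for x = y = z = 0 the bracket collapses to 1 and only the square-root denominator remains:

import numpy as np

def frac_int(x, y, z, a, b, c, tau, n):
    # (1 - x^2/(a+tau) - y^2/(b+tau) - z^2/(c+tau))^n / sqrt((a+tau)(b+tau)(c+tau))
    denom = np.sqrt((a + tau) * (b + tau) * (c + tau))
    return (1. - x**2 / (a + tau) - y**2 / (b + tau) - z**2 / (c + tau))**n / denom

print(frac_int(0., 0., 0., 1., 2., 3., 0.5, 2))  # 1/sqrt(1.5*2.5*3.5) ~= 0.276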
fermiPy/fermipy
|
fermipy/diffuse/name_policy.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L404-L416
|
def mcube(self, **kwargs):
""" return the name of a model cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.mcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath
|
[
"def",
"mcube",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs_copy",
"=",
"self",
".",
"base_dict",
".",
"copy",
"(",
")",
"kwargs_copy",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"kwargs_copy",
"[",
"'dataset'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'dataset'",
",",
"self",
".",
"dataset",
"(",
"*",
"*",
"kwargs",
")",
")",
"kwargs_copy",
"[",
"'component'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'component'",
",",
"self",
".",
"component",
"(",
"*",
"*",
"kwargs",
")",
")",
"self",
".",
"_replace_none",
"(",
"kwargs_copy",
")",
"localpath",
"=",
"NameFactory",
".",
"mcube_format",
".",
"format",
"(",
"*",
"*",
"kwargs_copy",
")",
"if",
"kwargs",
".",
"get",
"(",
"'fullpath'",
",",
"False",
")",
":",
"return",
"self",
".",
"fullpath",
"(",
"localpath",
"=",
"localpath",
")",
"return",
"localpath"
] |
return the name of a model cube file
|
[
"return",
"the",
"name",
"of",
"a",
"model",
"cube",
"file"
] |
python
|
train
|
ibis-project/ibis
|
ibis/impala/client.py
|
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/impala/client.py#L1779-L1792
|
def exists_uda(self, name, database=None):
"""
Checks if a given UDAF exists within a specified database
Parameters
----------
name : string, UDAF name
database : string, database name
Returns
-------
if_exists : boolean
"""
return len(self.list_udas(database=database, like=name)) > 0
|
[
"def",
"exists_uda",
"(",
"self",
",",
"name",
",",
"database",
"=",
"None",
")",
":",
"return",
"len",
"(",
"self",
".",
"list_udas",
"(",
"database",
"=",
"database",
",",
"like",
"=",
"name",
")",
")",
">",
"0"
] |
Checks if a given UDAF exists within a specified database
Parameters
----------
name : string, UDAF name
database : string, database name
Returns
-------
if_exists : boolean
|
[
"Checks",
"if",
"a",
"given",
"UDAF",
"exists",
"within",
"a",
"specified",
"database"
] |
python
|
train
|
rocky/python3-trepan
|
trepan/processor/cmdproc.py
|
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/cmdproc.py#L639-L674
|
def process_commands(self):
"""Handle debugger commands."""
if self.core.execution_status != 'No program':
self.setup()
self.location()
pass
leave_loop = run_hooks(self, self.preloop_hooks)
self.continue_running = False
while not leave_loop:
try:
run_hooks(self, self.precmd_hooks)
# bdb had a True return to leave loop.
# A more straight-forward way is to set
# instance variable self.continue_running.
leave_loop = self.process_command()
if leave_loop or self.continue_running: break
except EOFError:
# If we have stacked interfaces, pop to the next
# one. If this is the last one however, we'll
# just stick with that. FIXME: Possibly we should
# check to see if we are interactive. and not
# leave if that's the case. Is this the right
# thing? investigate and fix.
if len(self.debugger.intf) > 1:
del self.debugger.intf[-1]
self.last_command = ''
else:
if self.debugger.intf[-1].output:
self.debugger.intf[-1].output.writeline('Leaving')
raise Mexcept.DebuggerQuit
pass
break
pass
pass
return run_hooks(self, self.postcmd_hooks)
|
[
"def",
"process_commands",
"(",
"self",
")",
":",
"if",
"self",
".",
"core",
".",
"execution_status",
"!=",
"'No program'",
":",
"self",
".",
"setup",
"(",
")",
"self",
".",
"location",
"(",
")",
"pass",
"leave_loop",
"=",
"run_hooks",
"(",
"self",
",",
"self",
".",
"preloop_hooks",
")",
"self",
".",
"continue_running",
"=",
"False",
"while",
"not",
"leave_loop",
":",
"try",
":",
"run_hooks",
"(",
"self",
",",
"self",
".",
"precmd_hooks",
")",
"# bdb had a True return to leave loop.",
"# A more straight-forward way is to set",
"# instance variable self.continue_running.",
"leave_loop",
"=",
"self",
".",
"process_command",
"(",
")",
"if",
"leave_loop",
"or",
"self",
".",
"continue_running",
":",
"break",
"except",
"EOFError",
":",
"# If we have stacked interfaces, pop to the next",
"# one. If this is the last one however, we'll",
"# just stick with that. FIXME: Possibly we should",
"# check to see if we are interactive. and not",
"# leave if that's the case. Is this the right",
"# thing? investigate and fix.",
"if",
"len",
"(",
"self",
".",
"debugger",
".",
"intf",
")",
">",
"1",
":",
"del",
"self",
".",
"debugger",
".",
"intf",
"[",
"-",
"1",
"]",
"self",
".",
"last_command",
"=",
"''",
"else",
":",
"if",
"self",
".",
"debugger",
".",
"intf",
"[",
"-",
"1",
"]",
".",
"output",
":",
"self",
".",
"debugger",
".",
"intf",
"[",
"-",
"1",
"]",
".",
"output",
".",
"writeline",
"(",
"'Leaving'",
")",
"raise",
"Mexcept",
".",
"DebuggerQuit",
"pass",
"break",
"pass",
"pass",
"return",
"run_hooks",
"(",
"self",
",",
"self",
".",
"postcmd_hooks",
")"
] |
Handle debugger commands.
|
[
"Handle",
"debugger",
"commands",
"."
] |
python
|
test
|
googleapis/google-cloud-python
|
spanner/google/cloud/spanner_v1/database.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/database.py#L392-L407
|
def batch_snapshot(self, read_timestamp=None, exact_staleness=None):
"""Return an object which wraps a batch read / query.
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
:rtype: :class:`~google.cloud.spanner_v1.database.BatchSnapshot`
:returns: new wrapper
"""
return BatchSnapshot(
self, read_timestamp=read_timestamp, exact_staleness=exact_staleness
)
|
[
"def",
"batch_snapshot",
"(",
"self",
",",
"read_timestamp",
"=",
"None",
",",
"exact_staleness",
"=",
"None",
")",
":",
"return",
"BatchSnapshot",
"(",
"self",
",",
"read_timestamp",
"=",
"read_timestamp",
",",
"exact_staleness",
"=",
"exact_staleness",
")"
] |
Return an object which wraps a batch read / query.
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
:rtype: :class:`~google.cloud.spanner_v1.database.BatchSnapshot`
:returns: new wrapper
|
[
"Return",
"an",
"object",
"which",
"wraps",
"a",
"batch",
"read",
"/",
"query",
"."
] |
python
|
train
|
hadrianl/huobi
|
huobitrade/service.py
|
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L63-L74
|
def get_kline(self, symbol, period, size=150, _async=False):
"""
Get KLine (candlestick) data.
:param symbol:
:param period: valid values: {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
:param size: valid values: [1,2000]
:return:
"""
params = {'symbol': symbol, 'period': period, 'size': size}
url = u.MARKET_URL + '/market/history/kline'
return http_get_request(url, params, _async=_async)
|
[
"def",
"get_kline",
"(",
"self",
",",
"symbol",
",",
"period",
",",
"size",
"=",
"150",
",",
"_async",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'symbol'",
":",
"symbol",
",",
"'period'",
":",
"period",
",",
"'size'",
":",
"size",
"}",
"url",
"=",
"u",
".",
"MARKET_URL",
"+",
"'/market/history/kline'",
"return",
"http_get_request",
"(",
"url",
",",
"params",
",",
"_async",
"=",
"_async",
")"
] |
Get KLine (candlestick) data.
:param symbol:
:param period: valid values: {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
:param size: valid values: [1,2000]
:return:
|
[
"获取KLine",
":",
"param",
"symbol",
":",
"param",
"period",
":",
"可选值:",
"{",
"1min",
"5min",
"15min",
"30min",
"60min",
"1day",
"1mon",
"1week",
"1year",
"}",
":",
"param",
"size",
":",
"可选值:",
"[",
"1",
"2000",
"]",
":",
"return",
":"
] |
python
|
train
|
edoburu/django-private-storage
|
private_storage/servers.py
|
https://github.com/edoburu/django-private-storage/blob/35b718024fee75b0ed3400f601976b20246c7d05/private_storage/servers.py#L43-L56
|
def add_no_cache_headers(func):
"""
Makes sure the retrieved file is not cached on disk, or cached by proxy servers in between.
This would circumvent any checking whether the user may even access the file.
"""
@wraps(func)
def _dec(*args, **kwargs):
response = func(*args, **kwargs)
response['Expires'] = 'Thu, 01 Jan 1970 00:00:00 GMT' # HTTP 1.0 proxies
response['Cache-Control'] = 'max-age=0, no-cache, must-revalidate, proxy-revalidate' # HTTP 1.1
return response
return _dec
|
[
"def",
"add_no_cache_headers",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"_dec",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"response",
"[",
"'Expires'",
"]",
"=",
"'Thu, 01 Jan 1970 00:00:00 GMT'",
"# HTTP 1.0 proxies",
"response",
"[",
"'Cache-Control'",
"]",
"=",
"'max-age=0, no-cache, must-revalidate, proxy-revalidate'",
"# HTTP 1.1",
"return",
"response",
"return",
"_dec"
] |
Makes sure the retrieved file is not cached on disk, or cached by proxy servers in between.
This would circumvent any checking whether the user may even access the file.
|
[
"Makes",
"sure",
"the",
"retrieved",
"file",
"is",
"not",
"cached",
"on",
"disk",
"or",
"cached",
"by",
"proxy",
"servers",
"in",
"between",
".",
"This",
"would",
"circumvent",
"any",
"checking",
"whether",
"the",
"user",
"may",
"even",
"access",
"the",
"file",
"."
] |
python
|
train
|
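As described in the add_no_cache_headers docstring above, the decorator stamps anti-caching headers onto whatever response the wrapped view returns. A hedged usage sketch (the view name and response body are hypothetical; the import path follows the row):

from django.http import HttpResponse
from private_storage.servers import add_no_cache_headers

@add_no_cache_headers
def serve_private_file(request):
    # The decorator adds the Expires and Cache-Control headers to this response.
    return HttpResponse(b'secret report', content_type='application/pdf')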
lextoumbourou/txstripe
|
txstripe/resource.py
|
https://github.com/lextoumbourou/txstripe/blob/a69e67f524258026fd1840655a0578311bba3b89/txstripe/resource.py#L52-L110
|
def make_request(
ins, method, url, stripe_account=None, params=None, headers=None, **kwargs
):
"""
Return a deferred or handle error.
For overriding in various classes.
"""
if txstripe.api_key is None:
raise error.AuthenticationError(
'No API key provided. (HINT: set your API key using '
'"stripe.api_key = <API-KEY>"). You can generate API keys '
'from the Stripe web interface. See https://stripe.com/api '
'for details, or email support@stripe.com if you have any '
'questions.')
abs_url = '{}{}'.format(txstripe.api_base, url)
ua = {
'lang': 'python',
'publisher': 'lextoumbourou',
'httplib': 'Twisted',
}
headers = headers or {}
headers.update({
'X-Stripe-Client-User-Agent': util.json.dumps(ua),
'User-Agent': 'txstripe',
'Authorization': 'Bearer %s' % (txstripe.api_key,)
})
if stripe_account:
headers['Stripe-Account'] = stripe_account
if txstripe.api_version is not None:
headers['Stripe-Version'] = txstripe.api_version
if method == 'get' or method == 'delete':
data = None
elif method == 'post':
data = {k: v for (k, v) in _api_encode(params)}
params = None
else:
raise error.APIConnectionError(
'Unrecognized HTTP method %r. This may indicate a bug in the '
'Stripe bindings.' % (method,))
resp = yield treq.request(
method, abs_url, params=params, data=data, headers=headers, **kwargs)
if resp.code >= 400:
yield util.handle_api_error(resp)
return
body = yield resp.json()
defer.returnValue(
convert_to_stripe_object(
body, txstripe.api_key, stripe_account))
|
[
"def",
"make_request",
"(",
"ins",
",",
"method",
",",
"url",
",",
"stripe_account",
"=",
"None",
",",
"params",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"txstripe",
".",
"api_key",
"is",
"None",
":",
"raise",
"error",
".",
"AuthenticationError",
"(",
"'No API key provided. (HINT: set your API key using '",
"'\"stripe.api_key = <API-KEY>\"). You can generate API keys '",
"'from the Stripe web interface. See https://stripe.com/api '",
"'for details, or email support@stripe.com if you have any '",
"'questions.'",
")",
"abs_url",
"=",
"'{}{}'",
".",
"format",
"(",
"txstripe",
".",
"api_base",
",",
"url",
")",
"ua",
"=",
"{",
"'lang'",
":",
"'python'",
",",
"'publisher'",
":",
"'lextoumbourou'",
",",
"'httplib'",
":",
"'Twisted'",
",",
"}",
"headers",
"=",
"headers",
"or",
"{",
"}",
"headers",
".",
"update",
"(",
"{",
"'X-Stripe-Client-User-Agent'",
":",
"util",
".",
"json",
".",
"dumps",
"(",
"ua",
")",
",",
"'User-Agent'",
":",
"'txstripe'",
",",
"'Authorization'",
":",
"'Bearer %s'",
"%",
"(",
"txstripe",
".",
"api_key",
",",
")",
"}",
")",
"if",
"stripe_account",
":",
"headers",
"[",
"'Stripe-Account'",
"]",
"=",
"stripe_account",
"if",
"txstripe",
".",
"api_version",
"is",
"not",
"None",
":",
"headers",
"[",
"'Stripe-Version'",
"]",
"=",
"txstripe",
".",
"api_version",
"if",
"method",
"==",
"'get'",
"or",
"method",
"==",
"'delete'",
":",
"data",
"=",
"None",
"elif",
"method",
"==",
"'post'",
":",
"data",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"_api_encode",
"(",
"params",
")",
"}",
"params",
"=",
"None",
"else",
":",
"raise",
"error",
".",
"APIConnectionError",
"(",
"'Unrecognized HTTP method %r. This may indicate a bug in the '",
"'Stripe bindings.'",
"%",
"(",
"method",
",",
")",
")",
"resp",
"=",
"yield",
"treq",
".",
"request",
"(",
"method",
",",
"abs_url",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"*",
"*",
"kwargs",
")",
"if",
"resp",
".",
"code",
">=",
"400",
":",
"yield",
"util",
".",
"handle_api_error",
"(",
"resp",
")",
"return",
"body",
"=",
"yield",
"resp",
".",
"json",
"(",
")",
"defer",
".",
"returnValue",
"(",
"convert_to_stripe_object",
"(",
"body",
",",
"txstripe",
".",
"api_key",
",",
"stripe_account",
")",
")"
] |
Return a deferred or handle error.
For overriding in various classes.
|
[
"Return",
"a",
"deferred",
"or",
"handle",
"error",
"."
] |
python
|
train
|
arista-eosplus/pyeapi
|
pyeapi/api/vlans.py
|
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/vlans.py#L90-L112
|
def get(self, value):
"""Returns the VLAN configuration as a resource dict.
Args:
vid (string): The vlan identifier to retrieve from the
running configuration. Valid values are in the range
of 1 to 4095
Returns:
A Python dict object containing the VLAN attributes as
key/value pairs.
"""
config = self.get_block('vlan %s' % value)
if not config:
return None
response = dict(vlan_id=value)
response.update(self._parse_name(config))
response.update(self._parse_state(config))
response.update(self._parse_trunk_groups(config))
return response
|
[
"def",
"get",
"(",
"self",
",",
"value",
")",
":",
"config",
"=",
"self",
".",
"get_block",
"(",
"'vlan %s'",
"%",
"value",
")",
"if",
"not",
"config",
":",
"return",
"None",
"response",
"=",
"dict",
"(",
"vlan_id",
"=",
"value",
")",
"response",
".",
"update",
"(",
"self",
".",
"_parse_name",
"(",
"config",
")",
")",
"response",
".",
"update",
"(",
"self",
".",
"_parse_state",
"(",
"config",
")",
")",
"response",
".",
"update",
"(",
"self",
".",
"_parse_trunk_groups",
"(",
"config",
")",
")",
"return",
"response"
] |
Returns the VLAN configuration as a resource dict.
Args:
vid (string): The vlan identifier to retrieve from the
running configuration. Valid values are in the range
of 1 to 4095
Returns:
A Python dict object containing the VLAN attributes as
key/value pairs.
|
[
"Returns",
"the",
"VLAN",
"configuration",
"as",
"a",
"resource",
"dict",
"."
] |
python
|
train
|
polyaxon/polyaxon
|
polyaxon/db/models/pipelines.py
|
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/db/models/pipelines.py#L533-L547
|
def check_concurrency(self) -> bool:
"""Checks the concurrency of the operation run.
Checks the concurrency of the operation run
to validate if we can start a new operation run.
Returns:
boolean: Whether to start a new operation run or not.
"""
if not self.operation.concurrency: # No concurrency set
return True
ops_count = self.operation.runs.filter(
status__status__in=self.STATUSES.RUNNING_STATUS).count()
return ops_count < self.operation.concurrency
|
[
"def",
"check_concurrency",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"not",
"self",
".",
"operation",
".",
"concurrency",
":",
"# No concurrency set",
"return",
"True",
"ops_count",
"=",
"self",
".",
"operation",
".",
"runs",
".",
"filter",
"(",
"status__status__in",
"=",
"self",
".",
"STATUSES",
".",
"RUNNING_STATUS",
")",
".",
"count",
"(",
")",
"return",
"ops_count",
"<",
"self",
".",
"operation",
".",
"concurrency"
] |
Checks the concurrency of the operation run.
Checks the concurrency of the operation run
to validate if we can start a new operation run.
Returns:
boolean: Whether to start a new operation run or not.
|
[
"Checks",
"the",
"concurrency",
"of",
"the",
"operation",
"run",
"."
] |
python
|
train
|
Duke-GCB/DukeDSClient
|
ddsc/cmdparser.py
|
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/cmdparser.py#L359-L372
|
def register_add_user_command(self, add_user_func):
"""
Add the add-user command to the parser and call add_user_func(project_name, user_full_name, auth_role)
when chosen.
:param add_user_func: func Called when this option is chosen: upload_func(project_name, user_full_name, auth_role).
"""
description = "Gives user permission to access a remote project."
add_user_parser = self.subparsers.add_parser('add-user', description=description)
add_project_name_or_id_arg(add_user_parser, help_text_suffix="add a user to")
user_or_email = add_user_parser.add_mutually_exclusive_group(required=True)
add_user_arg(user_or_email)
add_email_arg(user_or_email)
_add_auth_role_arg(add_user_parser, default_permissions='project_admin')
add_user_parser.set_defaults(func=add_user_func)
|
[
"def",
"register_add_user_command",
"(",
"self",
",",
"add_user_func",
")",
":",
"description",
"=",
"\"Gives user permission to access a remote project.\"",
"add_user_parser",
"=",
"self",
".",
"subparsers",
".",
"add_parser",
"(",
"'add-user'",
",",
"description",
"=",
"description",
")",
"add_project_name_or_id_arg",
"(",
"add_user_parser",
",",
"help_text_suffix",
"=",
"\"add a user to\"",
")",
"user_or_email",
"=",
"add_user_parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"True",
")",
"add_user_arg",
"(",
"user_or_email",
")",
"add_email_arg",
"(",
"user_or_email",
")",
"_add_auth_role_arg",
"(",
"add_user_parser",
",",
"default_permissions",
"=",
"'project_admin'",
")",
"add_user_parser",
".",
"set_defaults",
"(",
"func",
"=",
"add_user_func",
")"
] |
Add the add-user command to the parser and call add_user_func(project_name, user_full_name, auth_role)
when chosen.
:param add_user_func: func Called when this option is chosen: upload_func(project_name, user_full_name, auth_role).
|
[
"Add",
"the",
"add",
"-",
"user",
"command",
"to",
"the",
"parser",
"and",
"call",
"add_user_func",
"(",
"project_name",
"user_full_name",
"auth_role",
")",
"when",
"chosen",
".",
":",
"param",
"add_user_func",
":",
"func",
"Called",
"when",
"this",
"option",
"is",
"chosen",
":",
"upload_func",
"(",
"project_name",
"user_full_name",
"auth_role",
")",
"."
] |
python
|
train
|
gccxml/pygccxml
|
pygccxml/declarations/type_traits.py
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/type_traits.py#L512-L524
|
def is_std_string(type_):
"""
Returns True, if type represents C++ `std::string`, False otherwise.
"""
if utils.is_str(type_):
return type_ in string_equivalences
type_ = remove_alias(type_)
type_ = remove_reference(type_)
type_ = remove_cv(type_)
return type_.decl_string in string_equivalences
|
[
"def",
"is_std_string",
"(",
"type_",
")",
":",
"if",
"utils",
".",
"is_str",
"(",
"type_",
")",
":",
"return",
"type_",
"in",
"string_equivalences",
"type_",
"=",
"remove_alias",
"(",
"type_",
")",
"type_",
"=",
"remove_reference",
"(",
"type_",
")",
"type_",
"=",
"remove_cv",
"(",
"type_",
")",
"return",
"type_",
".",
"decl_string",
"in",
"string_equivalences"
] |
Returns True, if type represents C++ `std::string`, False otherwise.
|
[
"Returns",
"True",
"if",
"type",
"represents",
"C",
"++",
"std",
"::",
"string",
"False",
"otherwise",
"."
] |
python
|
train
|
finklabs/korg
|
korg/pattern.py
|
https://github.com/finklabs/korg/blob/e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f/korg/pattern.py#L73-L81
|
def _load_patterns(self, folders, pattern_dict=None):
"""Load all pattern from all the files in folders"""
if pattern_dict is None:
pattern_dict = {}
for folder in folders:
for file in os.listdir(folder):
if regex.match('^[\w-]+$', file):
self._load_pattern_file(os.path.join(folder, file), pattern_dict)
return pattern_dict
|
[
"def",
"_load_patterns",
"(",
"self",
",",
"folders",
",",
"pattern_dict",
"=",
"None",
")",
":",
"if",
"pattern_dict",
"is",
"None",
":",
"pattern_dict",
"=",
"{",
"}",
"for",
"folder",
"in",
"folders",
":",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"folder",
")",
":",
"if",
"regex",
".",
"match",
"(",
"'^[\\w-]+$'",
",",
"file",
")",
":",
"self",
".",
"_load_pattern_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"file",
")",
",",
"pattern_dict",
")",
"return",
"pattern_dict"
] |
Load all patterns from all the files in folders
|
[
"Load",
"all",
"pattern",
"from",
"all",
"the",
"files",
"in",
"folders"
] |
python
|
train
|
rigetti/grove
|
grove/alpha/jordan_gradient/gradient_utils.py
|
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/alpha/jordan_gradient/gradient_utils.py#L6-L27
|
def binary_float_to_decimal_float(number: Union[float, str]) -> float:
"""
Convert binary floating point to decimal floating point.
:param number: Binary floating point.
:return: Decimal floating point representation of binary floating point.
"""
if isinstance(number, str):
if number[0] == '-':
n_sign = -1
else:
n_sign = 1
elif isinstance(number, float):
n_sign = np.sign(number)
number = str(number)
deci = 0
for ndx, val in enumerate(number.split('.')[-1]):
deci += float(val) / 2**(ndx+1)
deci *= n_sign
return deci
|
[
"def",
"binary_float_to_decimal_float",
"(",
"number",
":",
"Union",
"[",
"float",
",",
"str",
"]",
")",
"->",
"float",
":",
"if",
"isinstance",
"(",
"number",
",",
"str",
")",
":",
"if",
"number",
"[",
"0",
"]",
"==",
"'-'",
":",
"n_sign",
"=",
"-",
"1",
"else",
":",
"n_sign",
"=",
"1",
"elif",
"isinstance",
"(",
"number",
",",
"float",
")",
":",
"n_sign",
"=",
"np",
".",
"sign",
"(",
"number",
")",
"number",
"=",
"str",
"(",
"number",
")",
"deci",
"=",
"0",
"for",
"ndx",
",",
"val",
"in",
"enumerate",
"(",
"number",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
")",
":",
"deci",
"+=",
"float",
"(",
"val",
")",
"/",
"2",
"**",
"(",
"ndx",
"+",
"1",
")",
"deci",
"*=",
"n_sign",
"return",
"deci"
] |
Convert binary floating point to decimal floating point.
:param number: Binary floating point.
:return: Decimal floating point representation of binary floating point.
|
[
"Convert",
"binary",
"floating",
"point",
"to",
"decimal",
"floating",
"point",
"."
] |
python
|
train
|
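A worked example of the binary-to-decimal conversion above: each digit after the point contributes 1/2, 1/4, 1/8, and so on (import path assumed from the row's module path):

from grove.alpha.jordan_gradient.gradient_utils import binary_float_to_decimal_float

print(binary_float_to_decimal_float('0.101'))  # 1/2 + 0/4 + 1/8 = 0.625
print(binary_float_to_decimal_float('-0.11'))  # -(1/2 + 1/4) = -0.75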
mlperf/training
|
rnn_translator/pytorch/seq2seq/inference/inference.py
|
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/inference/inference.py#L273-L289
|
def run_sacrebleu(self, detok_eval_path, reference_path):
"""
Executes sacrebleu and returns BLEU score.
:param detok_eval_path: path to the test file
:param reference_path: path to the reference file
"""
if reference_path is None:
reference_path = os.path.join(self.dataset_dir,
config.TGT_TEST_TARGET_FNAME)
sacrebleu_params = '--score-only -lc --tokenize intl'
logging.info(f'Running sacrebleu (parameters: {sacrebleu_params})')
sacrebleu = subprocess.run([f'sacrebleu --input {detok_eval_path} \
{reference_path} {sacrebleu_params}'],
stdout=subprocess.PIPE, shell=True)
test_bleu = float(sacrebleu.stdout.strip())
return test_bleu
|
[
"def",
"run_sacrebleu",
"(",
"self",
",",
"detok_eval_path",
",",
"reference_path",
")",
":",
"if",
"reference_path",
"is",
"None",
":",
"reference_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dataset_dir",
",",
"config",
".",
"TGT_TEST_TARGET_FNAME",
")",
"sacrebleu_params",
"=",
"'--score-only -lc --tokenize intl'",
"logging",
".",
"info",
"(",
"f'Running sacrebleu (parameters: {sacrebleu_params})'",
")",
"sacrebleu",
"=",
"subprocess",
".",
"run",
"(",
"[",
"f'sacrebleu --input {detok_eval_path} \\\n {reference_path} {sacrebleu_params}'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"shell",
"=",
"True",
")",
"test_bleu",
"=",
"float",
"(",
"sacrebleu",
".",
"stdout",
".",
"strip",
"(",
")",
")",
"return",
"test_bleu"
] |
Executes sacrebleu and returns BLEU score.
:param detok_eval_path: path to the test file
:param reference_path: path to the reference file
|
[
"Executes",
"sacrebleu",
"and",
"returns",
"BLEU",
"score",
"."
] |
python
|
train
|
QualiSystems/vCenterShell
|
package/cloudshell/cp/vcenter/commands/disconnect_dvswitch.py
|
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/disconnect_dvswitch.py#L64-L99
|
def disconnect(self, si, logger, vcenter_data_model, vm_uuid, network_name=None, vm=None):
"""
disconnect network adapter of the vm. If 'network_name' = None - disconnect ALL interfaces
:param <str> si:
:param logger:
:param VMwarevCenterResourceModel vcenter_data_model:
:param <str> vm_uuid: the uuid of the vm
:param <str | None> network_name: the name of the specific network to disconnect
:param <pyvmomi vm object> vm: If the vm obj is None will use vm_uuid to fetch the object
:return: Started Task
"""
logger.debug("Disconnect Interface VM: '{0}' Network: '{1}' ...".format(vm_uuid, network_name or "ALL"))
if vm is None:
vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid)
if not vm:
return "Warning: failed to locate vm {0} in vCenter".format(vm_uuid)
if network_name:
network = self.pyvmomi_service.vm_get_network_by_name(vm, network_name)
if network is None:
raise KeyError('Network not found ({0})'.format(network_name))
else:
network = None
network_full_name = VMLocation.combine(
[vcenter_data_model.default_datacenter, vcenter_data_model.holding_network])
default_network = self.pyvmomi_service.get_network_by_full_name(si, network_full_name)
if network:
return self.port_group_configurer.disconnect_network(vm, network, default_network,
vcenter_data_model.reserved_networks,
logger=logger)
else:
return self.port_group_configurer.disconnect_all_networks(vm, default_network,
vcenter_data_model.reserved_networks,
logger=logger)
|
[
"def",
"disconnect",
"(",
"self",
",",
"si",
",",
"logger",
",",
"vcenter_data_model",
",",
"vm_uuid",
",",
"network_name",
"=",
"None",
",",
"vm",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"Disconnect Interface VM: '{0}' Network: '{1}' ...\"",
".",
"format",
"(",
"vm_uuid",
",",
"network_name",
"or",
"\"ALL\"",
")",
")",
"if",
"vm",
"is",
"None",
":",
"vm",
"=",
"self",
".",
"pyvmomi_service",
".",
"find_by_uuid",
"(",
"si",
",",
"vm_uuid",
")",
"if",
"not",
"vm",
":",
"return",
"\"Warning: failed to locate vm {0} in vCenter\"",
".",
"format",
"(",
"vm_uuid",
")",
"if",
"network_name",
":",
"network",
"=",
"self",
".",
"pyvmomi_service",
".",
"vm_get_network_by_name",
"(",
"vm",
",",
"network_name",
")",
"if",
"network",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"'Network not found ({0})'",
".",
"format",
"(",
"network_name",
")",
")",
"else",
":",
"network",
"=",
"None",
"network_full_name",
"=",
"VMLocation",
".",
"combine",
"(",
"[",
"vcenter_data_model",
".",
"default_datacenter",
",",
"vcenter_data_model",
".",
"holding_network",
"]",
")",
"default_network",
"=",
"self",
".",
"pyvmomi_service",
".",
"get_network_by_full_name",
"(",
"si",
",",
"network_full_name",
")",
"if",
"network",
":",
"return",
"self",
".",
"port_group_configurer",
".",
"disconnect_network",
"(",
"vm",
",",
"network",
",",
"default_network",
",",
"vcenter_data_model",
".",
"reserved_networks",
",",
"logger",
"=",
"logger",
")",
"else",
":",
"return",
"self",
".",
"port_group_configurer",
".",
"disconnect_all_networks",
"(",
"vm",
",",
"default_network",
",",
"vcenter_data_model",
".",
"reserved_networks",
",",
"logger",
"=",
"logger",
")"
] |
disconnect network adapter of the vm. If 'network_name' = None - disconnect ALL interfaces
:param <str> si:
:param logger:
:param VMwarevCenterResourceModel vcenter_data_model:
:param <str> vm_uuid: the uuid of the vm
:param <str | None> network_name: the name of the specific network to disconnect
:param <pyvmomi vm object> vm: If the vm obj is None will use vm_uuid to fetch the object
:return: Started Task
|
[
"disconnect",
"network",
"adapter",
"of",
"the",
"vm",
".",
"If",
"network_name",
"=",
"None",
"-",
"disconnect",
"ALL",
"interfaces",
":",
"param",
"<str",
">",
"si",
":",
":",
"param",
"logger",
":",
":",
"param",
"VMwarevCenterResourceModel",
"vcenter_data_model",
":",
":",
"param",
"<str",
">",
"vm_uuid",
":",
"the",
"uuid",
"of",
"the",
"vm",
":",
"param",
"<str",
"|",
"None",
">",
"network_name",
":",
"the",
"name",
"of",
"the",
"specific",
"network",
"to",
"disconnect",
":",
"param",
"<pyvmomi",
"vm",
"object",
">",
"vm",
":",
"If",
"the",
"vm",
"obj",
"is",
"None",
"will",
"use",
"vm_uuid",
"to",
"fetch",
"the",
"object",
":",
"return",
":",
"Started",
"Task"
] |
python
|
train
|
treycucco/pyebnf
|
pyebnf/primitive.py
|
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/primitive.py#L320-L330
|
def _get_terminal(value, text):
"""Checks the beginning of text for a value. If it is found, a terminal ParseNode is returned
filled out appropriately for the value it found. DeadEnd is raised if the value does not match.
"""
if text and text.startswith(value):
return ParseNode(ParseNodeType.terminal,
children=[value],
consumed=len(value),
position=-len(text))
else:
raise DeadEnd()
|
[
"def",
"_get_terminal",
"(",
"value",
",",
"text",
")",
":",
"if",
"text",
"and",
"text",
".",
"startswith",
"(",
"value",
")",
":",
"return",
"ParseNode",
"(",
"ParseNodeType",
".",
"terminal",
",",
"children",
"=",
"[",
"value",
"]",
",",
"consumed",
"=",
"len",
"(",
"value",
")",
",",
"position",
"=",
"-",
"len",
"(",
"text",
")",
")",
"else",
":",
"raise",
"DeadEnd",
"(",
")"
] |
Checks the beginning of text for a value. If it is found, a terminal ParseNode is returned
filled out appropriately for the value it found. DeadEnd is raised if the value does not match.
|
[
"Checks",
"the",
"beginning",
"of",
"text",
"for",
"a",
"value",
".",
"If",
"it",
"is",
"found",
"a",
"terminal",
"ParseNode",
"is",
"returned",
"filled",
"out",
"appropriately",
"for",
"the",
"value",
"it",
"found",
".",
"DeadEnd",
"is",
"raised",
"if",
"the",
"value",
"does",
"not",
"match",
"."
] |
python
|
test
|
prometheus/client_python
|
prometheus_client/exposition.py
|
https://github.com/prometheus/client_python/blob/31f5557e2e84ca4ffa9a03abf6e3f4d0c8b8c3eb/prometheus_client/exposition.py#L190-L196
|
def start_http_server(port, addr='', registry=REGISTRY):
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
|
[
"def",
"start_http_server",
"(",
"port",
",",
"addr",
"=",
"''",
",",
"registry",
"=",
"REGISTRY",
")",
":",
"CustomMetricsHandler",
"=",
"MetricsHandler",
".",
"factory",
"(",
"registry",
")",
"httpd",
"=",
"_ThreadingSimpleServer",
"(",
"(",
"addr",
",",
"port",
")",
",",
"CustomMetricsHandler",
")",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"httpd",
".",
"serve_forever",
")",
"t",
".",
"daemon",
"=",
"True",
"t",
".",
"start",
"(",
")"
] |
Starts an HTTP server for prometheus metrics as a daemon thread
|
[
"Starts",
"an",
"HTTP",
"server",
"for",
"prometheus",
"metrics",
"as",
"a",
"daemon",
"thread"
] |
python
|
train
|
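The start_http_server record above documents the standard prometheus_client entry point. A minimal usage sketch, assuming prometheus_client is installed; the port number and the requests_total metric are illustrative and not part of the record:

import time
from prometheus_client import Counter, start_http_server

# Metric name and help text are illustrative assumptions, not taken from the record.
requests_total = Counter('requests_total', 'Total number of handled requests')

if __name__ == '__main__':
    start_http_server(8000)      # metrics exposed at http://localhost:8000/metrics
    while True:
        requests_total.inc()     # count some simulated work
        time.sleep(1)
|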
coldmind/django-postgres-pgpfields
|
django_postgres_pgpfields/managers.py
|
https://github.com/coldmind/django-postgres-pgpfields/blob/8ad7ab6254f06104012696fa7f99d0f6727fb667/django_postgres_pgpfields/managers.py#L22-L36
|
def get_queryset(self, *args, **kwargs):
"""Django queryset.extra() is used here to add decryption sql to query."""
select_sql = {}
encrypted_fields = []
for f in self.model._meta.get_fields_with_model():
field = f[0]
if isinstance(field, PGPMixin):
select_sql[field.name] = self.get_decrypt_sql(field).format(
field.model._meta.db_table,
field.name,
settings.PGPFIELDS_PRIVATE_KEY,
)
encrypted_fields.append(field.name)
return super(PGPEncryptedManager, self).get_queryset(
*args, **kwargs).defer(*encrypted_fields).extra(select=select_sql)
|
[
"def",
"get_queryset",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"select_sql",
"=",
"{",
"}",
"encrypted_fields",
"=",
"[",
"]",
"for",
"f",
"in",
"self",
".",
"model",
".",
"_meta",
".",
"get_fields_with_model",
"(",
")",
":",
"field",
"=",
"f",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"field",
",",
"PGPMixin",
")",
":",
"select_sql",
"[",
"field",
".",
"name",
"]",
"=",
"self",
".",
"get_decrypt_sql",
"(",
"field",
")",
".",
"format",
"(",
"field",
".",
"model",
".",
"_meta",
".",
"db_table",
",",
"field",
".",
"name",
",",
"settings",
".",
"PGPFIELDS_PRIVATE_KEY",
",",
")",
"encrypted_fields",
".",
"append",
"(",
"field",
".",
"name",
")",
"return",
"super",
"(",
"PGPEncryptedManager",
",",
"self",
")",
".",
"get_queryset",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
".",
"defer",
"(",
"*",
"encrypted_fields",
")",
".",
"extra",
"(",
"select",
"=",
"select_sql",
")"
] |
Django queryset.extra() is used here to add decryption sql to query.
|
[
"Django",
"queryset",
".",
"extra",
"()",
"is",
"used",
"here",
"to",
"add",
"decryption",
"sql",
"to",
"query",
"."
] |
python
|
train
|
cloudnull/cloudlib
|
cloudlib/http.py
|
https://github.com/cloudnull/cloudlib/blob/5038111ce02521caa2558117e3bae9e1e806d315/cloudlib/http.py#L118-L124
|
def _report_error(self, request, exp):
"""When making the request, if an error happens, log it."""
message = (
"Failure to perform %s due to [ %s ]" % (request, exp)
)
self.log.fatal(message)
raise requests.RequestException(message)
|
[
"def",
"_report_error",
"(",
"self",
",",
"request",
",",
"exp",
")",
":",
"message",
"=",
"(",
"\"Failure to perform %s due to [ %s ]\"",
"%",
"(",
"request",
",",
"exp",
")",
")",
"self",
".",
"log",
".",
"fatal",
"(",
"message",
")",
"raise",
"requests",
".",
"RequestException",
"(",
"message",
")"
] |
When making the request, if an error happens, log it.
|
[
"When",
"making",
"the",
"request",
"if",
"an",
"error",
"happens",
"log",
"it",
"."
] |
python
|
train
|
silver-castle/mach9
|
mach9/http.py
|
https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/http.py#L202-L231
|
def get_message(self, transport, http_version: str, method: bytes,
url: bytes, headers: List[List[bytes]]) -> Dict[str, Any]:
'''
http://channels.readthedocs.io/en/stable/asgi/www.html#request
'''
url_obj = parse_url(url)
if url_obj.schema is None:
if transport.get_extra_info('sslcontext'):
scheme = 'https'
else:
scheme = 'http'
else:
scheme = url_obj.schema.decode()
path = '' if url_obj.path is None else url_obj.path.decode('utf-8')
query = b'' if url_obj.query is None else url_obj.query
return {
'channel': 'http.request',
'reply_channel': None,
'http_version': http_version,
'method': method.decode(),
'scheme': scheme,
'path': path,
'query_string': query,
'root_path': '',
'headers': headers,
'body': b'',
'body_channel': None,
'client': transport.get_extra_info('peername'),
'server': transport.get_extra_info('sockname')
}
|
[
"def",
"get_message",
"(",
"self",
",",
"transport",
",",
"http_version",
":",
"str",
",",
"method",
":",
"bytes",
",",
"url",
":",
"bytes",
",",
"headers",
":",
"List",
"[",
"List",
"[",
"bytes",
"]",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"url_obj",
"=",
"parse_url",
"(",
"url",
")",
"if",
"url_obj",
".",
"schema",
"is",
"None",
":",
"if",
"transport",
".",
"get_extra_info",
"(",
"'sslcontext'",
")",
":",
"scheme",
"=",
"'https'",
"else",
":",
"scheme",
"=",
"'http'",
"else",
":",
"scheme",
"=",
"url_obj",
".",
"schema",
".",
"decode",
"(",
")",
"path",
"=",
"''",
"if",
"url_obj",
".",
"path",
"is",
"None",
"else",
"url_obj",
".",
"path",
".",
"decode",
"(",
"'utf-8'",
")",
"query",
"=",
"b''",
"if",
"url_obj",
".",
"query",
"is",
"None",
"else",
"url_obj",
".",
"query",
"return",
"{",
"'channel'",
":",
"'http.request'",
",",
"'reply_channel'",
":",
"None",
",",
"'http_version'",
":",
"http_version",
",",
"'method'",
":",
"method",
".",
"decode",
"(",
")",
",",
"'scheme'",
":",
"scheme",
",",
"'path'",
":",
"path",
",",
"'query_string'",
":",
"query",
",",
"'root_path'",
":",
"''",
",",
"'headers'",
":",
"headers",
",",
"'body'",
":",
"b''",
",",
"'body_channel'",
":",
"None",
",",
"'client'",
":",
"transport",
".",
"get_extra_info",
"(",
"'peername'",
")",
",",
"'server'",
":",
"transport",
".",
"get_extra_info",
"(",
"'sockname'",
")",
"}"
] |
http://channels.readthedocs.io/en/stable/asgi/www.html#request
|
[
"http",
":",
"//",
"channels",
".",
"readthedocs",
".",
"io",
"/",
"en",
"/",
"stable",
"/",
"asgi",
"/",
"www",
".",
"html#request"
] |
python
|
train
|
pvlib/pvlib-python
|
pvlib/tracking.py
|
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/tracking.py#L153-L214
|
def get_irradiance(self, surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth, dni, ghi, dhi,
dni_extra=None, airmass=None, model='haydavies',
**kwargs):
"""
Uses the :func:`irradiance.get_total_irradiance` function to
calculate the plane of array irradiance components on a tilted
surface defined by the input data and ``self.albedo``.
For a given set of solar zenith and azimuth angles, the
surface tilt and azimuth parameters are typically determined
by :py:meth:`~SingleAxisTracker.singleaxis`.
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
dni : float or Series
Direct Normal Irradiance
ghi : float or Series
Global horizontal irradiance
dhi : float or Series
Diffuse horizontal irradiance
dni_extra : float or Series, default None
Extraterrestrial direct normal irradiance
airmass : float or Series, default None
Airmass
model : String, default 'haydavies'
Irradiance model.
**kwargs
Passed to :func:`irradiance.total_irrad`.
Returns
-------
poa_irradiance : DataFrame
Column names are: ``total, beam, sky, ground``.
"""
# not needed for all models, but this is easier
if dni_extra is None:
dni_extra = irradiance.get_extra_radiation(solar_zenith.index)
if airmass is None:
airmass = atmosphere.get_relative_airmass(solar_zenith)
return irradiance.get_total_irradiance(surface_tilt,
surface_azimuth,
solar_zenith,
solar_azimuth,
dni, ghi, dhi,
dni_extra=dni_extra,
airmass=airmass,
model=model,
albedo=self.albedo,
**kwargs)
|
[
"def",
"get_irradiance",
"(",
"self",
",",
"surface_tilt",
",",
"surface_azimuth",
",",
"solar_zenith",
",",
"solar_azimuth",
",",
"dni",
",",
"ghi",
",",
"dhi",
",",
"dni_extra",
"=",
"None",
",",
"airmass",
"=",
"None",
",",
"model",
"=",
"'haydavies'",
",",
"*",
"*",
"kwargs",
")",
":",
"# not needed for all models, but this is easier",
"if",
"dni_extra",
"is",
"None",
":",
"dni_extra",
"=",
"irradiance",
".",
"get_extra_radiation",
"(",
"solar_zenith",
".",
"index",
")",
"if",
"airmass",
"is",
"None",
":",
"airmass",
"=",
"atmosphere",
".",
"get_relative_airmass",
"(",
"solar_zenith",
")",
"return",
"irradiance",
".",
"get_total_irradiance",
"(",
"surface_tilt",
",",
"surface_azimuth",
",",
"solar_zenith",
",",
"solar_azimuth",
",",
"dni",
",",
"ghi",
",",
"dhi",
",",
"dni_extra",
"=",
"dni_extra",
",",
"airmass",
"=",
"airmass",
",",
"model",
"=",
"model",
",",
"albedo",
"=",
"self",
".",
"albedo",
",",
"*",
"*",
"kwargs",
")"
] |
Uses the :func:`irradiance.get_total_irradiance` function to
calculate the plane of array irradiance components on a tilted
surface defined by the input data and ``self.albedo``.
For a given set of solar zenith and azimuth angles, the
surface tilt and azimuth parameters are typically determined
by :py:meth:`~SingleAxisTracker.singleaxis`.
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
dni : float or Series
Direct Normal Irradiance
ghi : float or Series
Global horizontal irradiance
dhi : float or Series
Diffuse horizontal irradiance
dni_extra : float or Series, default None
Extraterrestrial direct normal irradiance
airmass : float or Series, default None
Airmass
model : String, default 'haydavies'
Irradiance model.
**kwargs
Passed to :func:`irradiance.total_irrad`.
Returns
-------
poa_irradiance : DataFrame
Column names are: ``total, beam, sky, ground``.
|
[
"Uses",
"the",
":",
"func",
":",
"irradiance",
".",
"get_total_irradiance",
"function",
"to",
"calculate",
"the",
"plane",
"of",
"array",
"irradiance",
"components",
"on",
"a",
"tilted",
"surface",
"defined",
"by",
"the",
"input",
"data",
"and",
"self",
".",
"albedo",
"."
] |
python
|
train
|
rdussurget/py-altimetry
|
altimetry/tools/interp_tools.py
|
https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/tools/interp_tools.py#L59-L85
|
def extrap1d(Z,mask):
"""
EXTRAP1D : Extrapolate values from a 1D vector at its beginning and end using reversal of this array
@note : gaps in vector Z should be filled first as values from the vector are replicated out of edges
@note : if isinstance(Z,np.ma.masked_array) : mask = Z.mask
@param Z: 1D vector to extrapolate
@param mask: mask flag of the vector
@author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne
"""
Zout=Z.copy()
N=len(Zout)
ind=np.arange(N)
xout=ind[mask]
hist=(~mask).astype(int)
dhist=hist[1:]-hist[:-1]
st=ind[dhist==1]+1
if len(st) > 0 :
st=st[0]
Zout[:st]=Z[st] - (np.roll(Z,-st-1)[:st][::-1] - Z[st])
en=ind[dhist==-1]
if len(en) > 0 :
en=en[-1]
Zout[en+1:]=Z[en] - (np.roll(Z,-en)[::-1][:N-en-1] - Z[en])
return Zout
|
[
"def",
"extrap1d",
"(",
"Z",
",",
"mask",
")",
":",
"Zout",
"=",
"Z",
".",
"copy",
"(",
")",
"N",
"=",
"len",
"(",
"Zout",
")",
"ind",
"=",
"np",
".",
"arange",
"(",
"N",
")",
"xout",
"=",
"ind",
"[",
"mask",
"]",
"hist",
"=",
"(",
"~",
"mask",
")",
".",
"astype",
"(",
"int",
")",
"dhist",
"=",
"hist",
"[",
"1",
":",
"]",
"-",
"hist",
"[",
":",
"-",
"1",
"]",
"st",
"=",
"ind",
"[",
"dhist",
"==",
"1",
"]",
"+",
"1",
"if",
"len",
"(",
"st",
")",
">",
"0",
":",
"st",
"=",
"st",
"[",
"0",
"]",
"Zout",
"[",
":",
"st",
"]",
"=",
"Z",
"[",
"st",
"]",
"-",
"(",
"np",
".",
"roll",
"(",
"Z",
",",
"-",
"st",
"-",
"1",
")",
"[",
":",
"st",
"]",
"[",
":",
":",
"-",
"1",
"]",
"-",
"Z",
"[",
"st",
"]",
")",
"en",
"=",
"ind",
"[",
"dhist",
"==",
"-",
"1",
"]",
"if",
"len",
"(",
"en",
")",
">",
"0",
":",
"en",
"=",
"en",
"[",
"-",
"1",
"]",
"Zout",
"[",
"en",
"+",
"1",
":",
"]",
"=",
"Z",
"[",
"en",
"]",
"-",
"(",
"np",
".",
"roll",
"(",
"Z",
",",
"-",
"en",
")",
"[",
":",
":",
"-",
"1",
"]",
"[",
":",
"N",
"-",
"en",
"-",
"1",
"]",
"-",
"Z",
"[",
"en",
"]",
")",
"return",
"Zout"
] |
EXTRAP1D : Extrapolate values from a 1D vector at its beginning and end using reversal of this array
@note : gaps in vector Z should be filled first as values from the vector are replicated out of edges
@note : if isinstance(Z,np.ma.masked_array) : mask = Z.mask
@param Z: 1D vector to extrapolate
@param mask: mask flag of the vector
@author: Renaud DUSSURGET, LER/PAC, Ifremer La Seyne
|
[
"EXTRAP1D",
":",
"Extrapolate",
"values",
"from",
"a",
"1D",
"vector",
"at",
"its",
"beginning",
"and",
"end",
"using",
"reversal",
"of",
"this",
"array"
] |
python
|
train
|
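The extrap1d record above fills masked edge values by mirroring the interior of the vector. A small usage sketch, assuming the function is importable from the module path shown in the record and that the edge entries flagged by mask are placeholders to be replaced:

import numpy as np
from altimetry.tools.interp_tools import extrap1d  # import path taken from the record

Z = np.array([9., 1., 2., 3., 4., 9.])                     # 9. marks junk edge values
mask = np.array([True, False, False, False, False, True])  # True = value to extrapolate

Zout = extrap1d(Z, mask)
print(Zout)  # expected: [0. 1. 2. 3. 4. 5.] - both edges extrapolated from the interior
|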
bjmorgan/lattice_mc
|
lattice_mc/simulation.py
|
https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/simulation.py#L152-L179
|
def run( self, for_time=None ):
"""
Run the simulation.
Args:
for_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None.
Returns:
None
"""
self.for_time = for_time
try:
self.is_initialised()
except AttributeError:
raise
if self.number_of_equilibration_jumps > 0:
for step in range( self.number_of_equilibration_jumps ):
self.lattice.jump()
self.reset()
if self.for_time:
self.number_of_jumps = 0
while self.lattice.time < self.for_time:
self.lattice.jump()
self.number_of_jumps += 1
else:
for step in range( self.number_of_jumps ):
self.lattice.jump()
self.has_run = True
|
[
"def",
"run",
"(",
"self",
",",
"for_time",
"=",
"None",
")",
":",
"self",
".",
"for_time",
"=",
"for_time",
"try",
":",
"self",
".",
"is_initialised",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"if",
"self",
".",
"number_of_equilibration_jumps",
">",
"0",
":",
"for",
"step",
"in",
"range",
"(",
"self",
".",
"number_of_equilibration_jumps",
")",
":",
"self",
".",
"lattice",
".",
"jump",
"(",
")",
"self",
".",
"reset",
"(",
")",
"if",
"self",
".",
"for_time",
":",
"self",
".",
"number_of_jumps",
"=",
"0",
"while",
"self",
".",
"lattice",
".",
"time",
"<",
"self",
".",
"for_time",
":",
"self",
".",
"lattice",
".",
"jump",
"(",
")",
"self",
".",
"number_of_jumps",
"+=",
"1",
"else",
":",
"for",
"step",
"in",
"range",
"(",
"self",
".",
"number_of_jumps",
")",
":",
"self",
".",
"lattice",
".",
"jump",
"(",
")",
"self",
".",
"has_run",
"=",
"True"
] |
Run the simulation.
Args:
for_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None.
Returns:
None
|
[
"Run",
"the",
"simulation",
"."
] |
python
|
train
|
orb-framework/orb
|
orb/core/column_types/reference.py
|
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/column_types/reference.py#L146-L155
|
def referenceModel(self):
"""
Returns the model that this column references.
:return <Table> || None
"""
model = orb.system.model(self.__reference)
if not model:
raise orb.errors.ModelNotFound(schema=self.__reference)
return model
|
[
"def",
"referenceModel",
"(",
"self",
")",
":",
"model",
"=",
"orb",
".",
"system",
".",
"model",
"(",
"self",
".",
"__reference",
")",
"if",
"not",
"model",
":",
"raise",
"orb",
".",
"errors",
".",
"ModelNotFound",
"(",
"schema",
"=",
"self",
".",
"__reference",
")",
"return",
"model"
] |
Returns the model that this column references.
:return <Table> || None
|
[
"Returns",
"the",
"model",
"that",
"this",
"column",
"references",
"."
] |
python
|
train
|
census-instrumentation/opencensus-python
|
contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py
|
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py#L238-L244
|
def _convert_reftype_to_jaeger_reftype(ref):
"""Convert opencensus reference types to jaeger reference types."""
if ref == link_module.Type.CHILD_LINKED_SPAN:
return jaeger.SpanRefType.CHILD_OF
if ref == link_module.Type.PARENT_LINKED_SPAN:
return jaeger.SpanRefType.FOLLOWS_FROM
return None
|
[
"def",
"_convert_reftype_to_jaeger_reftype",
"(",
"ref",
")",
":",
"if",
"ref",
"==",
"link_module",
".",
"Type",
".",
"CHILD_LINKED_SPAN",
":",
"return",
"jaeger",
".",
"SpanRefType",
".",
"CHILD_OF",
"if",
"ref",
"==",
"link_module",
".",
"Type",
".",
"PARENT_LINKED_SPAN",
":",
"return",
"jaeger",
".",
"SpanRefType",
".",
"FOLLOWS_FROM",
"return",
"None"
] |
Convert opencensus reference types to jaeger reference types.
|
[
"Convert",
"opencensus",
"reference",
"types",
"to",
"jaeger",
"reference",
"types",
"."
] |
python
|
train
|
resonai/ybt
|
setup.py
|
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/setup.py#L16-L20
|
def get_readme():
"""Read and return the content of the project README file."""
base_dir = path.abspath(path.dirname(__file__))
with open(path.join(base_dir, 'README.md'), encoding='utf-8') as readme_f:
return readme_f.read()
|
[
"def",
"get_readme",
"(",
")",
":",
"base_dir",
"=",
"path",
".",
"abspath",
"(",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"with",
"open",
"(",
"path",
".",
"join",
"(",
"base_dir",
",",
"'README.md'",
")",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"readme_f",
":",
"return",
"readme_f",
".",
"read",
"(",
")"
] |
Read and return the content of the project README file.
|
[
"Read",
"and",
"return",
"the",
"content",
"of",
"the",
"project",
"README",
"file",
"."
] |
python
|
train
|
etingof/pysmi
|
pysmi/parser/smi.py
|
https://github.com/etingof/pysmi/blob/379a0a384c81875731be51a054bdacced6260fd8/pysmi/parser/smi.py#L759-L766
|
def p_BitNames(self, p):
"""BitNames : BitNames ',' LOWERCASE_IDENTIFIER
| LOWERCASE_IDENTIFIER"""
n = len(p)
if n == 4:
p[0] = ('BitNames', p[1][1] + [p[3]])
elif n == 2:
p[0] = ('BitNames', [p[1]])
|
[
"def",
"p_BitNames",
"(",
"self",
",",
"p",
")",
":",
"n",
"=",
"len",
"(",
"p",
")",
"if",
"n",
"==",
"4",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"'BitNames'",
",",
"p",
"[",
"1",
"]",
"[",
"1",
"]",
"+",
"[",
"p",
"[",
"3",
"]",
"]",
")",
"elif",
"n",
"==",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"'BitNames'",
",",
"[",
"p",
"[",
"1",
"]",
"]",
")"
] |
BitNames : BitNames ',' LOWERCASE_IDENTIFIER
| LOWERCASE_IDENTIFIER
|
[
"BitNames",
":",
"BitNames",
"LOWERCASE_IDENTIFIER",
"|",
"LOWERCASE_IDENTIFIER"
] |
python
|
valid
|
xtrementl/focus
|
focus/plugin/modules/im.py
|
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/im.py#L216-L244
|
def _linux_skype_status(status, message):
""" Updates status and message for Skype IM application on Linux.
`status`
Status type.
`message`
Status message.
"""
try:
iface = _dbus_get_interface('com.Skype.API',
'/com/Skype',
'com.Skype.API')
if iface:
# authenticate
if iface.Invoke('NAME focus') != 'OK':
msg = 'User denied authorization'
                raise dbus.exceptions.DBusException(msg)
iface.Invoke('PROTOCOL 5')
# set status
iface.Invoke('SET USERSTATUS {0}'.format(SKYPE_CODE_MAP[status]))
# set the message, if provided
iface.Invoke('SET PROFILE MOOD_TEXT {0}'
.format(message))
except dbus.exceptions.DBusException:
pass
|
[
"def",
"_linux_skype_status",
"(",
"status",
",",
"message",
")",
":",
"try",
":",
"iface",
"=",
"_dbus_get_interface",
"(",
"'com.Skype.API'",
",",
"'/com/Skype'",
",",
"'com.Skype.API'",
")",
"if",
"iface",
":",
"# authenticate",
"if",
"iface",
".",
"Invoke",
"(",
"'NAME focus'",
")",
"!=",
"'OK'",
":",
"msg",
"=",
"'User denied authorization'",
"raise",
"dbus",
".",
"exceptions",
".",
"DbusException",
"(",
"msg",
")",
"iface",
".",
"Invoke",
"(",
"'PROTOCOL 5'",
")",
"# set status",
"iface",
".",
"Invoke",
"(",
"'SET USERSTATUS {0}'",
".",
"format",
"(",
"SKYPE_CODE_MAP",
"[",
"status",
"]",
")",
")",
"# set the message, if provided",
"iface",
".",
"Invoke",
"(",
"'SET PROFILE MOOD_TEXT {0}'",
".",
"format",
"(",
"message",
")",
")",
"except",
"dbus",
".",
"exceptions",
".",
"DBusException",
":",
"pass"
] |
Updates status and message for Skype IM application on Linux.
`status`
Status type.
`message`
Status message.
|
[
"Updates",
"status",
"and",
"message",
"for",
"Skype",
"IM",
"application",
"on",
"Linux",
"."
] |
python
|
train
|
fastai/fastai
|
fastai/data_block.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L149-L152
|
def filter_by_func(self, func:Callable)->'ItemList':
"Only keep elements for which `func` returns `True`."
self.items = array([o for o in self.items if func(o)])
return self
|
[
"def",
"filter_by_func",
"(",
"self",
",",
"func",
":",
"Callable",
")",
"->",
"'ItemList'",
":",
"self",
".",
"items",
"=",
"array",
"(",
"[",
"o",
"for",
"o",
"in",
"self",
".",
"items",
"if",
"func",
"(",
"o",
")",
"]",
")",
"return",
"self"
] |
Only keep elements for which `func` returns `True`.
|
[
"Only",
"keep",
"elements",
"for",
"which",
"func",
"returns",
"True",
"."
] |
python
|
train
|
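The filter_by_func record above is part of fastai's v1 data block API. A hedged usage sketch, assuming fastai v1 is installed and that 'data/images' contains image files; the folder path and the predicate are illustrative:

from fastai.vision import ImageList

il = ImageList.from_folder('data/images')
il = il.filter_by_func(lambda o: o.suffix == '.png')  # keep only .png items; o is a Path
print(len(il.items))
|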
PyCQA/pylint
|
setup.py
|
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/setup.py#L142-L184
|
def install(**kwargs):
"""setup entry point"""
if USE_SETUPTOOLS:
if "--force-manifest" in sys.argv:
sys.argv.remove("--force-manifest")
packages = [modname] + get_packages(join(base_dir, "pylint"), modname)
if USE_SETUPTOOLS:
if install_requires:
kwargs["install_requires"] = install_requires
kwargs["dependency_links"] = dependency_links
kwargs["entry_points"] = {
"console_scripts": [
"pylint = pylint:run_pylint",
"epylint = pylint:run_epylint",
"pyreverse = pylint:run_pyreverse",
"symilar = pylint:run_symilar",
]
}
kwargs["packages"] = packages
cmdclass = {"install_lib": MyInstallLib, "build_py": build_py}
if easy_install_lib:
cmdclass["easy_install"] = easy_install
return setup(
name=distname,
version=__pkginfo__["version"],
license=__pkginfo__["license"],
description=__pkginfo__["description"],
long_description=long_description,
author=__pkginfo__["author"],
author_email=__pkginfo__["author_email"],
url=__pkginfo__["web"],
scripts=ensure_scripts(scripts),
classifiers=__pkginfo__["classifiers"],
data_files=data_files,
ext_modules=ext_modules,
cmdclass=cmdclass,
extras_require=extras_require,
test_suite="test",
python_requires=">=3.4.*",
setup_requires=["pytest-runner"],
tests_require=["pytest"],
**kwargs
)
|
[
"def",
"install",
"(",
"*",
"*",
"kwargs",
")",
":",
"if",
"USE_SETUPTOOLS",
":",
"if",
"\"--force-manifest\"",
"in",
"sys",
".",
"argv",
":",
"sys",
".",
"argv",
".",
"remove",
"(",
"\"--force-manifest\"",
")",
"packages",
"=",
"[",
"modname",
"]",
"+",
"get_packages",
"(",
"join",
"(",
"base_dir",
",",
"\"pylint\"",
")",
",",
"modname",
")",
"if",
"USE_SETUPTOOLS",
":",
"if",
"install_requires",
":",
"kwargs",
"[",
"\"install_requires\"",
"]",
"=",
"install_requires",
"kwargs",
"[",
"\"dependency_links\"",
"]",
"=",
"dependency_links",
"kwargs",
"[",
"\"entry_points\"",
"]",
"=",
"{",
"\"console_scripts\"",
":",
"[",
"\"pylint = pylint:run_pylint\"",
",",
"\"epylint = pylint:run_epylint\"",
",",
"\"pyreverse = pylint:run_pyreverse\"",
",",
"\"symilar = pylint:run_symilar\"",
",",
"]",
"}",
"kwargs",
"[",
"\"packages\"",
"]",
"=",
"packages",
"cmdclass",
"=",
"{",
"\"install_lib\"",
":",
"MyInstallLib",
",",
"\"build_py\"",
":",
"build_py",
"}",
"if",
"easy_install_lib",
":",
"cmdclass",
"[",
"\"easy_install\"",
"]",
"=",
"easy_install",
"return",
"setup",
"(",
"name",
"=",
"distname",
",",
"version",
"=",
"__pkginfo__",
"[",
"\"version\"",
"]",
",",
"license",
"=",
"__pkginfo__",
"[",
"\"license\"",
"]",
",",
"description",
"=",
"__pkginfo__",
"[",
"\"description\"",
"]",
",",
"long_description",
"=",
"long_description",
",",
"author",
"=",
"__pkginfo__",
"[",
"\"author\"",
"]",
",",
"author_email",
"=",
"__pkginfo__",
"[",
"\"author_email\"",
"]",
",",
"url",
"=",
"__pkginfo__",
"[",
"\"web\"",
"]",
",",
"scripts",
"=",
"ensure_scripts",
"(",
"scripts",
")",
",",
"classifiers",
"=",
"__pkginfo__",
"[",
"\"classifiers\"",
"]",
",",
"data_files",
"=",
"data_files",
",",
"ext_modules",
"=",
"ext_modules",
",",
"cmdclass",
"=",
"cmdclass",
",",
"extras_require",
"=",
"extras_require",
",",
"test_suite",
"=",
"\"test\"",
",",
"python_requires",
"=",
"\">=3.4.*\"",
",",
"setup_requires",
"=",
"[",
"\"pytest-runner\"",
"]",
",",
"tests_require",
"=",
"[",
"\"pytest\"",
"]",
",",
"*",
"*",
"kwargs",
")"
] |
setup entry point
|
[
"setup",
"entry",
"point"
] |
python
|
test
|
angr/angr
|
angr/keyed_region.py
|
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/keyed_region.py#L238-L248
|
def add_object(self, start, obj, object_size):
"""
Add/Store an object to this region at the given offset.
:param start:
:param obj:
:param int object_size: Size of the object
:return:
"""
self._store(start, obj, object_size, overwrite=False)
|
[
"def",
"add_object",
"(",
"self",
",",
"start",
",",
"obj",
",",
"object_size",
")",
":",
"self",
".",
"_store",
"(",
"start",
",",
"obj",
",",
"object_size",
",",
"overwrite",
"=",
"False",
")"
] |
Add/Store an object to this region at the given offset.
:param start:
:param obj:
:param int object_size: Size of the object
:return:
|
[
"Add",
"/",
"Store",
"an",
"object",
"to",
"this",
"region",
"at",
"the",
"given",
"offset",
"."
] |
python
|
train
|
bspaans/python-mingus
|
mingus/midi/midi_track.py
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_track.py#L157-L166
|
def header(self):
"""Return the bytes for the header of track.
The header contains the length of the track_data, so you'll have to
call this function when you're done adding data (when you're not
using get_midi_data).
"""
chunk_size = a2b_hex('%08x' % (len(self.track_data)
+ len(self.end_of_track())))
return TRACK_HEADER + chunk_size
|
[
"def",
"header",
"(",
"self",
")",
":",
"chunk_size",
"=",
"a2b_hex",
"(",
"'%08x'",
"%",
"(",
"len",
"(",
"self",
".",
"track_data",
")",
"+",
"len",
"(",
"self",
".",
"end_of_track",
"(",
")",
")",
")",
")",
"return",
"TRACK_HEADER",
"+",
"chunk_size"
] |
Return the bytes for the header of track.
The header contains the length of the track_data, so you'll have to
call this function when you're done adding data (when you're not
using get_midi_data).
|
[
"Return",
"the",
"bytes",
"for",
"the",
"header",
"of",
"track",
"."
] |
python
|
train
|
horazont/aioxmpp
|
aioxmpp/entitycaps/caps390.py
|
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/entitycaps/caps390.py#L103-L121
|
def _process_extensions(exts):
"""
Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`.
"""
parts = [
_process_form(form)
for form in exts
]
parts.sort()
return b"".join(parts)+b"\x1c"
|
[
"def",
"_process_extensions",
"(",
"exts",
")",
":",
"parts",
"=",
"[",
"_process_form",
"(",
"form",
")",
"for",
"form",
"in",
"exts",
"]",
"parts",
".",
"sort",
"(",
")",
"return",
"b\"\"",
".",
"join",
"(",
"parts",
")",
"+",
"b\"\\x1c\""
] |
Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`.
|
[
"Generate",
"the",
"Extensions",
"String",
"from",
"an",
"iterable",
"of",
"data",
"forms",
"."
] |
python
|
train
|
ldomic/lintools
|
lintools/analysis/hbonds.py
|
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/hbonds.py#L153-L195
|
def determine_hbonds_for_drawing(self, analysis_cutoff):
"""
Since plotting all hydrogen bonds could lead to a messy plot, a cutoff has to be imple-
mented. In this function the frequency of each hydrogen bond is summated and the total
compared against analysis cutoff - a fraction multiplied by trajectory count. Those
hydrogen bonds that are present for longer than analysis cutoff will be plotted in the
final plot.
Takes:
* analysis_cutoff * - (user-defined) fraction of time a hydrogen bond has to be
present for to be plotted (default - 0.3). It is multiplied by number of trajectories
Output:
* frequency * - dictionary of hydrogen bond donor-acceptor indices and frequencies
These hydrogen bonds will be plotted in the final image.
"""
self.frequency = defaultdict(int)
for traj in self.hbonds_by_type:
for bond in self.hbonds_by_type[traj]:
# frequency[(residue_atom_idx,ligand_atom_name,residue_atom_name)]=frequency
                # residue atom name will be used to determine if hydrogen bond is interacting with a sidechain or backbone
if bond["donor_resnm"]!="LIG":
self.frequency[(bond["donor_idx"],bond["acceptor_atom"],bond["donor_atom"],bond["acceptor_idx"])] += bond["frequency"]
#check whether ligand is donor or acceptor
else:
self.frequency[(bond["acceptor_idx"],bond["donor_atom"],bond["acceptor_atom"],bond["donor_idx"])] += bond["frequency"]
#Add the frequency counts
self.frequency = {i:self.frequency[i] for i in self.frequency if self.frequency[i]>(int(len(self.trajectory))*analysis_cutoff)}
#change the ligand atomname to a heavy atom - required for plot since only heavy atoms shown in final image
self.hbonds_for_drawing = {}
for bond in self.frequency:
atomname = bond[1]
if atomname.startswith("O",0) or atomname.startswith("N",0):
lig_atom=atomname
else:
atomindex = [index for index,atom in enumerate(self.topology_data.universe.ligand.atoms) if atom.name==atomname][0]
rdkit_atom = self.topology_data.mol.GetAtomWithIdx(atomindex)
for neigh in rdkit_atom.GetNeighbors():
neigh_atom_id = neigh.GetIdx()
lig_atom = [atom.name for index,atom in enumerate(self.topology_data.universe.ligand.atoms) if index==neigh_atom_id][0]
self.hbonds_for_drawing[(bond[0],lig_atom,bond[2],bond[3])]=self.frequency[bond]
|
[
"def",
"determine_hbonds_for_drawing",
"(",
"self",
",",
"analysis_cutoff",
")",
":",
"self",
".",
"frequency",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"traj",
"in",
"self",
".",
"hbonds_by_type",
":",
"for",
"bond",
"in",
"self",
".",
"hbonds_by_type",
"[",
"traj",
"]",
":",
"# frequency[(residue_atom_idx,ligand_atom_name,residue_atom_name)]=frequency",
"# residue atom name will be used to determine if hydrogen bond is interacting with a sidechain or bakcbone",
"if",
"bond",
"[",
"\"donor_resnm\"",
"]",
"!=",
"\"LIG\"",
":",
"self",
".",
"frequency",
"[",
"(",
"bond",
"[",
"\"donor_idx\"",
"]",
",",
"bond",
"[",
"\"acceptor_atom\"",
"]",
",",
"bond",
"[",
"\"donor_atom\"",
"]",
",",
"bond",
"[",
"\"acceptor_idx\"",
"]",
")",
"]",
"+=",
"bond",
"[",
"\"frequency\"",
"]",
"#check whether ligand is donor or acceptor",
"else",
":",
"self",
".",
"frequency",
"[",
"(",
"bond",
"[",
"\"acceptor_idx\"",
"]",
",",
"bond",
"[",
"\"donor_atom\"",
"]",
",",
"bond",
"[",
"\"acceptor_atom\"",
"]",
",",
"bond",
"[",
"\"donor_idx\"",
"]",
")",
"]",
"+=",
"bond",
"[",
"\"frequency\"",
"]",
"#Add the frequency counts",
"self",
".",
"frequency",
"=",
"{",
"i",
":",
"self",
".",
"frequency",
"[",
"i",
"]",
"for",
"i",
"in",
"self",
".",
"frequency",
"if",
"self",
".",
"frequency",
"[",
"i",
"]",
">",
"(",
"int",
"(",
"len",
"(",
"self",
".",
"trajectory",
")",
")",
"*",
"analysis_cutoff",
")",
"}",
"#change the ligand atomname to a heavy atom - required for plot since only heavy atoms shown in final image",
"self",
".",
"hbonds_for_drawing",
"=",
"{",
"}",
"for",
"bond",
"in",
"self",
".",
"frequency",
":",
"atomname",
"=",
"bond",
"[",
"1",
"]",
"if",
"atomname",
".",
"startswith",
"(",
"\"O\"",
",",
"0",
")",
"or",
"atomname",
".",
"startswith",
"(",
"\"N\"",
",",
"0",
")",
":",
"lig_atom",
"=",
"atomname",
"else",
":",
"atomindex",
"=",
"[",
"index",
"for",
"index",
",",
"atom",
"in",
"enumerate",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
".",
"atoms",
")",
"if",
"atom",
".",
"name",
"==",
"atomname",
"]",
"[",
"0",
"]",
"rdkit_atom",
"=",
"self",
".",
"topology_data",
".",
"mol",
".",
"GetAtomWithIdx",
"(",
"atomindex",
")",
"for",
"neigh",
"in",
"rdkit_atom",
".",
"GetNeighbors",
"(",
")",
":",
"neigh_atom_id",
"=",
"neigh",
".",
"GetIdx",
"(",
")",
"lig_atom",
"=",
"[",
"atom",
".",
"name",
"for",
"index",
",",
"atom",
"in",
"enumerate",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand",
".",
"atoms",
")",
"if",
"index",
"==",
"neigh_atom_id",
"]",
"[",
"0",
"]",
"self",
".",
"hbonds_for_drawing",
"[",
"(",
"bond",
"[",
"0",
"]",
",",
"lig_atom",
",",
"bond",
"[",
"2",
"]",
",",
"bond",
"[",
"3",
"]",
")",
"]",
"=",
"self",
".",
"frequency",
"[",
"bond",
"]"
] |
Since plotting all hydrogen bonds could lead to a messy plot, a cutoff has to be imple-
mented. In this function the frequency of each hydrogen bond is summated and the total
compared against analysis cutoff - a fraction multiplied by trajectory count. Those
hydrogen bonds that are present for longer than analysis cutoff will be plotted in the
final plot.
Takes:
* analysis_cutoff * - (user-defined) fraction of time a hydrogen bond has to be
present for to be plotted (default - 0.3). It is multiplied by number of trajectories
Output:
* frequency * - dictionary of hydrogen bond donor-acceptor indices and frequencies
These hydrogen bonds will be plotted in the final image.
|
[
"Since",
"plotting",
"all",
"hydrogen",
"bonds",
"could",
"lead",
"to",
"a",
"messy",
"plot",
"a",
"cutoff",
"has",
"to",
"be",
"imple",
"-",
"mented",
".",
"In",
"this",
"function",
"the",
"frequency",
"of",
"each",
"hydrogen",
"bond",
"is",
"summated",
"and",
"the",
"total",
"compared",
"against",
"analysis",
"cutoff",
"-",
"a",
"fraction",
"multiplied",
"by",
"trajectory",
"count",
".",
"Those",
"hydrogen",
"bonds",
"that",
"are",
"present",
"for",
"longer",
"than",
"analysis",
"cutoff",
"will",
"be",
"plotted",
"in",
"the",
"final",
"plot",
"."
] |
python
|
train
|
nicolargo/glances
|
glances/stats_server.py
|
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/stats_server.py#L41-L49
|
def update(self, input_stats=None):
"""Update the stats."""
input_stats = input_stats or {}
# Force update of all the stats
super(GlancesStatsServer, self).update()
# Build all_stats variable (concatenation of all the stats)
self.all_stats = self._set_stats(input_stats)
|
[
"def",
"update",
"(",
"self",
",",
"input_stats",
"=",
"None",
")",
":",
"input_stats",
"=",
"input_stats",
"or",
"{",
"}",
"# Force update of all the stats",
"super",
"(",
"GlancesStatsServer",
",",
"self",
")",
".",
"update",
"(",
")",
"# Build all_stats variable (concatenation of all the stats)",
"self",
".",
"all_stats",
"=",
"self",
".",
"_set_stats",
"(",
"input_stats",
")"
] |
Update the stats.
|
[
"Update",
"the",
"stats",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/boto_apigateway.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L1273-L1295
|
def delete_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
region=None, key=None, keyid=None, profile=None):
'''
Deletes an integration response for a given method in a given API
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.delete_api_integration_response restApiId resourcePath httpMethod statusCode
'''
try:
resource = describe_api_resource(restApiId, resourcePath, region=region,
key=key, keyid=keyid, profile=profile).get('resource')
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_integration_response(restApiId=restApiId, resourceId=resource['id'],
httpMethod=httpMethod, statusCode=statusCode)
return {'deleted': True}
return {'deleted': False, 'error': 'no such resource'}
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
|
[
"def",
"delete_api_integration_response",
"(",
"restApiId",
",",
"resourcePath",
",",
"httpMethod",
",",
"statusCode",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"resource",
"=",
"describe_api_resource",
"(",
"restApiId",
",",
"resourcePath",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
".",
"get",
"(",
"'resource'",
")",
"if",
"resource",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"conn",
".",
"delete_integration_response",
"(",
"restApiId",
"=",
"restApiId",
",",
"resourceId",
"=",
"resource",
"[",
"'id'",
"]",
",",
"httpMethod",
"=",
"httpMethod",
",",
"statusCode",
"=",
"statusCode",
")",
"return",
"{",
"'deleted'",
":",
"True",
"}",
"return",
"{",
"'deleted'",
":",
"False",
",",
"'error'",
":",
"'no such resource'",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'deleted'",
":",
"False",
",",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}"
] |
Deletes an integration response for a given method in a given API
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.delete_api_integration_response restApiId resourcePath httpMethod statusCode
|
[
"Deletes",
"an",
"integration",
"response",
"for",
"a",
"given",
"method",
"in",
"a",
"given",
"API"
] |
python
|
train
|
CRS-support/ftw
|
ftw/http.py
|
https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L260-L282
|
def build_socket(self):
"""
Generate either an HTTPS or HTTP socket
"""
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.SOCKET_TIMEOUT)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Check if TLS
if self.request_object.protocol == 'https':
self.sock = ssl.wrap_socket(self.sock, ciphers=self.CIPHERS)
self.sock.connect(
(self.request_object.dest_addr, self.request_object.port))
except socket.error as msg:
raise errors.TestError(
'Failed to connect to server',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'message': msg,
'function': 'http.HttpUA.build_socket'
})
|
[
"def",
"build_socket",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"self",
".",
"sock",
".",
"settimeout",
"(",
"self",
".",
"SOCKET_TIMEOUT",
")",
"self",
".",
"sock",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"# Check if TLS",
"if",
"self",
".",
"request_object",
".",
"protocol",
"==",
"'https'",
":",
"self",
".",
"sock",
"=",
"ssl",
".",
"wrap_socket",
"(",
"self",
".",
"sock",
",",
"ciphers",
"=",
"self",
".",
"CIPHERS",
")",
"self",
".",
"sock",
".",
"connect",
"(",
"(",
"self",
".",
"request_object",
".",
"dest_addr",
",",
"self",
".",
"request_object",
".",
"port",
")",
")",
"except",
"socket",
".",
"error",
"as",
"msg",
":",
"raise",
"errors",
".",
"TestError",
"(",
"'Failed to connect to server'",
",",
"{",
"'host'",
":",
"self",
".",
"request_object",
".",
"dest_addr",
",",
"'port'",
":",
"self",
".",
"request_object",
".",
"port",
",",
"'proto'",
":",
"self",
".",
"request_object",
".",
"protocol",
",",
"'message'",
":",
"msg",
",",
"'function'",
":",
"'http.HttpUA.build_socket'",
"}",
")"
] |
Generate either an HTTPS or HTTP socket
|
[
"Generate",
"either",
"an",
"HTTPS",
"or",
"HTTP",
"socket"
] |
python
|
train
|
learningequality/ricecooker
|
ricecooker/classes/questions.py
|
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/questions.py#L136-L154
|
def parse_html(self, text):
""" parse_html: Properly formats any img tags that might be in content
Args:
text (str): text to parse
Returns: string with properly formatted images
"""
bs = BeautifulSoup(text, "html5lib")
file_reg = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE)
tags = bs.findAll('img')
for tag in tags:
# Look for src attribute, remove formatting if added to image
src_text = tag.get("src") or ""
formatted_src_match = file_reg.search(src_text)
src_text = formatted_src_match.group(2) if formatted_src_match else src_text
alt_text = tag.get("alt") or ""
tag.replaceWith("".format(alt=alt_text, src=src_text))
return html.unescape(bs.find('body').renderContents().decode('utf-8'))
|
[
"def",
"parse_html",
"(",
"self",
",",
"text",
")",
":",
"bs",
"=",
"BeautifulSoup",
"(",
"text",
",",
"\"html5lib\"",
")",
"file_reg",
"=",
"re",
".",
"compile",
"(",
"MARKDOWN_IMAGE_REGEX",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"tags",
"=",
"bs",
".",
"findAll",
"(",
"'img'",
")",
"for",
"tag",
"in",
"tags",
":",
"# Look for src attribute, remove formatting if added to image",
"src_text",
"=",
"tag",
".",
"get",
"(",
"\"src\"",
")",
"or",
"\"\"",
"formatted_src_match",
"=",
"file_reg",
".",
"search",
"(",
"src_text",
")",
"src_text",
"=",
"formatted_src_match",
".",
"group",
"(",
"2",
")",
"if",
"formatted_src_match",
"else",
"src_text",
"alt_text",
"=",
"tag",
".",
"get",
"(",
"\"alt\"",
")",
"or",
"\"\"",
"tag",
".",
"replaceWith",
"(",
"\"\"",
".",
"format",
"(",
"alt",
"=",
"alt_text",
",",
"src",
"=",
"src_text",
")",
")",
"return",
"html",
".",
"unescape",
"(",
"bs",
".",
"find",
"(",
"'body'",
")",
".",
"renderContents",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")"
] |
parse_html: Properly formats any img tags that might be in content
Args:
text (str): text to parse
Returns: string with properly formatted images
|
[
"parse_html",
":",
"Properly",
"formats",
"any",
"img",
"tags",
"that",
"might",
"be",
"in",
"content",
"Args",
":",
"text",
"(",
"str",
")",
":",
"text",
"to",
"parse",
"Returns",
":",
"string",
"with",
"properly",
"formatted",
"images"
] |
python
|
train
|
yyuu/botornado
|
boto/ec2/autoscale/__init__.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/autoscale/__init__.py#L643-L667
|
def get_all_tags(self, filters=None, max_records=None, next_token=None):
"""
Lists the Auto Scaling group tags.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter.
:type filters: dict
:param filters: The value of the filter type used to identify
the tags to be returned. NOT IMPLEMENTED YET.
:type max_records: int
:param max_records: Maximum number of tags to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.tag.Tag`
instances.
"""
params = {}
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeTags', params,
[('member', Tag)])
|
[
"def",
"get_all_tags",
"(",
"self",
",",
"filters",
"=",
"None",
",",
"max_records",
"=",
"None",
",",
"next_token",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"max_records",
":",
"params",
"[",
"'MaxRecords'",
"]",
"=",
"max_records",
"if",
"next_token",
":",
"params",
"[",
"'NextToken'",
"]",
"=",
"next_token",
"return",
"self",
".",
"get_list",
"(",
"'DescribeTags'",
",",
"params",
",",
"[",
"(",
"'member'",
",",
"Tag",
")",
"]",
")"
] |
Lists the Auto Scaling group tags.
This action supports pagination by returning a token if there are more
pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter.
:type filters: dict
:param filters: The value of the filter type used to identify
the tags to be returned. NOT IMPLEMENTED YET.
:type max_records: int
:param max_records: Maximum number of tags to return.
:rtype: list
:returns: List of :class:`boto.ec2.autoscale.tag.Tag`
instances.
|
[
"Lists",
"the",
"Auto",
"Scaling",
"group",
"tags",
"."
] |
python
|
train
|
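The get_all_tags record above belongs to the legacy boto 2 autoscale connection. A hedged usage sketch, assuming boto 2 is installed and AWS credentials are configured; the region name is illustrative:

import boto.ec2.autoscale

conn = boto.ec2.autoscale.connect_to_region('us-east-1')
for tag in conn.get_all_tags(max_records=50):
    # key, value and resource_id are attributes of boto.ec2.autoscale.tag.Tag
    print(tag.resource_id, tag.key, tag.value)
|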
Qiskit/qiskit-terra
|
qiskit/quantum_info/operators/pauli.py
|
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/pauli.py#L273-L295
|
def to_spmatrix(self):
r"""
Convert Pauli to a sparse matrix representation (CSR format).
Order is q_{n-1} .... q_0, i.e., $P_{n-1} \otimes ... P_0$
Returns:
scipy.sparse.csr_matrix: a sparse matrix with CSR format that
            represents the pauli.
"""
mat = sparse.coo_matrix(1)
for z, x in zip(self._z, self._x):
if not z and not x: # I
mat = sparse.bmat([[mat, None], [None, mat]], format='coo')
elif z and not x: # Z
mat = sparse.bmat([[mat, None], [None, -mat]], format='coo')
elif not z and x: # X
mat = sparse.bmat([[None, mat], [mat, None]], format='coo')
else: # Y
mat = mat * 1j
mat = sparse.bmat([[None, -mat], [mat, None]], format='coo')
return mat.tocsr()
|
[
"def",
"to_spmatrix",
"(",
"self",
")",
":",
"mat",
"=",
"sparse",
".",
"coo_matrix",
"(",
"1",
")",
"for",
"z",
",",
"x",
"in",
"zip",
"(",
"self",
".",
"_z",
",",
"self",
".",
"_x",
")",
":",
"if",
"not",
"z",
"and",
"not",
"x",
":",
"# I",
"mat",
"=",
"sparse",
".",
"bmat",
"(",
"[",
"[",
"mat",
",",
"None",
"]",
",",
"[",
"None",
",",
"mat",
"]",
"]",
",",
"format",
"=",
"'coo'",
")",
"elif",
"z",
"and",
"not",
"x",
":",
"# Z",
"mat",
"=",
"sparse",
".",
"bmat",
"(",
"[",
"[",
"mat",
",",
"None",
"]",
",",
"[",
"None",
",",
"-",
"mat",
"]",
"]",
",",
"format",
"=",
"'coo'",
")",
"elif",
"not",
"z",
"and",
"x",
":",
"# X",
"mat",
"=",
"sparse",
".",
"bmat",
"(",
"[",
"[",
"None",
",",
"mat",
"]",
",",
"[",
"mat",
",",
"None",
"]",
"]",
",",
"format",
"=",
"'coo'",
")",
"else",
":",
"# Y",
"mat",
"=",
"mat",
"*",
"1j",
"mat",
"=",
"sparse",
".",
"bmat",
"(",
"[",
"[",
"None",
",",
"-",
"mat",
"]",
",",
"[",
"mat",
",",
"None",
"]",
"]",
",",
"format",
"=",
"'coo'",
")",
"return",
"mat",
".",
"tocsr",
"(",
")"
] |
r"""
Convert Pauli to a sparse matrix representation (CSR format).
Order is q_{n-1} .... q_0, i.e., $P_{n-1} \otimes ... P_0$
Returns:
scipy.sparse.csr_matrix: a sparse matrix with CSR format that
        represents the pauli.
|
[
"r",
"Convert",
"Pauli",
"to",
"a",
"sparse",
"matrix",
"representation",
"(",
"CSR",
"format",
")",
"."
] |
python
|
test
|
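The to_spmatrix record above predates the current quantum_info operator classes. A hedged usage sketch, assuming a Terra version contemporary with the record in which Pauli accepts a label keyword; the two-qubit label 'XZ' is illustrative:

from qiskit.quantum_info import Pauli

p = Pauli(label='XZ')    # qubit 1 gets X, qubit 0 gets Z (q_{n-1} ... q_0 ordering)
m = p.to_spmatrix()      # scipy.sparse CSR matrix of shape (4, 4)
print(m.toarray())
|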
inveniosoftware/invenio-oaiserver
|
invenio_oaiserver/percolator.py
|
https://github.com/inveniosoftware/invenio-oaiserver/blob/eae765e32bd816ddc5612d4b281caf205518b512/invenio_oaiserver/percolator.py#L118-L142
|
def get_record_sets(record):
"""Find matching sets."""
# get lists of sets with search_pattern equals to None but already in the
# set list inside the record
record_sets = set(record.get('_oai', {}).get('sets', []))
for spec in _build_cache():
if spec in record_sets:
yield spec
# get list of sets that match using percolator
index, doc_type = RecordIndexer().record_to_index(record)
document = record.dumps()
percolator_doc_type = _get_percolator_doc_type(index)
_create_percolator_mapping(index, percolator_doc_type)
results = _percolate_query(index, doc_type, percolator_doc_type, document)
prefix = 'oaiset-'
prefix_len = len(prefix)
for match in results:
set_name = match['_id']
if set_name.startswith(prefix):
name = set_name[prefix_len:]
yield name
    return
|
[
"def",
"get_record_sets",
"(",
"record",
")",
":",
"# get lists of sets with search_pattern equals to None but already in the",
"# set list inside the record",
"record_sets",
"=",
"set",
"(",
"record",
".",
"get",
"(",
"'_oai'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'sets'",
",",
"[",
"]",
")",
")",
"for",
"spec",
"in",
"_build_cache",
"(",
")",
":",
"if",
"spec",
"in",
"record_sets",
":",
"yield",
"spec",
"# get list of sets that match using percolator",
"index",
",",
"doc_type",
"=",
"RecordIndexer",
"(",
")",
".",
"record_to_index",
"(",
"record",
")",
"document",
"=",
"record",
".",
"dumps",
"(",
")",
"percolator_doc_type",
"=",
"_get_percolator_doc_type",
"(",
"index",
")",
"_create_percolator_mapping",
"(",
"index",
",",
"percolator_doc_type",
")",
"results",
"=",
"_percolate_query",
"(",
"index",
",",
"doc_type",
",",
"percolator_doc_type",
",",
"document",
")",
"prefix",
"=",
"'oaiset-'",
"prefix_len",
"=",
"len",
"(",
"prefix",
")",
"for",
"match",
"in",
"results",
":",
"set_name",
"=",
"match",
"[",
"'_id'",
"]",
"if",
"set_name",
".",
"startswith",
"(",
"prefix",
")",
":",
"name",
"=",
"set_name",
"[",
"prefix_len",
":",
"]",
"yield",
"name",
"raise",
"StopIteration"
] |
Find matching sets.
|
[
"Find",
"matching",
"sets",
"."
] |
python
|
train
|
StaticCube/python-synology
|
SynologyDSM/SynologyDSM.py
|
https://github.com/StaticCube/python-synology/blob/a5446a052fc91a38f7589803dc7a654180db2566/SynologyDSM/SynologyDSM.py#L322-L327
|
def _get_disk(self, disk_id):
"""Returns a specific disk"""
if self._data is not None:
for disk in self._data["disks"]:
if disk["id"] == disk_id:
return disk
|
[
"def",
"_get_disk",
"(",
"self",
",",
"disk_id",
")",
":",
"if",
"self",
".",
"_data",
"is",
"not",
"None",
":",
"for",
"disk",
"in",
"self",
".",
"_data",
"[",
"\"disks\"",
"]",
":",
"if",
"disk",
"[",
"\"id\"",
"]",
"==",
"disk_id",
":",
"return",
"disk"
] |
Returns a specific disk
|
[
"Returns",
"a",
"specific",
"disk"
] |
python
|
test
|
fumitoh/modelx
|
modelx/core/spacecontainer.py
|
https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/spacecontainer.py#L41-L52
|
def cur_space(self, name=None):
"""Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned.
"""
if name is None:
return self._impl.model.currentspace.interface
else:
self._impl.model.currentspace = self._impl.spaces[name]
return self.cur_space()
|
[
"def",
"cur_space",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"return",
"self",
".",
"_impl",
".",
"model",
".",
"currentspace",
".",
"interface",
"else",
":",
"self",
".",
"_impl",
".",
"model",
".",
"currentspace",
"=",
"self",
".",
"_impl",
".",
"spaces",
"[",
"name",
"]",
"return",
"self",
".",
"cur_space",
"(",
")"
] |
Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned.
|
[
"Set",
"the",
"current",
"space",
"to",
"Space",
"name",
"and",
"return",
"it",
"."
] |
python
|
valid
|
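The cur_space record above switches or reads the model's current space. A hedged sketch, assuming modelx is installed and that creating a space makes it the current one; the model and space names are illustrative:

import modelx as mx

model = mx.new_model('demo')
model.new_space('SpaceA')
model.new_space('SpaceB')
current = model.cur_space()            # no argument: return the current space
space_a = model.cur_space('SpaceA')    # with a name: make it current and return it
print(current.name, space_a.name)
|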
gwastro/pycbc
|
pycbc/workflow/splittable.py
|
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/splittable.py#L121-L175
|
def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags):
'''
Function for setting up the splitting jobs as part of the workflow.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the jobs will be added to.
input_tables : pycbc.workflow.core.FileList
The input files to be split up.
out_dir : path
The directory in which output will be written.
Returns
--------
split_table_outs : pycbc.workflow.core.FileList
The list of split up files as output from this job.
'''
cp = workflow.cp
# Get values from ini file
try:
num_splits = cp.get_opt_tags("workflow-splittable",
"splittable-num-banks", tags)
except BaseException:
inj_interval = int(cp.get_opt_tags("workflow-splittable",
"splitinjtable-interval", tags))
if cp.has_option_tags("em_bright_filter", "max-keep", tags) and \
cp.has_option("workflow-injections", "em-bright-only"):
num_injs = int(cp.get_opt_tags("em_bright_filter", "max-keep",
tags))
else:
num_injs = int(cp.get_opt_tags("workflow-injections", "num-injs",
tags))
inj_tspace = float(abs(workflow.analysis_time)) / num_injs
num_splits = int(inj_interval // inj_tspace) + 1
split_exe_tag = cp.get_opt_tags("workflow-splittable",
"splittable-exe-tag", tags)
split_exe = os.path.basename(cp.get("executables", split_exe_tag))
# Select the appropriate class
exe_class = select_splitfilejob_instance(split_exe)
# Set up output structure
out_file_groups = FileList([])
# Set up the condorJob class for the current executable
curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits,
out_dir=out_dir)
for input in input_tables:
node = curr_exe_job.create_node(input, tags=tags)
workflow.add_node(node)
out_file_groups += node.output_files
return out_file_groups
|
[
"def",
"setup_splittable_dax_generated",
"(",
"workflow",
",",
"input_tables",
",",
"out_dir",
",",
"tags",
")",
":",
"cp",
"=",
"workflow",
".",
"cp",
"# Get values from ini file",
"try",
":",
"num_splits",
"=",
"cp",
".",
"get_opt_tags",
"(",
"\"workflow-splittable\"",
",",
"\"splittable-num-banks\"",
",",
"tags",
")",
"except",
"BaseException",
":",
"inj_interval",
"=",
"int",
"(",
"cp",
".",
"get_opt_tags",
"(",
"\"workflow-splittable\"",
",",
"\"splitinjtable-interval\"",
",",
"tags",
")",
")",
"if",
"cp",
".",
"has_option_tags",
"(",
"\"em_bright_filter\"",
",",
"\"max-keep\"",
",",
"tags",
")",
"and",
"cp",
".",
"has_option",
"(",
"\"workflow-injections\"",
",",
"\"em-bright-only\"",
")",
":",
"num_injs",
"=",
"int",
"(",
"cp",
".",
"get_opt_tags",
"(",
"\"em_bright_filter\"",
",",
"\"max-keep\"",
",",
"tags",
")",
")",
"else",
":",
"num_injs",
"=",
"int",
"(",
"cp",
".",
"get_opt_tags",
"(",
"\"workflow-injections\"",
",",
"\"num-injs\"",
",",
"tags",
")",
")",
"inj_tspace",
"=",
"float",
"(",
"abs",
"(",
"workflow",
".",
"analysis_time",
")",
")",
"/",
"num_injs",
"num_splits",
"=",
"int",
"(",
"inj_interval",
"//",
"inj_tspace",
")",
"+",
"1",
"split_exe_tag",
"=",
"cp",
".",
"get_opt_tags",
"(",
"\"workflow-splittable\"",
",",
"\"splittable-exe-tag\"",
",",
"tags",
")",
"split_exe",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"cp",
".",
"get",
"(",
"\"executables\"",
",",
"split_exe_tag",
")",
")",
"# Select the appropriate class",
"exe_class",
"=",
"select_splitfilejob_instance",
"(",
"split_exe",
")",
"# Set up output structure",
"out_file_groups",
"=",
"FileList",
"(",
"[",
"]",
")",
"# Set up the condorJob class for the current executable",
"curr_exe_job",
"=",
"exe_class",
"(",
"workflow",
".",
"cp",
",",
"split_exe_tag",
",",
"num_splits",
",",
"out_dir",
"=",
"out_dir",
")",
"for",
"input",
"in",
"input_tables",
":",
"node",
"=",
"curr_exe_job",
".",
"create_node",
"(",
"input",
",",
"tags",
"=",
"tags",
")",
"workflow",
".",
"add_node",
"(",
"node",
")",
"out_file_groups",
"+=",
"node",
".",
"output_files",
"return",
"out_file_groups"
] |
Function for setting up the splitting jobs as part of the workflow.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the jobs will be added to.
input_tables : pycbc.workflow.core.FileList
The input files to be split up.
out_dir : path
The directory in which output will be written.
Returns
--------
split_table_outs : pycbc.workflow.core.FileList
The list of split up files as output from this job.
|
[
"Function",
"for",
"setting",
"up",
"the",
"splitting",
"jobs",
"as",
"part",
"of",
"the",
"workflow",
"."
] |
python
|
train
|
softlayer/softlayer-python
|
SoftLayer/CLI/virt/detail.py
|
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/virt/detail.py#L24-L145
|
def cli(env, identifier, passwords=False, price=False):
"""Get details for a virtual server."""
vsi = SoftLayer.VSManager(env.client)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
result = vsi.get_instance(vs_id)
result = utils.NestedDict(result)
table.add_row(['id', result['id']])
table.add_row(['guid', result['globalIdentifier']])
table.add_row(['hostname', result['hostname']])
table.add_row(['domain', result['domain']])
table.add_row(['fqdn', result['fullyQualifiedDomainName']])
table.add_row(['status', formatting.FormattedItem(
result['status']['keyName'] or formatting.blank(),
result['status']['name'] or formatting.blank()
)])
table.add_row(['state', formatting.FormattedItem(
utils.lookup(result, 'powerState', 'keyName'),
utils.lookup(result, 'powerState', 'name'),
)])
table.add_row(['active_transaction', formatting.active_txn(result)])
table.add_row(['datacenter',
result['datacenter']['name'] or formatting.blank()])
_cli_helper_dedicated_host(env, result, table)
operating_system = utils.lookup(result,
'operatingSystem',
'softwareLicense',
'softwareDescription') or {}
table.add_row(['os', operating_system.get('name') or formatting.blank()])
table.add_row(['os_version',
operating_system.get('version') or formatting.blank()])
table.add_row(['cores', result['maxCpu']])
table.add_row(['memory', formatting.mb_to_gb(result['maxMemory'])])
table.add_row(['public_ip',
result['primaryIpAddress'] or formatting.blank()])
table.add_row(['private_ip',
result['primaryBackendIpAddress'] or formatting.blank()])
table.add_row(['private_only', result['privateNetworkOnlyFlag']])
table.add_row(['private_cpu', result['dedicatedAccountHostOnlyFlag']])
table.add_row(['created', result['createDate']])
table.add_row(['modified', result['modifyDate']])
if utils.lookup(result, 'billingItem') != []:
table.add_row(['owner', formatting.FormattedItem(
utils.lookup(result, 'billingItem', 'orderItem',
'order', 'userRecord',
'username') or formatting.blank(),
)])
else:
table.add_row(['owner', formatting.blank()])
vlan_table = formatting.Table(['type', 'number', 'id'])
for vlan in result['networkVlans']:
vlan_table.add_row([
vlan['networkSpace'], vlan['vlanNumber'], vlan['id']])
table.add_row(['vlans', vlan_table])
if result.get('networkComponents'):
secgroup_table = formatting.Table(['interface', 'id', 'name'])
has_secgroups = False
for comp in result.get('networkComponents'):
interface = 'PRIVATE' if comp['port'] == 0 else 'PUBLIC'
for binding in comp['securityGroupBindings']:
has_secgroups = True
secgroup = binding['securityGroup']
secgroup_table.add_row([
interface, secgroup['id'],
secgroup.get('name') or formatting.blank()])
if has_secgroups:
table.add_row(['security_groups', secgroup_table])
if result.get('notes'):
table.add_row(['notes', result['notes']])
if price:
total_price = utils.lookup(result,
'billingItem',
'nextInvoiceTotalRecurringAmount') or 0
total_price += sum(p['nextInvoiceTotalRecurringAmount']
for p
in utils.lookup(result,
'billingItem',
'children') or [])
table.add_row(['price_rate', total_price])
if passwords:
pass_table = formatting.Table(['software', 'username', 'password'])
for component in result['softwareComponents']:
for item in component['passwords']:
pass_table.add_row([
utils.lookup(component,
'softwareLicense',
'softwareDescription',
'name'),
item['username'],
item['password'],
])
table.add_row(['users', pass_table])
table.add_row(['tags', formatting.tags(result['tagReferences'])])
# Test to see if this actually has a primary (public) ip address
try:
if not result['privateNetworkOnlyFlag']:
ptr_domains = env.client.call(
'Virtual_Guest', 'getReverseDomainRecords',
id=vs_id,
)
for ptr_domain in ptr_domains:
for ptr in ptr_domain['resourceRecords']:
table.add_row(['ptr', ptr['data']])
except SoftLayer.SoftLayerAPIError:
pass
env.fout(table)
|
[
"def",
"cli",
"(",
"env",
",",
"identifier",
",",
"passwords",
"=",
"False",
",",
"price",
"=",
"False",
")",
":",
"vsi",
"=",
"SoftLayer",
".",
"VSManager",
"(",
"env",
".",
"client",
")",
"table",
"=",
"formatting",
".",
"KeyValueTable",
"(",
"[",
"'name'",
",",
"'value'",
"]",
")",
"table",
".",
"align",
"[",
"'name'",
"]",
"=",
"'r'",
"table",
".",
"align",
"[",
"'value'",
"]",
"=",
"'l'",
"vs_id",
"=",
"helpers",
".",
"resolve_id",
"(",
"vsi",
".",
"resolve_ids",
",",
"identifier",
",",
"'VS'",
")",
"result",
"=",
"vsi",
".",
"get_instance",
"(",
"vs_id",
")",
"result",
"=",
"utils",
".",
"NestedDict",
"(",
"result",
")",
"table",
".",
"add_row",
"(",
"[",
"'id'",
",",
"result",
"[",
"'id'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'guid'",
",",
"result",
"[",
"'globalIdentifier'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'hostname'",
",",
"result",
"[",
"'hostname'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'domain'",
",",
"result",
"[",
"'domain'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'fqdn'",
",",
"result",
"[",
"'fullyQualifiedDomainName'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'status'",
",",
"formatting",
".",
"FormattedItem",
"(",
"result",
"[",
"'status'",
"]",
"[",
"'keyName'",
"]",
"or",
"formatting",
".",
"blank",
"(",
")",
",",
"result",
"[",
"'status'",
"]",
"[",
"'name'",
"]",
"or",
"formatting",
".",
"blank",
"(",
")",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'state'",
",",
"formatting",
".",
"FormattedItem",
"(",
"utils",
".",
"lookup",
"(",
"result",
",",
"'powerState'",
",",
"'keyName'",
")",
",",
"utils",
".",
"lookup",
"(",
"result",
",",
"'powerState'",
",",
"'name'",
")",
",",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'active_transaction'",
",",
"formatting",
".",
"active_txn",
"(",
"result",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'datacenter'",
",",
"result",
"[",
"'datacenter'",
"]",
"[",
"'name'",
"]",
"or",
"formatting",
".",
"blank",
"(",
")",
"]",
")",
"_cli_helper_dedicated_host",
"(",
"env",
",",
"result",
",",
"table",
")",
"operating_system",
"=",
"utils",
".",
"lookup",
"(",
"result",
",",
"'operatingSystem'",
",",
"'softwareLicense'",
",",
"'softwareDescription'",
")",
"or",
"{",
"}",
"table",
".",
"add_row",
"(",
"[",
"'os'",
",",
"operating_system",
".",
"get",
"(",
"'name'",
")",
"or",
"formatting",
".",
"blank",
"(",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'os_version'",
",",
"operating_system",
".",
"get",
"(",
"'version'",
")",
"or",
"formatting",
".",
"blank",
"(",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'cores'",
",",
"result",
"[",
"'maxCpu'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'memory'",
",",
"formatting",
".",
"mb_to_gb",
"(",
"result",
"[",
"'maxMemory'",
"]",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'public_ip'",
",",
"result",
"[",
"'primaryIpAddress'",
"]",
"or",
"formatting",
".",
"blank",
"(",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'private_ip'",
",",
"result",
"[",
"'primaryBackendIpAddress'",
"]",
"or",
"formatting",
".",
"blank",
"(",
")",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'private_only'",
",",
"result",
"[",
"'privateNetworkOnlyFlag'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'private_cpu'",
",",
"result",
"[",
"'dedicatedAccountHostOnlyFlag'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'created'",
",",
"result",
"[",
"'createDate'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'modified'",
",",
"result",
"[",
"'modifyDate'",
"]",
"]",
")",
"if",
"utils",
".",
"lookup",
"(",
"result",
",",
"'billingItem'",
")",
"!=",
"[",
"]",
":",
"table",
".",
"add_row",
"(",
"[",
"'owner'",
",",
"formatting",
".",
"FormattedItem",
"(",
"utils",
".",
"lookup",
"(",
"result",
",",
"'billingItem'",
",",
"'orderItem'",
",",
"'order'",
",",
"'userRecord'",
",",
"'username'",
")",
"or",
"formatting",
".",
"blank",
"(",
")",
",",
")",
"]",
")",
"else",
":",
"table",
".",
"add_row",
"(",
"[",
"'owner'",
",",
"formatting",
".",
"blank",
"(",
")",
"]",
")",
"vlan_table",
"=",
"formatting",
".",
"Table",
"(",
"[",
"'type'",
",",
"'number'",
",",
"'id'",
"]",
")",
"for",
"vlan",
"in",
"result",
"[",
"'networkVlans'",
"]",
":",
"vlan_table",
".",
"add_row",
"(",
"[",
"vlan",
"[",
"'networkSpace'",
"]",
",",
"vlan",
"[",
"'vlanNumber'",
"]",
",",
"vlan",
"[",
"'id'",
"]",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'vlans'",
",",
"vlan_table",
"]",
")",
"if",
"result",
".",
"get",
"(",
"'networkComponents'",
")",
":",
"secgroup_table",
"=",
"formatting",
".",
"Table",
"(",
"[",
"'interface'",
",",
"'id'",
",",
"'name'",
"]",
")",
"has_secgroups",
"=",
"False",
"for",
"comp",
"in",
"result",
".",
"get",
"(",
"'networkComponents'",
")",
":",
"interface",
"=",
"'PRIVATE'",
"if",
"comp",
"[",
"'port'",
"]",
"==",
"0",
"else",
"'PUBLIC'",
"for",
"binding",
"in",
"comp",
"[",
"'securityGroupBindings'",
"]",
":",
"has_secgroups",
"=",
"True",
"secgroup",
"=",
"binding",
"[",
"'securityGroup'",
"]",
"secgroup_table",
".",
"add_row",
"(",
"[",
"interface",
",",
"secgroup",
"[",
"'id'",
"]",
",",
"secgroup",
".",
"get",
"(",
"'name'",
")",
"or",
"formatting",
".",
"blank",
"(",
")",
"]",
")",
"if",
"has_secgroups",
":",
"table",
".",
"add_row",
"(",
"[",
"'security_groups'",
",",
"secgroup_table",
"]",
")",
"if",
"result",
".",
"get",
"(",
"'notes'",
")",
":",
"table",
".",
"add_row",
"(",
"[",
"'notes'",
",",
"result",
"[",
"'notes'",
"]",
"]",
")",
"if",
"price",
":",
"total_price",
"=",
"utils",
".",
"lookup",
"(",
"result",
",",
"'billingItem'",
",",
"'nextInvoiceTotalRecurringAmount'",
")",
"or",
"0",
"total_price",
"+=",
"sum",
"(",
"p",
"[",
"'nextInvoiceTotalRecurringAmount'",
"]",
"for",
"p",
"in",
"utils",
".",
"lookup",
"(",
"result",
",",
"'billingItem'",
",",
"'children'",
")",
"or",
"[",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'price_rate'",
",",
"total_price",
"]",
")",
"if",
"passwords",
":",
"pass_table",
"=",
"formatting",
".",
"Table",
"(",
"[",
"'software'",
",",
"'username'",
",",
"'password'",
"]",
")",
"for",
"component",
"in",
"result",
"[",
"'softwareComponents'",
"]",
":",
"for",
"item",
"in",
"component",
"[",
"'passwords'",
"]",
":",
"pass_table",
".",
"add_row",
"(",
"[",
"utils",
".",
"lookup",
"(",
"component",
",",
"'softwareLicense'",
",",
"'softwareDescription'",
",",
"'name'",
")",
",",
"item",
"[",
"'username'",
"]",
",",
"item",
"[",
"'password'",
"]",
",",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'users'",
",",
"pass_table",
"]",
")",
"table",
".",
"add_row",
"(",
"[",
"'tags'",
",",
"formatting",
".",
"tags",
"(",
"result",
"[",
"'tagReferences'",
"]",
")",
"]",
")",
"# Test to see if this actually has a primary (public) ip address",
"try",
":",
"if",
"not",
"result",
"[",
"'privateNetworkOnlyFlag'",
"]",
":",
"ptr_domains",
"=",
"env",
".",
"client",
".",
"call",
"(",
"'Virtual_Guest'",
",",
"'getReverseDomainRecords'",
",",
"id",
"=",
"vs_id",
",",
")",
"for",
"ptr_domain",
"in",
"ptr_domains",
":",
"for",
"ptr",
"in",
"ptr_domain",
"[",
"'resourceRecords'",
"]",
":",
"table",
".",
"add_row",
"(",
"[",
"'ptr'",
",",
"ptr",
"[",
"'data'",
"]",
"]",
")",
"except",
"SoftLayer",
".",
"SoftLayerAPIError",
":",
"pass",
"env",
".",
"fout",
"(",
"table",
")"
] |
Get details for a virtual server.
|
[
"Get",
"details",
"for",
"a",
"virtual",
"server",
"."
] |
python
|
train
|
awslabs/aws-sam-cli
|
samcli/commands/init/__init__.py
|
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/init/__init__.py#L30-L76
|
def cli(ctx, location, runtime, dependency_manager, output_dir, name, no_input):
""" \b
Initialize a serverless application with a SAM template, folder
structure for your Lambda functions, connected to an event source such as APIs,
S3 Buckets or DynamoDB Tables. This application includes everything you need to
get started with serverless and eventually grow into a production scale application.
\b
This command can initialize a boilerplate serverless app. If you want to create your own
template as well as use a custom location please take a look at our official documentation.
\b
Common usage:
\b
Initializes a new SAM project using Python 3.6 default template runtime
\b
$ sam init --runtime python3.6
\b
Initializes a new SAM project using Java 8 and Gradle dependency manager
\b
$ sam init --runtime java8 --dependency-manager gradle
\b
Initializes a new SAM project using custom template in a Git/Mercurial repository
\b
# gh being expanded to github url
$ sam init --location gh:aws-samples/cookiecutter-aws-sam-python
\b
$ sam init --location git+ssh://git@github.com/aws-samples/cookiecutter-aws-sam-python.git
\b
$ sam init --location hg+ssh://hg@bitbucket.org/repo/template-name
\b
Initializes a new SAM project using custom template in a Zipfile
\b
$ sam init --location /path/to/template.zip
\b
$ sam init --location https://example.com/path/to/template.zip
\b
Initializes a new SAM project using custom template in a local path
\b
$ sam init --location /path/to/template/folder
"""
# All logic must be implemented in the `do_cli` method. This helps ease unit tests
do_cli(ctx, location, runtime, dependency_manager, output_dir,
name, no_input)
|
[
"def",
"cli",
"(",
"ctx",
",",
"location",
",",
"runtime",
",",
"dependency_manager",
",",
"output_dir",
",",
"name",
",",
"no_input",
")",
":",
"# All logic must be implemented in the `do_cli` method. This helps ease unit tests",
"do_cli",
"(",
"ctx",
",",
"location",
",",
"runtime",
",",
"dependency_manager",
",",
"output_dir",
",",
"name",
",",
"no_input",
")"
] |
\b
Initialize a serverless application with a SAM template, folder
structure for your Lambda functions, connected to an event source such as APIs,
S3 Buckets or DynamoDB Tables. This application includes everything you need to
get started with serverless and eventually grow into a production scale application.
\b
This command can initialize a boilerplate serverless app. If you want to create your own
template as well as use a custom location please take a look at our official documentation.
\b
Common usage:
\b
Initializes a new SAM project using Python 3.6 default template runtime
\b
$ sam init --runtime python3.6
\b
Initializes a new SAM project using Java 8 and Gradle dependency manager
\b
$ sam init --runtime java8 --dependency-manager gradle
\b
Initializes a new SAM project using custom template in a Git/Mercurial repository
\b
# gh being expanded to github url
$ sam init --location gh:aws-samples/cookiecutter-aws-sam-python
\b
$ sam init --location git+ssh://git@github.com/aws-samples/cookiecutter-aws-sam-python.git
\b
$ sam init --location hg+ssh://hg@bitbucket.org/repo/template-name
\b
Initializes a new SAM project using custom template in a Zipfile
\b
$ sam init --location /path/to/template.zip
\b
$ sam init --location https://example.com/path/to/template.zip
\b
Initializes a new SAM project using custom template in a local path
\b
$ sam init --location /path/to/template/folder
|
[
"\\",
"b",
"Initialize",
"a",
"serverless",
"application",
"with",
"a",
"SAM",
"template",
"folder",
"structure",
"for",
"your",
"Lambda",
"functions",
"connected",
"to",
"an",
"event",
"source",
"such",
"as",
"APIs",
"S3",
"Buckets",
"or",
"DynamoDB",
"Tables",
".",
"This",
"application",
"includes",
"everything",
"you",
"need",
"to",
"get",
"started",
"with",
"serverless",
"and",
"eventually",
"grow",
"into",
"a",
"production",
"scale",
"application",
".",
"\\",
"b",
"This",
"command",
"can",
"initialize",
"a",
"boilerplate",
"serverless",
"app",
".",
"If",
"you",
"want",
"to",
"create",
"your",
"own",
"template",
"as",
"well",
"as",
"use",
"a",
"custom",
"location",
"please",
"take",
"a",
"look",
"at",
"our",
"official",
"documentation",
"."
] |
python
|
train
|
UDST/urbansim
|
urbansim/models/transition.py
|
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/transition.py#L261-L335
|
def transition(self, data, year):
"""
Add or remove rows to/from a table according to the prescribed
growth rate for this model and year.
Parameters
----------
data : pandas.DataFrame
Rows will be removed from or added to this table.
year : None, optional
Here for compatibility with other transition models,
but ignored.
Returns
-------
updated : pandas.DataFrame
Table with rows removed or added.
added : pandas.Index
New indexes of the rows that were added.
copied : pandas.Index
Indexes of rows that were copied. A row copied multiple times
will have multiple entries.
removed : pandas.Index
Index of rows that were removed.
"""
logger.debug('start: tabular transition')
if year not in self._config_table.index:
raise ValueError('No targets for given year: {}'.format(year))
# want this to be a DataFrame
year_config = self._config_table.loc[[year]]
logger.debug('transitioning {} segments'.format(len(year_config)))
segments = []
added_indexes = []
copied_indexes = []
removed_indexes = []
# since we're looping over discrete segments we need to track
# out here where their new indexes will begin
starting_index = data.index.values.max() + 1
for _, row in year_config.iterrows():
subset = util.filter_table(data, row, ignore={self._config_column})
# Do not run on segment if it is empty
if len(subset) == 0:
logger.debug('empty segment encountered')
continue
if self.accounting_column is None:
nrows = self._calc_nrows(len(subset), row[self._config_column])
else:
nrows = self._calc_nrows(
subset[self.accounting_column].sum(),
row[self._config_column])
updated, added, copied, removed = \
add_or_remove_rows(subset, nrows, starting_index, self.accounting_column)
if nrows > 0:
# only update the starting index if rows were added
starting_index = starting_index + nrows
segments.append(updated)
added_indexes.append(added)
copied_indexes.append(copied)
removed_indexes.append(removed)
updated = pd.concat(segments)
added_indexes = util.concat_indexes(added_indexes)
copied_indexes = util.concat_indexes(copied_indexes)
removed_indexes = util.concat_indexes(removed_indexes)
logger.debug('finish: tabular transition')
return updated, added_indexes, copied_indexes, removed_indexes
|
[
"def",
"transition",
"(",
"self",
",",
"data",
",",
"year",
")",
":",
"logger",
".",
"debug",
"(",
"'start: tabular transition'",
")",
"if",
"year",
"not",
"in",
"self",
".",
"_config_table",
".",
"index",
":",
"raise",
"ValueError",
"(",
"'No targets for given year: {}'",
".",
"format",
"(",
"year",
")",
")",
"# want this to be a DataFrame",
"year_config",
"=",
"self",
".",
"_config_table",
".",
"loc",
"[",
"[",
"year",
"]",
"]",
"logger",
".",
"debug",
"(",
"'transitioning {} segments'",
".",
"format",
"(",
"len",
"(",
"year_config",
")",
")",
")",
"segments",
"=",
"[",
"]",
"added_indexes",
"=",
"[",
"]",
"copied_indexes",
"=",
"[",
"]",
"removed_indexes",
"=",
"[",
"]",
"# since we're looping over discrete segments we need to track",
"# out here where their new indexes will begin",
"starting_index",
"=",
"data",
".",
"index",
".",
"values",
".",
"max",
"(",
")",
"+",
"1",
"for",
"_",
",",
"row",
"in",
"year_config",
".",
"iterrows",
"(",
")",
":",
"subset",
"=",
"util",
".",
"filter_table",
"(",
"data",
",",
"row",
",",
"ignore",
"=",
"{",
"self",
".",
"_config_column",
"}",
")",
"# Do not run on segment if it is empty",
"if",
"len",
"(",
"subset",
")",
"==",
"0",
":",
"logger",
".",
"debug",
"(",
"'empty segment encountered'",
")",
"continue",
"if",
"self",
".",
"accounting_column",
"is",
"None",
":",
"nrows",
"=",
"self",
".",
"_calc_nrows",
"(",
"len",
"(",
"subset",
")",
",",
"row",
"[",
"self",
".",
"_config_column",
"]",
")",
"else",
":",
"nrows",
"=",
"self",
".",
"_calc_nrows",
"(",
"subset",
"[",
"self",
".",
"accounting_column",
"]",
".",
"sum",
"(",
")",
",",
"row",
"[",
"self",
".",
"_config_column",
"]",
")",
"updated",
",",
"added",
",",
"copied",
",",
"removed",
"=",
"add_or_remove_rows",
"(",
"subset",
",",
"nrows",
",",
"starting_index",
",",
"self",
".",
"accounting_column",
")",
"if",
"nrows",
">",
"0",
":",
"# only update the starting index if rows were added",
"starting_index",
"=",
"starting_index",
"+",
"nrows",
"segments",
".",
"append",
"(",
"updated",
")",
"added_indexes",
".",
"append",
"(",
"added",
")",
"copied_indexes",
".",
"append",
"(",
"copied",
")",
"removed_indexes",
".",
"append",
"(",
"removed",
")",
"updated",
"=",
"pd",
".",
"concat",
"(",
"segments",
")",
"added_indexes",
"=",
"util",
".",
"concat_indexes",
"(",
"added_indexes",
")",
"copied_indexes",
"=",
"util",
".",
"concat_indexes",
"(",
"copied_indexes",
")",
"removed_indexes",
"=",
"util",
".",
"concat_indexes",
"(",
"removed_indexes",
")",
"logger",
".",
"debug",
"(",
"'finish: tabular transition'",
")",
"return",
"updated",
",",
"added_indexes",
",",
"copied_indexes",
",",
"removed_indexes"
] |
Add or remove rows to/from a table according to the prescribed
growth rate for this model and year.
Parameters
----------
data : pandas.DataFrame
Rows will be removed from or added to this table.
year : None, optional
Here for compatibility with other transition models,
but ignored.
Returns
-------
updated : pandas.DataFrame
Table with rows removed or added.
added : pandas.Index
New indexes of the rows that were added.
copied : pandas.Index
Indexes of rows that were copied. A row copied multiple times
will have multiple entries.
removed : pandas.Index
Index of rows that were removed.
|
[
"Add",
"or",
"remove",
"rows",
"to",
"/",
"from",
"a",
"table",
"according",
"to",
"the",
"prescribed",
"growth",
"rate",
"for",
"this",
"model",
"and",
"year",
"."
] |
python
|
train
|
rupertford/melody
|
examples/PSyclone/psyclone.py
|
https://github.com/rupertford/melody/blob/d50459880a87fdd1802c6893f6e12b52d51b3b91/examples/PSyclone/psyclone.py#L129-L155
|
def options(self, my_psy):
'''Returns all potential loop fusion options for the psy object
provided'''
# compute options dynamically here as they may depend on previous
# changes to the psy tree
my_options = []
invokes = my_psy.invokes.invoke_list
#print "there are {0} invokes".format(len(invokes))
if self._dependent_invokes:
raise RuntimeError(
"dependent invokes assumes fusion in one invoke might "
"affect fusion in another invoke. This is not yet "
"implemented")
else:
# treat each invoke separately
for idx, invoke in enumerate(invokes):
print "invoke {0}".format(idx)
# iterate through each outer loop
for loop in invoke.schedule.loops():
if loop.loop_type == "outer":
siblings = loop.parent.children
my_index = siblings.index(loop)
option = []
self._recurse(siblings, my_index, option, my_options,
invoke)
return my_options
|
[
"def",
"options",
"(",
"self",
",",
"my_psy",
")",
":",
"# compute options dynamically here as they may depend on previous",
"# changes to the psy tree",
"my_options",
"=",
"[",
"]",
"invokes",
"=",
"my_psy",
".",
"invokes",
".",
"invoke_list",
"#print \"there are {0} invokes\".format(len(invokes))",
"if",
"self",
".",
"_dependent_invokes",
":",
"raise",
"RuntimeError",
"(",
"\"dependent invokes assumes fusion in one invoke might \"",
"\"affect fusion in another invoke. This is not yet \"",
"\"implemented\"",
")",
"else",
":",
"# treat each invoke separately",
"for",
"idx",
",",
"invoke",
"in",
"enumerate",
"(",
"invokes",
")",
":",
"print",
"\"invoke {0}\"",
".",
"format",
"(",
"idx",
")",
"# iterate through each outer loop",
"for",
"loop",
"in",
"invoke",
".",
"schedule",
".",
"loops",
"(",
")",
":",
"if",
"loop",
".",
"loop_type",
"==",
"\"outer\"",
":",
"siblings",
"=",
"loop",
".",
"parent",
".",
"children",
"my_index",
"=",
"siblings",
".",
"index",
"(",
"loop",
")",
"option",
"=",
"[",
"]",
"self",
".",
"_recurse",
"(",
"siblings",
",",
"my_index",
",",
"option",
",",
"my_options",
",",
"invoke",
")",
"return",
"my_options"
] |
Returns all potential loop fusion options for the psy object
provided
|
[
"Returns",
"all",
"potential",
"loop",
"fusion",
"options",
"for",
"the",
"psy",
"object",
"provided"
] |
python
|
test
|
singularityhub/sregistry-cli
|
sregistry/main/hub/query.py
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/hub/query.py#L20-L38
|
def search(self, query=None, **kwargs):
'''query a Singularity registry for a list of images.
If query is None, collections are listed.
EXAMPLE QUERIES:
[empty] list all collections in singularity hub
vsoch do a general search for collection "vsoch"
vsoch/dinosaur list details of container vsoch/dinosaur
tag "latest" is used by default, and then the most recent
vsoch/dinosaur:tag list details for specific container
'''
if query is not None:
return self._search_collection(query)
# Search collections across all fields
return self.list()
|
[
"def",
"search",
"(",
"self",
",",
"query",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"query",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_search_collection",
"(",
"query",
")",
"# Search collections across all fields",
"return",
"self",
".",
"list",
"(",
")"
] |
query a Singularity registry for a list of images.
If query is None, collections are listed.
EXAMPLE QUERIES:
[empty] list all collections in singularity hub
vsoch do a general search for collection "vsoch"
vsoch/dinosaur list details of container vsoch/dinosaur
tag "latest" is used by default, and then the most recent
vsoch/dinosaur:tag list details for specific container
|
[
"query",
"a",
"Singularity",
"registry",
"for",
"a",
"list",
"of",
"images",
".",
"If",
"query",
"is",
"None",
"collections",
"are",
"listed",
"."
] |
python
|
test
|
mnick/scikit-tensor
|
sktensor/core.py
|
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/core.py#L297-L306
|
def flipsign(U):
"""
Flip sign of factor matrices such that largest magnitude
element will be positive
"""
midx = abs(U).argmax(axis=0)
for i in range(U.shape[1]):
if U[midx[i], i] < 0:
U[:, i] = -U[:, i]
return U
|
[
"def",
"flipsign",
"(",
"U",
")",
":",
"midx",
"=",
"abs",
"(",
"U",
")",
".",
"argmax",
"(",
"axis",
"=",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"U",
".",
"shape",
"[",
"1",
"]",
")",
":",
"if",
"U",
"[",
"midx",
"[",
"i",
"]",
",",
"i",
"]",
"<",
"0",
":",
"U",
"[",
":",
",",
"i",
"]",
"=",
"-",
"U",
"[",
":",
",",
"i",
"]",
"return",
"U"
] |
Flip sign of factor matrices such that largest magnitude
element will be positive
|
[
"Flip",
"sign",
"of",
"factor",
"matrices",
"such",
"that",
"largest",
"magnitude",
"element",
"will",
"be",
"positive"
] |
python
|
train
|
AnthonyBloomer/daftlistings
|
daftlistings/listing.py
|
https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/listing.py#L104-L118
|
def price_change(self):
"""
This method returns any price change.
:return:
"""
try:
if self._data_from_search:
return self._data_from_search.find('div', {'class': 'price-changes-sr'}).text
else:
return self._ad_page_content.find('div', {'class': 'price-changes-sr'}).text
except Exception as e:
if self._debug:
logging.error(
"Error getting price_change. Error message: " + e.args[0])
return
|
[
"def",
"price_change",
"(",
"self",
")",
":",
"try",
":",
"if",
"self",
".",
"_data_from_search",
":",
"return",
"self",
".",
"_data_from_search",
".",
"find",
"(",
"'div'",
",",
"{",
"'class'",
":",
"'price-changes-sr'",
"}",
")",
".",
"text",
"else",
":",
"return",
"self",
".",
"_ad_page_content",
".",
"find",
"(",
"'div'",
",",
"{",
"'class'",
":",
"'price-changes-sr'",
"}",
")",
".",
"text",
"except",
"Exception",
"as",
"e",
":",
"if",
"self",
".",
"_debug",
":",
"logging",
".",
"error",
"(",
"\"Error getting price_change. Error message: \"",
"+",
"e",
".",
"args",
"[",
"0",
"]",
")",
"return"
] |
This method returns any price change.
:return:
|
[
"This",
"method",
"returns",
"any",
"price",
"change",
".",
":",
"return",
":"
] |
python
|
train
|
cackharot/suds-py3
|
suds/store.py
|
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/store.py#L554-L566
|
def open(self, url):
"""
Open a document at the specified url.
@param url: A document URL.
@type url: str
@return: A file pointer to the document.
@rtype: StringIO
"""
protocol, location = self.split(url)
if protocol == self.protocol:
return self.find(location)
else:
return None
|
[
"def",
"open",
"(",
"self",
",",
"url",
")",
":",
"protocol",
",",
"location",
"=",
"self",
".",
"split",
"(",
"url",
")",
"if",
"protocol",
"==",
"self",
".",
"protocol",
":",
"return",
"self",
".",
"find",
"(",
"location",
")",
"else",
":",
"return",
"None"
] |
Open a document at the specified url.
@param url: A document URL.
@type url: str
@return: A file pointer to the document.
@rtype: StringIO
|
[
"Open",
"a",
"document",
"at",
"the",
"specified",
"url",
"."
] |
python
|
train
|
pywbem/pywbem
|
pywbem/tupleparse.py
|
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/tupleparse.py#L1839-L1847
|
def parse_expmethodresponse(self, tup_tree):
# pylint: disable=unused-argument
"""
This function is not implemented.
"""
raise CIMXMLParseError(
_format("Internal Error: Parsing support for element {0!A} is not "
"implemented", name(tup_tree)),
conn_id=self.conn_id)
|
[
"def",
"parse_expmethodresponse",
"(",
"self",
",",
"tup_tree",
")",
":",
"# pylint: disable=unused-argument",
"raise",
"CIMXMLParseError",
"(",
"_format",
"(",
"\"Internal Error: Parsing support for element {0!A} is not \"",
"\"implemented\"",
",",
"name",
"(",
"tup_tree",
")",
")",
",",
"conn_id",
"=",
"self",
".",
"conn_id",
")"
] |
This function is not implemented.
|
[
"This",
"function",
"not",
"implemented",
"."
] |
python
|
train
|
hovren/crisp
|
crisp/calibration.py
|
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/calibration.py#L160-L165
|
def parameter(self):
"""Return the current best value of a parameter"""
D = {}
for source in PARAM_SOURCE_ORDER:
D.update(self.params[source])
return D
|
[
"def",
"parameter",
"(",
"self",
")",
":",
"D",
"=",
"{",
"}",
"for",
"source",
"in",
"PARAM_SOURCE_ORDER",
":",
"D",
".",
"update",
"(",
"self",
".",
"params",
"[",
"source",
"]",
")",
"return",
"D"
] |
Return the current best value of a parameter
|
[
"Return",
"the",
"current",
"best",
"value",
"of",
"a",
"parameter"
] |
python
|
train
|
pebble/libpebble2
|
libpebble2/communication/__init__.py
|
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L79-L92
|
def run_sync(self):
"""
Runs the message loop until the Pebble disconnects. This method will block until the watch disconnects or
a fatal error occurs.
For alternatives that don't block forever, see :meth:`pump_reader` and :meth:`run_async`.
"""
while self.connected:
try:
self.pump_reader()
except PacketDecodeError as e:
logger.warning("Packet decode failed: %s", e)
except ConnectionError:
break
|
[
"def",
"run_sync",
"(",
"self",
")",
":",
"while",
"self",
".",
"connected",
":",
"try",
":",
"self",
".",
"pump_reader",
"(",
")",
"except",
"PacketDecodeError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"\"Packet decode failed: %s\"",
",",
"e",
")",
"except",
"ConnectionError",
":",
"break"
] |
Runs the message loop until the Pebble disconnects. This method will block until the watch disconnects or
a fatal error occurs.
For alternatives that don't block forever, see :meth:`pump_reader` and :meth:`run_async`.
|
[
"Runs",
"the",
"message",
"loop",
"until",
"the",
"Pebble",
"disconnects",
".",
"This",
"method",
"will",
"block",
"until",
"the",
"watch",
"disconnects",
"or",
"a",
"fatal",
"error",
"occurs",
"."
] |
python
|
train
|
apple/turicreate
|
deps/src/libxml2-2.9.1/python/libxml2.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6035-L6039
|
def htmlDocContentDumpFormatOutput(self, cur, encoding, format):
"""Dump an HTML document. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format)
|
[
"def",
"htmlDocContentDumpFormatOutput",
"(",
"self",
",",
"cur",
",",
"encoding",
",",
"format",
")",
":",
"if",
"cur",
"is",
"None",
":",
"cur__o",
"=",
"None",
"else",
":",
"cur__o",
"=",
"cur",
".",
"_o",
"libxml2mod",
".",
"htmlDocContentDumpFormatOutput",
"(",
"self",
".",
"_o",
",",
"cur__o",
",",
"encoding",
",",
"format",
")"
] |
Dump an HTML document.
|
[
"Dump",
"an",
"HTML",
"document",
"."
] |
python
|
train
|
zagaran/mongobackup
|
mongobackup/backups.py
|
https://github.com/zagaran/mongobackup/blob/d090d0cca44ecd066974c4de80edca5f26b7eeea/mongobackup/backups.py#L37-L94
|
def backup(mongo_username, mongo_password, local_backup_directory_path, database=None,
attached_directory_path=None, custom_prefix="backup",
mongo_backup_directory_path="/tmp/mongo_dump",
s3_bucket=None, s3_access_key_id=None, s3_secret_key=None,
purge_local=None, purge_attached=None, cleanup=True, silent=False):
"""
Runs a backup operation to At Least a local directory.
You must provide mongodb credentials along with a directory for a dump
operation and a directory to contain your compressed backup.
backup_prefix: optionally provide a prefix to be prepended to your backups,
by default the prefix is "backup".
database: optionally provide the name of one specific database to back up
(instead of backing up all databases on the MongoDB server)
attached_directory_path: makes a second copy of the backup to a different
directory. This directory is checked before other operations and
will raise an error if it cannot be found.
s3_bucket: if you have an Amazon Web Services S3 account you can
automatically upload the backup to an S3 Bucket you provide;
requires s3_access_key_id and s3_secret key to be passed as well
s3_access_key_id, s3_secret_key: credentials for your AWS account.
purge_local: An integer value, the number of days of backups to purge
from local_backup_directory_path after operations have completed.
purge_attached: An integer value, the number of days of backups to purge
from attached_directory_path after operations have completed.
cleanup: set to False to leave the mongo_backup_directory_path after operations
have completed.
"""
if attached_directory_path:
if not path.exists(attached_directory_path):
raise Exception("ERROR. Would have to create %s for your attached storage, make sure that file paths already exist and re-run"
% (attached_directory_path))
# Dump mongo, tarbz, copy to attached storage, upload to s3, purge, clean.
full_file_name_path = local_backup_directory_path + custom_prefix + time_string()
mongodump(mongo_username, mongo_password, mongo_backup_directory_path, database, silent=silent)
local_backup_file = tarbz(mongo_backup_directory_path, full_file_name_path, silent=silent)
if attached_directory_path:
copy(local_backup_file, attached_directory_path + local_backup_file.split("/")[-1])
if s3_bucket:
s3_upload(local_backup_file, s3_bucket, s3_access_key_id, s3_secret_key)
if purge_local:
purge_date = (datetime.utcnow().replace(second=0, microsecond=0) -
timedelta(days=purge_local))
purge_old_files(purge_date, local_backup_directory_path, custom_prefix=custom_prefix)
if purge_attached and attached_directory_path:
purge_date = (datetime.utcnow().replace(second=0, microsecond=0) -
timedelta(days=purge_attached))
purge_old_files(purge_date, attached_directory_path, custom_prefix=custom_prefix)
if cleanup:
rmtree(mongo_backup_directory_path)
|
[
"def",
"backup",
"(",
"mongo_username",
",",
"mongo_password",
",",
"local_backup_directory_path",
",",
"database",
"=",
"None",
",",
"attached_directory_path",
"=",
"None",
",",
"custom_prefix",
"=",
"\"backup\"",
",",
"mongo_backup_directory_path",
"=",
"\"/tmp/mongo_dump\"",
",",
"s3_bucket",
"=",
"None",
",",
"s3_access_key_id",
"=",
"None",
",",
"s3_secret_key",
"=",
"None",
",",
"purge_local",
"=",
"None",
",",
"purge_attached",
"=",
"None",
",",
"cleanup",
"=",
"True",
",",
"silent",
"=",
"False",
")",
":",
"if",
"attached_directory_path",
":",
"if",
"not",
"path",
".",
"exists",
"(",
"attached_directory_path",
")",
":",
"raise",
"Exception",
"(",
"\"ERROR. Would have to create %s for your attached storage, make sure that file paths already exist and re-run\"",
"%",
"(",
"attached_directory_path",
")",
")",
"# Dump mongo, tarbz, copy to attached storage, upload to s3, purge, clean.\r",
"full_file_name_path",
"=",
"local_backup_directory_path",
"+",
"custom_prefix",
"+",
"time_string",
"(",
")",
"mongodump",
"(",
"mongo_username",
",",
"mongo_password",
",",
"mongo_backup_directory_path",
",",
"database",
",",
"silent",
"=",
"silent",
")",
"local_backup_file",
"=",
"tarbz",
"(",
"mongo_backup_directory_path",
",",
"full_file_name_path",
",",
"silent",
"=",
"silent",
")",
"if",
"attached_directory_path",
":",
"copy",
"(",
"local_backup_file",
",",
"attached_directory_path",
"+",
"local_backup_file",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
")",
"if",
"s3_bucket",
":",
"s3_upload",
"(",
"local_backup_file",
",",
"s3_bucket",
",",
"s3_access_key_id",
",",
"s3_secret_key",
")",
"if",
"purge_local",
":",
"purge_date",
"=",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
"-",
"timedelta",
"(",
"days",
"=",
"purge_local",
")",
")",
"purge_old_files",
"(",
"purge_date",
",",
"local_backup_directory_path",
",",
"custom_prefix",
"=",
"custom_prefix",
")",
"if",
"purge_attached",
"and",
"attached_directory_path",
":",
"purge_date",
"=",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
"-",
"timedelta",
"(",
"days",
"=",
"purge_attached",
")",
")",
"purge_old_files",
"(",
"purge_date",
",",
"attached_directory_path",
",",
"custom_prefix",
"=",
"custom_prefix",
")",
"if",
"cleanup",
":",
"rmtree",
"(",
"mongo_backup_directory_path",
")"
] |
Runs a backup operation to At Least a local directory.
You must provide mongodb credentials along with a directory for a dump
operation and a directory to contain your compressed backup.
backup_prefix: optionally provide a prefix to be prepended to your backups,
by default the prefix is "backup".
database: optionally provide the name of one specific database to back up
(instead of backing up all databases on the MongoDB server)
attached_directory_path: makes a second copy of the backup to a different
directory. This directory is checked before other operations and
will raise an error if it cannot be found.
s3_bucket: if you have an Amazon Web Services S3 account you can
automatically upload the backup to an S3 Bucket you provide;
requires s3_access_key_id and s3_secret_key to be passed as well
s3_access_key_id, s3_secret_key: credentials for your AWS account.
purge_local: An integer value, the number of days of backups to purge
from local_backup_directory_path after operations have completed.
purge_attached: An integer value, the number of days of backups to purge
from attached_directory_path after operations have completed.
cleanup: set to False to leave the mongo_backup_directory_path after operations
have completed.
|
[
"Runs",
"a",
"backup",
"operation",
"to",
"At",
"Least",
"a",
"local",
"directory",
".",
"You",
"must",
"provide",
"mongodb",
"credentials",
"along",
"with",
"a",
"a",
"directory",
"for",
"a",
"dump",
"operation",
"and",
"a",
"directory",
"to",
"contain",
"your",
"compressed",
"backup",
".",
"backup_prefix",
":",
"optionally",
"provide",
"a",
"prefix",
"to",
"be",
"prepended",
"to",
"your",
"backups",
"by",
"default",
"the",
"prefix",
"is",
"backup",
".",
"database",
":",
"optionally",
"provide",
"the",
"name",
"of",
"one",
"specific",
"database",
"to",
"back",
"up",
"(",
"instead",
"of",
"backing",
"up",
"all",
"databases",
"on",
"the",
"MongoDB",
"server",
")",
"attached_directory_path",
":",
"makes",
"a",
"second",
"copy",
"of",
"the",
"backup",
"to",
"a",
"different",
"directory",
".",
"This",
"directory",
"is",
"checked",
"before",
"other",
"operations",
"and",
"will",
"raise",
"an",
"error",
"if",
"it",
"cannot",
"be",
"found",
".",
"s3_bucket",
":",
"if",
"you",
"have",
"an",
"Amazon",
"Web",
"Services",
"S3",
"account",
"you",
"can",
"automatically",
"upload",
"the",
"backup",
"to",
"an",
"S3",
"Bucket",
"you",
"provide",
";",
"requires",
"s3_access_key_id",
"and",
"s3_secret",
"key",
"to",
"be",
"passed",
"as",
"well",
"s3_access_key_id",
"s3_secret_key",
":",
"credentials",
"for",
"your",
"AWS",
"account",
".",
"purge_local",
":",
"An",
"integer",
"value",
"the",
"number",
"of",
"days",
"of",
"backups",
"to",
"purge",
"from",
"local_backup_directory_path",
"after",
"operations",
"have",
"completed",
".",
"purge_attached",
":",
"An",
"integer",
"value",
"the",
"number",
"of",
"days",
"of",
"backups",
"to",
"purge",
"from",
"attached_directory_path",
"after",
"operations",
"have",
"completed",
".",
"cleanup",
":",
"set",
"to",
"False",
"to",
"leave",
"the",
"mongo_backup_directory_path",
"after",
"operations",
"have",
"completed",
"."
] |
python
|
train
|
twilio/twilio-python
|
twilio/rest/serverless/v1/service/__init__.py
|
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/serverless/v1/service/__init__.py#L122-L145
|
def create(self, unique_name, friendly_name, include_credentials=values.unset):
"""
Create a new ServiceInstance
:param unicode unique_name: The unique_name
:param unicode friendly_name: The friendly_name
:param bool include_credentials: The include_credentials
:returns: Newly created ServiceInstance
:rtype: twilio.rest.serverless.v1.service.ServiceInstance
"""
data = values.of({
'UniqueName': unique_name,
'FriendlyName': friendly_name,
'IncludeCredentials': include_credentials,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ServiceInstance(self._version, payload, )
|
[
"def",
"create",
"(",
"self",
",",
"unique_name",
",",
"friendly_name",
",",
"include_credentials",
"=",
"values",
".",
"unset",
")",
":",
"data",
"=",
"values",
".",
"of",
"(",
"{",
"'UniqueName'",
":",
"unique_name",
",",
"'FriendlyName'",
":",
"friendly_name",
",",
"'IncludeCredentials'",
":",
"include_credentials",
",",
"}",
")",
"payload",
"=",
"self",
".",
"_version",
".",
"create",
"(",
"'POST'",
",",
"self",
".",
"_uri",
",",
"data",
"=",
"data",
",",
")",
"return",
"ServiceInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
")"
] |
Create a new ServiceInstance
:param unicode unique_name: The unique_name
:param unicode friendly_name: The friendly_name
:param bool include_credentials: The include_credentials
:returns: Newly created ServiceInstance
:rtype: twilio.rest.serverless.v1.service.ServiceInstance
|
[
"Create",
"a",
"new",
"ServiceInstance"
] |
python
|
train
|
mwickert/scikit-dsp-comm
|
sk_dsp_comm/pyaudio_helper.py
|
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L291-L298
|
def DSP_callback_toc(self):
"""
Add new toc time to the DSP_toc list. Will not be called if
Tcapture = 0.
"""
if self.Tcapture > 0:
self.DSP_toc.append(time.time()-self.start_time)
|
[
"def",
"DSP_callback_toc",
"(",
"self",
")",
":",
"if",
"self",
".",
"Tcapture",
">",
"0",
":",
"self",
".",
"DSP_toc",
".",
"append",
"(",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"start_time",
")"
] |
Add new toc time to the DSP_toc list. Will not be called if
Tcapture = 0.
|
[
"Add",
"new",
"toc",
"time",
"to",
"the",
"DSP_toc",
"list",
".",
"Will",
"not",
"be",
"called",
"if",
"Tcapture",
"=",
"0",
"."
] |
python
|
valid
|
planetarypy/pvl
|
pvl/decoder.py
|
https://github.com/planetarypy/pvl/blob/ed92b284c4208439b033d28c9c176534c0faac0e/pvl/decoder.py#L489-L505
|
def parse_value(self, stream):
"""
Value ::= (SimpleValue | Set | Sequence) WSC UnitsExpression?
"""
if self.has_sequence(stream):
value = self.parse_sequence(stream)
elif self.has_set(stream):
value = self.parse_set(stream)
else:
value = self.parse_simple_value(stream)
self.skip_whitespace_or_comment(stream)
if self.has_units(stream):
return Units(value, self.parse_units(stream))
return value
|
[
"def",
"parse_value",
"(",
"self",
",",
"stream",
")",
":",
"if",
"self",
".",
"has_sequence",
"(",
"stream",
")",
":",
"value",
"=",
"self",
".",
"parse_sequence",
"(",
"stream",
")",
"elif",
"self",
".",
"has_set",
"(",
"stream",
")",
":",
"value",
"=",
"self",
".",
"parse_set",
"(",
"stream",
")",
"else",
":",
"value",
"=",
"self",
".",
"parse_simple_value",
"(",
"stream",
")",
"self",
".",
"skip_whitespace_or_comment",
"(",
"stream",
")",
"if",
"self",
".",
"has_units",
"(",
"stream",
")",
":",
"return",
"Units",
"(",
"value",
",",
"self",
".",
"parse_units",
"(",
"stream",
")",
")",
"return",
"value"
] |
Value ::= (SimpleValue | Set | Sequence) WSC UnitsExpression?
|
[
"Value",
"::",
"=",
"(",
"SimpleValue",
"|",
"Set",
"|",
"Sequence",
")",
"WSC",
"UnitsExpression?"
] |
python
|
train
|
zetaops/zengine
|
zengine/views/channel_management.py
|
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/channel_management.py#L195-L218
|
def move_complete_channel(self):
"""
Channels and their subscribers are moved
completely to new channel or existing channel.
"""
to_channel = Channel.objects.get(self.current.task_data['target_channel_key'])
chosen_channels = self.current.task_data['chosen_channels']
chosen_channels_names = self.current.task_data['chosen_channels_names']
with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}):
for s in Subscriber.objects.filter(channel_id__in=chosen_channels, typ=15):
s.channel = to_channel
s.save()
with BlockDelete(Message):
Message.objects.filter(channel_id__in=chosen_channels, typ=15).delete()
with BlockDelete(Channel):
Channel.objects.filter(key__in=chosen_channels).delete()
self.current.task_data[
'msg'] = _(u"Chosen channels(%s) have been merged to '%s' channel successfully.") % \
(', '.join(chosen_channels_names), to_channel.name)
|
[
"def",
"move_complete_channel",
"(",
"self",
")",
":",
"to_channel",
"=",
"Channel",
".",
"objects",
".",
"get",
"(",
"self",
".",
"current",
".",
"task_data",
"[",
"'target_channel_key'",
"]",
")",
"chosen_channels",
"=",
"self",
".",
"current",
".",
"task_data",
"[",
"'chosen_channels'",
"]",
"chosen_channels_names",
"=",
"self",
".",
"current",
".",
"task_data",
"[",
"'chosen_channels_names'",
"]",
"with",
"BlockSave",
"(",
"Subscriber",
",",
"query_dict",
"=",
"{",
"'channel_id'",
":",
"to_channel",
".",
"key",
"}",
")",
":",
"for",
"s",
"in",
"Subscriber",
".",
"objects",
".",
"filter",
"(",
"channel_id__in",
"=",
"chosen_channels",
",",
"typ",
"=",
"15",
")",
":",
"s",
".",
"channel",
"=",
"to_channel",
"s",
".",
"save",
"(",
")",
"with",
"BlockDelete",
"(",
"Message",
")",
":",
"Message",
".",
"objects",
".",
"filter",
"(",
"channel_id__in",
"=",
"chosen_channels",
",",
"typ",
"=",
"15",
")",
".",
"delete",
"(",
")",
"with",
"BlockDelete",
"(",
"Channel",
")",
":",
"Channel",
".",
"objects",
".",
"filter",
"(",
"key__in",
"=",
"chosen_channels",
")",
".",
"delete",
"(",
")",
"self",
".",
"current",
".",
"task_data",
"[",
"'msg'",
"]",
"=",
"_",
"(",
"u\"Chosen channels(%s) have been merged to '%s' channel successfully.\"",
")",
"%",
"(",
"', '",
".",
"join",
"(",
"chosen_channels_names",
")",
",",
"to_channel",
".",
"name",
")"
] |
Channels and their subscribers are moved
completely to new channel or existing channel.
|
[
"Channels",
"and",
"theirs",
"subscribers",
"are",
"moved",
"completely",
"to",
"new",
"channel",
"or",
"existing",
"channel",
"."
] |
python
|
train
|
raff/dynash
|
dynash/dynash.py
|
https://github.com/raff/dynash/blob/a2b4fab67dd85ceaa9c1bb7604ebc1768a7fc28e/dynash/dynash.py#L317-L354
|
def do_describe(self, line):
"describe [-c] {tablename}..."
args = self.getargs(line)
if '-c' in args:
create_info = True
args.remove('-c')
else:
create_info = False
if not args:
if self.table:
args = [self.table.name]
else:
args = self.tables
for table in args:
desc = self.conn.describe_table(table)
if create_info:
info = desc['Table']
schema = info['KeySchema']
name = info['TableName']
hkey = schema['HashKeyElement']
hkey = "%s:%s" % (hkey['AttributeName'], hkey['AttributeType'])
if 'RangeKeyElement' in schema:
rkey = schema['RangeKeyElement']
rkey = " %s:%s" % (rkey['AttributeName'], rkey['AttributeType'])
else:
rkey = ''
prov = info['ProvisionedThroughput']
prov = "-c %d,%d" % (prov['ReadCapacityUnits'], prov['WriteCapacityUnits'])
print "create %s %s %s%s" % (name, prov, hkey, rkey)
else:
self.pprint(desc, "%s: " % table)
|
[
"def",
"do_describe",
"(",
"self",
",",
"line",
")",
":",
"args",
"=",
"self",
".",
"getargs",
"(",
"line",
")",
"if",
"'-c'",
"in",
"args",
":",
"create_info",
"=",
"True",
"args",
".",
"remove",
"(",
"'-c'",
")",
"else",
":",
"create_info",
"=",
"False",
"if",
"not",
"args",
":",
"if",
"self",
".",
"table",
":",
"args",
"=",
"[",
"self",
".",
"table",
".",
"name",
"]",
"else",
":",
"args",
"=",
"self",
".",
"tables",
"for",
"table",
"in",
"args",
":",
"desc",
"=",
"self",
".",
"conn",
".",
"describe_table",
"(",
"table",
")",
"if",
"create_info",
":",
"info",
"=",
"desc",
"[",
"'Table'",
"]",
"schema",
"=",
"info",
"[",
"'KeySchema'",
"]",
"name",
"=",
"info",
"[",
"'TableName'",
"]",
"hkey",
"=",
"schema",
"[",
"'HashKeyElement'",
"]",
"hkey",
"=",
"\"%s:%s\"",
"%",
"(",
"hkey",
"[",
"'AttributeName'",
"]",
",",
"hkey",
"[",
"'AttributeType'",
"]",
")",
"if",
"'RangeKeyElement'",
"in",
"schema",
":",
"rkey",
"=",
"schema",
"[",
"'RangeKeyElement'",
"]",
"rkey",
"=",
"\" %s:%s\"",
"%",
"(",
"rkey",
"[",
"'AttributeName'",
"]",
",",
"rkey",
"[",
"'AttributeType'",
"]",
")",
"else",
":",
"rkey",
"=",
"''",
"prov",
"=",
"info",
"[",
"'ProvisionedThroughput'",
"]",
"prov",
"=",
"\"-c %d,%d\"",
"%",
"(",
"prov",
"[",
"'ReadCapacityUnits'",
"]",
",",
"prov",
"[",
"'WriteCapacityUnits'",
"]",
")",
"print",
"\"create %s %s %s%s\"",
"%",
"(",
"name",
",",
"prov",
",",
"hkey",
",",
"rkey",
")",
"else",
":",
"self",
".",
"pprint",
"(",
"desc",
",",
"\"%s: \"",
"%",
"table",
")"
] |
describe [-c] {tablename}...
|
[
"describe",
"[",
"-",
"c",
"]",
"{",
"tablename",
"}",
"..."
] |
python
|
train
|
dropbox/stone
|
stone/backends/python_rsrc/stone_validators.py
|
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_rsrc/stone_validators.py#L462-L470
|
def validate_with_permissions(self, val, caller_permissions):
"""
For a val to pass validation, val must be of the correct type and have
all required permissioned fields present. Should only be called
for callers with extra permissions.
"""
self.validate(val)
self.validate_fields_only_with_permissions(val, caller_permissions)
return val
|
[
"def",
"validate_with_permissions",
"(",
"self",
",",
"val",
",",
"caller_permissions",
")",
":",
"self",
".",
"validate",
"(",
"val",
")",
"self",
".",
"validate_fields_only_with_permissions",
"(",
"val",
",",
"caller_permissions",
")",
"return",
"val"
] |
For a val to pass validation, val must be of the correct type and have
all required permissioned fields present. Should only be called
for callers with extra permissions.
|
[
"For",
"a",
"val",
"to",
"pass",
"validation",
"val",
"must",
"be",
"of",
"the",
"correct",
"type",
"and",
"have",
"all",
"required",
"permissioned",
"fields",
"present",
".",
"Should",
"only",
"be",
"called",
"for",
"callers",
"with",
"extra",
"permissions",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/boto_apigateway.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L656-L681
|
def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profile=None):
'''
Gets information about the defined API Deployments. Return list of api deployments.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_deployments restApiId
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployments = []
_deployments = conn.get_deployments(restApiId=restApiId)
while True:
if _deployments:
deployments = deployments + _deployments['items']
if 'position' not in _deployments:
break
_deployments = conn.get_deployments(restApiId=restApiId, position=_deployments['position'])
return {'deployments': [_convert_datetime_str(deployment) for deployment in deployments]}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
[
"def",
"describe_api_deployments",
"(",
"restApiId",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"deployments",
"=",
"[",
"]",
"_deployments",
"=",
"conn",
".",
"get_deployments",
"(",
"restApiId",
"=",
"restApiId",
")",
"while",
"True",
":",
"if",
"_deployments",
":",
"deployments",
"=",
"deployments",
"+",
"_deployments",
"[",
"'items'",
"]",
"if",
"'position'",
"not",
"in",
"_deployments",
":",
"break",
"_deployments",
"=",
"conn",
".",
"get_deployments",
"(",
"restApiId",
"=",
"restApiId",
",",
"position",
"=",
"_deployments",
"[",
"'position'",
"]",
")",
"return",
"{",
"'deployments'",
":",
"[",
"_convert_datetime_str",
"(",
"deployment",
")",
"for",
"deployment",
"in",
"deployments",
"]",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}"
] |
Gets information about the defined API Deployments. Return list of api deployments.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_deployments restApiId
|
[
"Gets",
"information",
"about",
"the",
"defined",
"API",
"Deployments",
".",
"Return",
"list",
"of",
"api",
"deployments",
"."
] |
python
|
train
|
schul-cloud/resources-api-v1
|
generators/python_client/schul_cloud_resources_api_v1/schema/__init__.py
|
https://github.com/schul-cloud/resources-api-v1/blob/58b2d7ba13669fa013ef81c0ffcffbf6b3fdb52d/generators/python_client/schul_cloud_resources_api_v1/schema/__init__.py#L57-L65
|
def validate(self, object):
"""Validate an object against the schema.
This function just passes if the schema matches the object.
If the object does not match the schema, a ValidationException is raised.
This error allows debugging.
"""
resolver=self.get_resolver()
jsonschema.validate(object, self.get_schema(), resolver=resolver)
|
[
"def",
"validate",
"(",
"self",
",",
"object",
")",
":",
"resolver",
"=",
"self",
".",
"get_resolver",
"(",
")",
"jsonschema",
".",
"validate",
"(",
"object",
",",
"self",
".",
"get_schema",
"(",
")",
",",
"resolver",
"=",
"resolver",
")"
] |
Validate an object against the schema.
This function just passes if the schema matches the object.
If the object does not match the schema, a ValidationException is raised.
This error allows debugging.
|
[
"Validate",
"an",
"object",
"against",
"the",
"schema",
"."
] |
python
|
test
|
klmitch/framer
|
framer/framers.py
|
https://github.com/klmitch/framer/blob/bd34cee9737793dab61d1d8973930b64bd08acb4/framer/framers.py#L302-L334
|
def to_frame(self, data, state):
"""
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
"""
# If we've read all the data, let the caller know
if state.chunk_remaining <= 0:
raise exc.NoFrames()
# OK, how much data do we send on?
data_len = min(state.chunk_remaining, len(data))
# Extract that data from the buffer
frame = six.binary_type(data[:data_len])
del data[:data_len]
# Update the state
state.chunk_remaining -= data_len
# Return the frame
return frame
|
[
"def",
"to_frame",
"(",
"self",
",",
"data",
",",
"state",
")",
":",
"# If we've read all the data, let the caller know",
"if",
"state",
".",
"chunk_remaining",
"<=",
"0",
":",
"raise",
"exc",
".",
"NoFrames",
"(",
")",
"# OK, how much data do we send on?",
"data_len",
"=",
"min",
"(",
"state",
".",
"chunk_remaining",
",",
"len",
"(",
"data",
")",
")",
"# Extract that data from the buffer",
"frame",
"=",
"six",
".",
"binary_type",
"(",
"data",
"[",
":",
"data_len",
"]",
")",
"del",
"data",
"[",
":",
"data_len",
"]",
"# Update the state",
"state",
".",
"chunk_remaining",
"-=",
"data_len",
"# Return the frame",
"return",
"frame"
] |
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
|
[
"Extract",
"a",
"single",
"frame",
"from",
"the",
"data",
"buffer",
".",
"The",
"consumed",
"data",
"should",
"be",
"removed",
"from",
"the",
"buffer",
".",
"If",
"no",
"complete",
"frame",
"can",
"be",
"read",
"must",
"raise",
"a",
"NoFrames",
"exception",
"."
] |
python
|
train
|
linkedin/naarad
|
src/naarad/graphing/matplotlib_naarad.py
|
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/graphing/matplotlib_naarad.py#L74-L83
|
def highlight_region(plt, start_x, end_x):
"""
Highlight a region on the chart between the specified start and end x-co-ordinates.
param pyplot plt: matplotlib pyplot which contains the charts to be highlighted
param string start_x : epoch time millis
param string end_x : epoch time millis
"""
start_x = convert_to_mdate(start_x)
end_x = convert_to_mdate(end_x)
plt.axvspan(start_x, end_x, color=CONSTANTS.HIGHLIGHT_COLOR, alpha=CONSTANTS.HIGHLIGHT_ALPHA)
|
[
"def",
"highlight_region",
"(",
"plt",
",",
"start_x",
",",
"end_x",
")",
":",
"start_x",
"=",
"convert_to_mdate",
"(",
"start_x",
")",
"end_x",
"=",
"convert_to_mdate",
"(",
"end_x",
")",
"plt",
".",
"axvspan",
"(",
"start_x",
",",
"end_x",
",",
"color",
"=",
"CONSTANTS",
".",
"HIGHLIGHT_COLOR",
",",
"alpha",
"=",
"CONSTANTS",
".",
"HIGHLIGHT_ALPHA",
")"
] |
Highlight a region on the chart between the specified start and end x-co-ordinates.
param pyplot plt: matplotlib pyplot which contains the charts to be highlighted
param string start_x : epoch time millis
param string end_x : epoch time millis
|
[
"Highlight",
"a",
"region",
"on",
"the",
"chart",
"between",
"the",
"specified",
"start",
"and",
"end",
"x",
"-",
"co",
"-",
"ordinates",
".",
"param",
"pyplot",
"plt",
":",
"matplotlibk",
"pyplot",
"which",
"contains",
"the",
"charts",
"to",
"be",
"highlighted",
"param",
"string",
"start_x",
":",
"epoch",
"time",
"millis",
"param",
"string",
"end_x",
":",
"epoch",
"time",
"millis"
] |
python
|
valid
|
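A hedged usage sketch for highlight_region above: axvspan is what the helper ultimately calls, so the equivalent direct call is shown here with invented coordinates instead of epoch-millisecond strings.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [1, 4, 9, 16])
# highlight_region(plt, start_x, end_x) boils down to:
plt.axvspan(1.0, 2.0, color='yellow', alpha=0.2)
plt.show()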
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py#L163-L185
|
def add_fortran_to_env(env):
"""Add Builders and construction variables for Fortran to an Environment."""
try:
FortranSuffixes = env['FORTRANFILESUFFIXES']
except KeyError:
FortranSuffixes = ['.f', '.for', '.ftn']
#print("Adding %s to fortran suffixes" % FortranSuffixes)
try:
FortranPPSuffixes = env['FORTRANPPFILESUFFIXES']
except KeyError:
FortranPPSuffixes = ['.fpp', '.FPP']
DialectAddToEnv(env, "FORTRAN", FortranSuffixes,
FortranPPSuffixes, support_module = 1)
env['FORTRANMODPREFIX'] = '' # like $LIBPREFIX
env['FORTRANMODSUFFIX'] = '.mod' # like $LIBSUFFIX
env['FORTRANMODDIR'] = '' # where the compiler should place .mod files
env['FORTRANMODDIRPREFIX'] = '' # some prefix to $FORTRANMODDIR - similar to $INCPREFIX
env['FORTRANMODDIRSUFFIX'] = '' # some suffix to $FORTRANMODDIR - similar to $INCSUFFIX
env['_FORTRANMODFLAG'] = '$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
|
[
"def",
"add_fortran_to_env",
"(",
"env",
")",
":",
"try",
":",
"FortranSuffixes",
"=",
"env",
"[",
"'FORTRANFILESUFFIXES'",
"]",
"except",
"KeyError",
":",
"FortranSuffixes",
"=",
"[",
"'.f'",
",",
"'.for'",
",",
"'.ftn'",
"]",
"#print(\"Adding %s to fortran suffixes\" % FortranSuffixes)",
"try",
":",
"FortranPPSuffixes",
"=",
"env",
"[",
"'FORTRANPPFILESUFFIXES'",
"]",
"except",
"KeyError",
":",
"FortranPPSuffixes",
"=",
"[",
"'.fpp'",
",",
"'.FPP'",
"]",
"DialectAddToEnv",
"(",
"env",
",",
"\"FORTRAN\"",
",",
"FortranSuffixes",
",",
"FortranPPSuffixes",
",",
"support_module",
"=",
"1",
")",
"env",
"[",
"'FORTRANMODPREFIX'",
"]",
"=",
"''",
"# like $LIBPREFIX",
"env",
"[",
"'FORTRANMODSUFFIX'",
"]",
"=",
"'.mod'",
"# like $LIBSUFFIX",
"env",
"[",
"'FORTRANMODDIR'",
"]",
"=",
"''",
"# where the compiler should place .mod files",
"env",
"[",
"'FORTRANMODDIRPREFIX'",
"]",
"=",
"''",
"# some prefix to $FORTRANMODDIR - similar to $INCPREFIX",
"env",
"[",
"'FORTRANMODDIRSUFFIX'",
"]",
"=",
"''",
"# some suffix to $FORTRANMODDIR - similar to $INCSUFFIX",
"env",
"[",
"'_FORTRANMODFLAG'",
"]",
"=",
"'$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'"
] |
Add Builders and construction variables for Fortran to an Environment.
|
[
"Add",
"Builders",
"and",
"construction",
"variables",
"for",
"Fortran",
"to",
"an",
"Environment",
"."
] |
python
|
train
|
tapilab/brandelion
|
brandelion/cli/analyze.py
|
https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L314-L316
|
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
|
[
"def",
"_cosine",
"(",
"a",
",",
"b",
")",
":",
"return",
"1.",
"*",
"len",
"(",
"a",
"&",
"b",
")",
"/",
"(",
"math",
".",
"sqrt",
"(",
"len",
"(",
"a",
")",
")",
"*",
"math",
".",
"sqrt",
"(",
"len",
"(",
"b",
")",
")",
")"
] |
Return the len(a & b) / len(a)
|
[
"Return",
"the",
"len",
"(",
"a",
"&",
"b",
")",
"/",
"len",
"(",
"a",
")"
] |
python
|
train
|
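Note that the body above computes a set-based cosine similarity, |a ∩ b| / sqrt(|a| * |b|), rather than the overlap ratio the one-line docstring suggests. A small worked sketch with invented sets:

import math

a = {'alice', 'bob', 'carol'}
b = {'bob', 'carol', 'dave', 'erin'}

overlap = len(a & b)                                        # 2
cosine = overlap / (math.sqrt(len(a)) * math.sqrt(len(b)))
print(round(cosine, 3))                                     # 0.577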
raiden-network/raiden
|
raiden/blockchain/events.py
|
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/blockchain/events.py#L216-L223
|
def poll_blockchain_events(self, block_number: typing.BlockNumber):
""" Poll for new blockchain events up to `block_number`. """
for event_listener in self.event_listeners:
assert isinstance(event_listener.filter, StatelessFilter)
for log_event in event_listener.filter.get_new_entries(block_number):
yield decode_event_to_internal(event_listener.abi, log_event)
|
[
"def",
"poll_blockchain_events",
"(",
"self",
",",
"block_number",
":",
"typing",
".",
"BlockNumber",
")",
":",
"for",
"event_listener",
"in",
"self",
".",
"event_listeners",
":",
"assert",
"isinstance",
"(",
"event_listener",
".",
"filter",
",",
"StatelessFilter",
")",
"for",
"log_event",
"in",
"event_listener",
".",
"filter",
".",
"get_new_entries",
"(",
"block_number",
")",
":",
"yield",
"decode_event_to_internal",
"(",
"event_listener",
".",
"abi",
",",
"log_event",
")"
] |
Poll for new blockchain events up to `block_number`.
|
[
"Poll",
"for",
"new",
"blockchain",
"events",
"up",
"to",
"block_number",
"."
] |
python
|
train
|
SheffieldML/GPy
|
GPy/likelihoods/weibull.py
|
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/weibull.py#L52-L82
|
def logpdf_link(self, link_f, y, Y_metadata=None):
"""
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = \\alpha_{i}\\log \\beta - \\log \\Gamma(\\alpha_{i}) + (\\alpha_{i} - 1)\\log y_{i} - \\beta y_{i}\\\\
\\alpha_{i} = \\beta y_{i}
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
# alpha = self.gp_link.transf(gp)*self.beta sum(log(a) + (a-1).*log(y)- f - exp(-f).*y.^a)
# return (1. - alpha)*np.log(obs) + self.beta*obs - alpha * np.log(self.beta) + np.log(special.gamma(alpha))
assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
c = np.zeros_like(y)
if Y_metadata is not None and 'censored' in Y_metadata.keys():
c = Y_metadata['censored']
# uncensored = (1-c)* (np.log(self.r) + (self.r - 1) * np.log(y) - link_f - (np.exp(-link_f) * (y ** self.r)))
# censored = (-c)*np.exp(-link_f)*(y**self.r)
uncensored = (1-c)*( np.log(self.r)-np.log(link_f)+(self.r-1)*np.log(y) - y**self.r/link_f)
censored = -c*y**self.r/link_f
log_objective = uncensored + censored
return log_objective
|
[
"def",
"logpdf_link",
"(",
"self",
",",
"link_f",
",",
"y",
",",
"Y_metadata",
"=",
"None",
")",
":",
"# alpha = self.gp_link.transf(gp)*self.beta sum(log(a) + (a-1).*log(y)- f - exp(-f).*y.^a)",
"# return (1. - alpha)*np.log(obs) + self.beta*obs - alpha * np.log(self.beta) + np.log(special.gamma(alpha))",
"assert",
"np",
".",
"atleast_1d",
"(",
"link_f",
")",
".",
"shape",
"==",
"np",
".",
"atleast_1d",
"(",
"y",
")",
".",
"shape",
"c",
"=",
"np",
".",
"zeros_like",
"(",
"y",
")",
"if",
"Y_metadata",
"is",
"not",
"None",
"and",
"'censored'",
"in",
"Y_metadata",
".",
"keys",
"(",
")",
":",
"c",
"=",
"Y_metadata",
"[",
"'censored'",
"]",
"# uncensored = (1-c)* (np.log(self.r) + (self.r - 1) * np.log(y) - link_f - (np.exp(-link_f) * (y ** self.r)))",
"# censored = (-c)*np.exp(-link_f)*(y**self.r)",
"uncensored",
"=",
"(",
"1",
"-",
"c",
")",
"*",
"(",
"np",
".",
"log",
"(",
"self",
".",
"r",
")",
"-",
"np",
".",
"log",
"(",
"link_f",
")",
"+",
"(",
"self",
".",
"r",
"-",
"1",
")",
"*",
"np",
".",
"log",
"(",
"y",
")",
"-",
"y",
"**",
"self",
".",
"r",
"/",
"link_f",
")",
"censored",
"=",
"-",
"c",
"*",
"y",
"**",
"self",
".",
"r",
"/",
"link_f",
"log_objective",
"=",
"uncensored",
"+",
"censored",
"return",
"log_objective"
] |
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = \\alpha_{i}\\log \\beta - \\log \\Gamma(\\alpha_{i}) + (\\alpha_{i} - 1)\\log y_{i} - \\beta y_{i}\\\\
\\alpha_{i} = \\beta y_{i}
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
|
[
"Log",
"Likelihood",
"Function",
"given",
"link",
"(",
"f",
")"
] |
python
|
train
|
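A standalone numeric sketch of the censored Weibull log-density evaluated above; the shape r, the scale link_f and the data/censoring flags are all invented.

import numpy as np

r = 1.5                             # Weibull shape
link_f = np.array([2.0, 2.0, 2.0])  # scale of y**r, i.e. link(f)
y = np.array([0.5, 1.0, 3.0])
c = np.array([0, 0, 1])             # 1 marks a right-censored observation

uncensored = (1 - c) * (np.log(r) - np.log(link_f) + (r - 1) * np.log(y) - y**r / link_f)
censored = -c * y**r / link_f       # only the survival term contributes
print(uncensored + censored)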
dpgaspar/Flask-AppBuilder
|
flask_appbuilder/api/__init__.py
|
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/api/__init__.py#L414-L432
|
def operation_helper(
self, path=None, operations=None, methods=None, func=None, **kwargs
):
"""May mutate operations.
:param str path: Path to the resource
:param dict operations: A `dict` mapping HTTP methods to operation object. See
:param list methods: A list of methods registered for this path
"""
for method in methods:
yaml_doc_string = yaml_utils.load_operations_from_docstring(func.__doc__)
yaml_doc_string = yaml.safe_load(
str(yaml_doc_string).replace(
"{{self.__class__.__name__}}", self.__class__.__name__
)
)
if yaml_doc_string:
operations[method.lower()] = yaml_doc_string.get(method.lower(), {})
else:
operations[method.lower()] = {}
|
[
"def",
"operation_helper",
"(",
"self",
",",
"path",
"=",
"None",
",",
"operations",
"=",
"None",
",",
"methods",
"=",
"None",
",",
"func",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"method",
"in",
"methods",
":",
"yaml_doc_string",
"=",
"yaml_utils",
".",
"load_operations_from_docstring",
"(",
"func",
".",
"__doc__",
")",
"yaml_doc_string",
"=",
"yaml",
".",
"safe_load",
"(",
"str",
"(",
"yaml_doc_string",
")",
".",
"replace",
"(",
"\"{{self.__class__.__name__}}\"",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"yaml_doc_string",
":",
"operations",
"[",
"method",
".",
"lower",
"(",
")",
"]",
"=",
"yaml_doc_string",
".",
"get",
"(",
"method",
".",
"lower",
"(",
")",
",",
"{",
"}",
")",
"else",
":",
"operations",
"[",
"method",
".",
"lower",
"(",
")",
"]",
"=",
"{",
"}"
] |
May mutate operations.
:param str path: Path to the resource
:param dict operations: A `dict` mapping HTTP methods to operation object. See
:param list methods: A list of methods registered for this path
|
[
"May",
"mutate",
"operations",
".",
":",
"param",
"str",
"path",
":",
"Path",
"to",
"the",
"resource",
":",
"param",
"dict",
"operations",
":",
"A",
"dict",
"mapping",
"HTTP",
"methods",
"to",
"operation",
"object",
".",
"See",
":",
"param",
"list",
"methods",
":",
"A",
"list",
"of",
"methods",
"registered",
"for",
"this",
"path"
] |
python
|
train
|
jonathf/chaospy
|
chaospy/distributions/approximation.py
|
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/approximation.py#L229-L315
|
def approximate_moment(
dist,
K,
retall=False,
control_var=None,
rule="F",
order=1000,
**kws
):
"""
Approximation method for estimation of raw statistical moments.
Args:
dist : Dist
Distribution domain with dim=len(dist)
K : numpy.ndarray
The exponents of the moments of interest with shape (dim,K).
control_var : Dist
If provided will be used as a control variable to try to reduce
the error.
acc (:py:data:typing.Optional[int]):
The order of quadrature/MCI
sparse : bool
If True used Smolyak's sparse grid instead of normal tensor
product grid in numerical integration.
rule : str
Quadrature rule
Key Description
---- -----------
"G" Optiomal Gaussian quadrature from Golub-Welsch
Slow for high order and composit is ignored.
"E" Gauss-Legendre quadrature
"C" Clenshaw-Curtis quadrature. Exponential growth rule is
used when sparse is True to make the rule nested.
Monte Carlo Integration
Key Description
---- -----------
"H" Halton sequence
"K" Korobov set
"L" Latin hypercube sampling
"M" Hammersley sequence
"R" (Pseudo-)Random sampling
"S" Sobol sequence
composite (:py:data:typing.Optional[int, numpy.ndarray]):
If provided, composite quadrature will be used.
Ignored in the case if gaussian=True.
If int provided, determines number of even domain splits
If array of ints, determines number of even domain splits along
each axis
If array of arrays/floats, determines location of splits
antithetic (:py:data:typing.Optional[numpy.ndarray]):
List of bool. Represents the axes to mirror using antithetic
variable during MCI.
"""
dim = len(dist)
shape = K.shape
size = int(K.size/dim)
K = K.reshape(dim, size)
if dim > 1:
shape = shape[1:]
X, W = quad.generate_quadrature(order, dist, rule=rule, normalize=True, **kws)
grid = numpy.mgrid[:len(X[0]), :size]
X = X.T[grid[0]].T
K = K.T[grid[1]].T
out = numpy.prod(X**K, 0)*W
if control_var is not None:
Y = control_var.ppf(dist.fwd(X))
mu = control_var.mom(numpy.eye(len(control_var)))
if (mu.size == 1) and (dim > 1):
mu = mu.repeat(dim)
for d in range(dim):
alpha = numpy.cov(out, Y[d])[0, 1]/numpy.var(Y[d])
out -= alpha*(Y[d]-mu)
out = numpy.sum(out, -1)
return out
|
[
"def",
"approximate_moment",
"(",
"dist",
",",
"K",
",",
"retall",
"=",
"False",
",",
"control_var",
"=",
"None",
",",
"rule",
"=",
"\"F\"",
",",
"order",
"=",
"1000",
",",
"*",
"*",
"kws",
")",
":",
"dim",
"=",
"len",
"(",
"dist",
")",
"shape",
"=",
"K",
".",
"shape",
"size",
"=",
"int",
"(",
"K",
".",
"size",
"/",
"dim",
")",
"K",
"=",
"K",
".",
"reshape",
"(",
"dim",
",",
"size",
")",
"if",
"dim",
">",
"1",
":",
"shape",
"=",
"shape",
"[",
"1",
":",
"]",
"X",
",",
"W",
"=",
"quad",
".",
"generate_quadrature",
"(",
"order",
",",
"dist",
",",
"rule",
"=",
"rule",
",",
"normalize",
"=",
"True",
",",
"*",
"*",
"kws",
")",
"grid",
"=",
"numpy",
".",
"mgrid",
"[",
":",
"len",
"(",
"X",
"[",
"0",
"]",
")",
",",
":",
"size",
"]",
"X",
"=",
"X",
".",
"T",
"[",
"grid",
"[",
"0",
"]",
"]",
".",
"T",
"K",
"=",
"K",
".",
"T",
"[",
"grid",
"[",
"1",
"]",
"]",
".",
"T",
"out",
"=",
"numpy",
".",
"prod",
"(",
"X",
"**",
"K",
",",
"0",
")",
"*",
"W",
"if",
"control_var",
"is",
"not",
"None",
":",
"Y",
"=",
"control_var",
".",
"ppf",
"(",
"dist",
".",
"fwd",
"(",
"X",
")",
")",
"mu",
"=",
"control_var",
".",
"mom",
"(",
"numpy",
".",
"eye",
"(",
"len",
"(",
"control_var",
")",
")",
")",
"if",
"(",
"mu",
".",
"size",
"==",
"1",
")",
"and",
"(",
"dim",
">",
"1",
")",
":",
"mu",
"=",
"mu",
".",
"repeat",
"(",
"dim",
")",
"for",
"d",
"in",
"range",
"(",
"dim",
")",
":",
"alpha",
"=",
"numpy",
".",
"cov",
"(",
"out",
",",
"Y",
"[",
"d",
"]",
")",
"[",
"0",
",",
"1",
"]",
"/",
"numpy",
".",
"var",
"(",
"Y",
"[",
"d",
"]",
")",
"out",
"-=",
"alpha",
"*",
"(",
"Y",
"[",
"d",
"]",
"-",
"mu",
")",
"out",
"=",
"numpy",
".",
"sum",
"(",
"out",
",",
"-",
"1",
")",
"return",
"out"
] |
Approximation method for estimation of raw statistical moments.
Args:
dist : Dist
Distribution domain with dim=len(dist)
K : numpy.ndarray
The exponents of the moments of interest with shape (dim,K).
control_var : Dist
If provided will be used as a control variable to try to reduce
the error.
acc (:py:data:typing.Optional[int]):
The order of quadrature/MCI
sparse : bool
If True used Smolyak's sparse grid instead of normal tensor
product grid in numerical integration.
rule : str
Quadrature rule
Key Description
---- -----------
"G" Optiomal Gaussian quadrature from Golub-Welsch
Slow for high order and composit is ignored.
"E" Gauss-Legendre quadrature
"C" Clenshaw-Curtis quadrature. Exponential growth rule is
used when sparse is True to make the rule nested.
Monte Carlo Integration
Key Description
---- -----------
"H" Halton sequence
"K" Korobov set
"L" Latin hypercube sampling
"M" Hammersley sequence
"R" (Pseudo-)Random sampling
"S" Sobol sequence
composite (:py:data:typing.Optional[int, numpy.ndarray]):
If provided, composite quadrature will be used.
Ignored in the case if gaussian=True.
If int provided, determines number of even domain splits
If array of ints, determines number of even domain splits along
each axis
If array of arrays/floats, determines location of splits
antithetic (:py:data:typing.Optional[numpy.ndarray]):
List of bool. Represents the axes to mirror using antithetic
variable during MCI.
|
[
"Approximation",
"method",
"for",
"estimation",
"of",
"raw",
"statistical",
"moments",
"."
] |
python
|
train
|
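A minimal sketch of the control-variate correction applied inside approximate_moment above (alpha = cov(out, Y) / var(Y)); the integrand, control variable and sample count are invented.

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(size=10000)

f = np.exp(x)   # samples of the quantity whose mean is wanted (true mean e - 1)
g = x           # control variable with known mean 0.5

alpha = np.cov(f, g)[0, 1] / np.var(g)
corrected = f - alpha * (g - 0.5)

print(f.mean(), corrected.mean())   # same expectation, lower variance for the corrected estimate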
sethmlarson/virtualbox-python
|
virtualbox/library.py
|
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L25006-L25042
|
def get_visible_region(self, rectangles, count):
"""Returns the visible region of this frame buffer.
If the @a rectangles parameter is @c null then the value of the
@a count parameter is ignored and the number of elements necessary to
describe the current visible region is returned in @a countCopied.
If @a rectangles is not @c null but @a count is less
than the required number of elements to store region data, the method
will report a failure. If @a count is equal or greater than the
required number of elements, then the actual number of elements copied
to the provided array will be returned in @a countCopied.
The address of the provided array must be in the process space of
this IFramebuffer object.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array to receive region data.
in count of type int
Number of @c RTRECT elements in the @a rectangles array.
return count_copied of type int
Number of elements copied to the @a rectangles array.
"""
if not isinstance(rectangles, basestring):
raise TypeError("rectangles can only be an instance of type basestring")
if not isinstance(count, baseinteger):
raise TypeError("count can only be an instance of type baseinteger")
count_copied = self._call("getVisibleRegion",
in_p=[rectangles, count])
return count_copied
|
[
"def",
"get_visible_region",
"(",
"self",
",",
"rectangles",
",",
"count",
")",
":",
"if",
"not",
"isinstance",
"(",
"rectangles",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"rectangles can only be an instance of type basestring\"",
")",
"if",
"not",
"isinstance",
"(",
"count",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"count can only be an instance of type baseinteger\"",
")",
"count_copied",
"=",
"self",
".",
"_call",
"(",
"\"getVisibleRegion\"",
",",
"in_p",
"=",
"[",
"rectangles",
",",
"count",
"]",
")",
"return",
"count_copied"
] |
Returns the visible region of this frame buffer.
If the @a rectangles parameter is @c null then the value of the
@a count parameter is ignored and the number of elements necessary to
describe the current visible region is returned in @a countCopied.
If @a rectangles is not @c null but @a count is less
than the required number of elements to store region data, the method
will report a failure. If @a count is equal or greater than the
required number of elements, then the actual number of elements copied
to the provided array will be returned in @a countCopied.
The address of the provided array must be in the process space of
this IFramebuffer object.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array to receive region data.
in count of type int
Number of @c RTRECT elements in the @a rectangles array.
return count_copied of type int
Number of elements copied to the @a rectangles array.
|
[
"Returns",
"the",
"visible",
"region",
"of",
"this",
"frame",
"buffer",
".",
"If",
"the",
"@a",
"rectangles",
"parameter",
"is",
"@c",
"null",
"then",
"the",
"value",
"of",
"the",
"@a",
"count",
"parameter",
"is",
"ignored",
"and",
"the",
"number",
"of",
"elements",
"necessary",
"to",
"describe",
"the",
"current",
"visible",
"region",
"is",
"returned",
"in",
"@a",
"countCopied",
".",
"If",
"@a",
"rectangles",
"is",
"not",
"@c",
"null",
"but",
"@a",
"count",
"is",
"less",
"than",
"the",
"required",
"number",
"of",
"elements",
"to",
"store",
"region",
"data",
"the",
"method",
"will",
"report",
"a",
"failure",
".",
"If",
"@a",
"count",
"is",
"equal",
"or",
"greater",
"than",
"the",
"required",
"number",
"of",
"elements",
"then",
"the",
"actual",
"number",
"of",
"elements",
"copied",
"to",
"the",
"provided",
"array",
"will",
"be",
"returned",
"in",
"@a",
"countCopied",
".",
"The",
"address",
"of",
"the",
"provided",
"array",
"must",
"be",
"in",
"the",
"process",
"space",
"of",
"this",
"IFramebuffer",
"object",
".",
"Method",
"not",
"yet",
"implemented",
"."
] |
python
|
train
|
bwohlberg/sporco
|
sporco/cnvrep.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cnvrep.py#L541-L588
|
def mskWshape(W, cri):
"""Get appropriate internal shape (see
:class:`CSC_ConvRepIndexing` and :class:`CDU_ConvRepIndexing`) for
data fidelity term mask array `W`. The external shape of `W`
depends on the external shape of input data array `S`. The
simplest criterion for ensuring that the external `W` is
compatible with `S` is to ensure that `W` has the same shape as
`S`, except that non-singleton dimensions in `S` may be singleton
dimensions in `W`. If `W` has a single non-spatial axis, it is
assigned as a channel or multi-signal axis depending on the
corresponding assignment in `S`.
Parameters
----------
W : array_like
Data fidelity term weight/mask array
cri : :class:`CSC_ConvRepIndexing` object or :class:`CDU_ConvRepIndexing`\
object
Object specifying convolutional representation dimensions
Returns
-------
shp : tuple
Appropriate internal mask array shape
"""
# Number of axes in W available for C and/or K axes
ckdim = W.ndim - cri.dimN
if ckdim >= 2:
# Both C and K axes are present in W
shpW = W.shape + (1,) if ckdim == 2 else W.shape
elif ckdim == 1:
# Exactly one of C or K axes is present in W
if cri.C == 1 and cri.K > 1:
# Input S has a single channel and multiple signals
shpW = W.shape[0:cri.dimN] + (1, W.shape[cri.dimN]) + (1,)
elif cri.C > 1 and cri.K == 1:
# Input S has multiple channels and a single signal
shpW = W.shape[0:cri.dimN] + (W.shape[cri.dimN], 1) + (1,)
else:
# Input S has multiple channels and signals: resolve ambiguity
# by taking extra axis in W as a channel axis
shpW = W.shape[0:cri.dimN] + (W.shape[cri.dimN], 1) + (1,)
else:
# Neither C nor K axis is present in W
shpW = W.shape + (1,) * (3 - ckdim)
return shpW
|
[
"def",
"mskWshape",
"(",
"W",
",",
"cri",
")",
":",
"# Number of axes in W available for C and/or K axes",
"ckdim",
"=",
"W",
".",
"ndim",
"-",
"cri",
".",
"dimN",
"if",
"ckdim",
">=",
"2",
":",
"# Both C and K axes are present in W",
"shpW",
"=",
"W",
".",
"shape",
"+",
"(",
"1",
",",
")",
"if",
"ckdim",
"==",
"2",
"else",
"W",
".",
"shape",
"elif",
"ckdim",
"==",
"1",
":",
"# Exactly one of C or K axes is present in W",
"if",
"cri",
".",
"C",
"==",
"1",
"and",
"cri",
".",
"K",
">",
"1",
":",
"# Input S has a single channel and multiple signals",
"shpW",
"=",
"W",
".",
"shape",
"[",
"0",
":",
"cri",
".",
"dimN",
"]",
"+",
"(",
"1",
",",
"W",
".",
"shape",
"[",
"cri",
".",
"dimN",
"]",
")",
"+",
"(",
"1",
",",
")",
"elif",
"cri",
".",
"C",
">",
"1",
"and",
"cri",
".",
"K",
"==",
"1",
":",
"# Input S has multiple channels and a single signal",
"shpW",
"=",
"W",
".",
"shape",
"[",
"0",
":",
"cri",
".",
"dimN",
"]",
"+",
"(",
"W",
".",
"shape",
"[",
"cri",
".",
"dimN",
"]",
",",
"1",
")",
"+",
"(",
"1",
",",
")",
"else",
":",
"# Input S has multiple channels and signals: resolve ambiguity",
"# by taking extra axis in W as a channel axis",
"shpW",
"=",
"W",
".",
"shape",
"[",
"0",
":",
"cri",
".",
"dimN",
"]",
"+",
"(",
"W",
".",
"shape",
"[",
"cri",
".",
"dimN",
"]",
",",
"1",
")",
"+",
"(",
"1",
",",
")",
"else",
":",
"# Neither C nor K axis is present in W",
"shpW",
"=",
"W",
".",
"shape",
"+",
"(",
"1",
",",
")",
"*",
"(",
"3",
"-",
"ckdim",
")",
"return",
"shpW"
] |
Get appropriate internal shape (see
:class:`CSC_ConvRepIndexing` and :class:`CDU_ConvRepIndexing`) for
data fidelity term mask array `W`. The external shape of `W`
depends on the external shape of input data array `S`. The
simplest criterion for ensuring that the external `W` is
compatible with `S` is to ensure that `W` has the same shape as
`S`, except that non-singleton dimensions in `S` may be singleton
dimensions in `W`. If `W` has a single non-spatial axis, it is
assigned as a channel or multi-signal axis depending on the
corresponding assignment in `S`.
Parameters
----------
W : array_like
Data fidelity term weight/mask array
cri : :class:`CSC_ConvRepIndexing` object or :class:`CDU_ConvRepIndexing`\
object
Object specifying convolutional representation dimensions
Returns
-------
shp : tuple
Appropriate internal mask array shape
|
[
"Get",
"appropriate",
"internal",
"shape",
"(",
"see",
":",
"class",
":",
"CSC_ConvRepIndexing",
"and",
":",
"class",
":",
"CDU_ConvRepIndexing",
")",
"for",
"data",
"fidelity",
"term",
"mask",
"array",
"W",
".",
"The",
"external",
"shape",
"of",
"W",
"depends",
"on",
"the",
"external",
"shape",
"of",
"input",
"data",
"array",
"S",
".",
"The",
"simplest",
"criterion",
"for",
"ensuring",
"that",
"the",
"external",
"W",
"is",
"compatible",
"with",
"S",
"is",
"to",
"ensure",
"that",
"W",
"has",
"the",
"same",
"shape",
"as",
"S",
"except",
"that",
"non",
"-",
"singleton",
"dimensions",
"in",
"S",
"may",
"be",
"singleton",
"dimensions",
"in",
"W",
".",
"If",
"W",
"has",
"a",
"single",
"non",
"-",
"spatial",
"axis",
"it",
"is",
"assigned",
"as",
"a",
"channel",
"or",
"multi",
"-",
"signal",
"axis",
"depending",
"on",
"the",
"corresponding",
"assignement",
"in",
"S",
"."
] |
python
|
train
|
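A hypothetical illustration of the simplest branch of mskWshape above: a purely spatial mask (no channel or signal axes) is padded with singleton dimensions; the array size and dimN are invented.

import numpy as np

W = np.ones((64, 64))        # 2-D spatial mask
dimN = 2                     # number of spatial dimensions (cri.dimN)
ckdim = W.ndim - dimN        # 0: neither C nor K axis present

shpW = W.shape + (1,) * (3 - ckdim)
print(shpW)                  # (64, 64, 1, 1, 1)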
BD2KGenomics/toil-scripts
|
src/toil_scripts/gatk_germline/germline.py
|
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/gatk_germline/germline.py#L135-L233
|
def gatk_germline_pipeline(job, samples, config):
"""
Runs the GATK best practices pipeline for germline SNP and INDEL discovery.
Steps in Pipeline
0: Generate and preprocess BAM
- Uploads processed BAM to output directory
1: Call Variants using HaplotypeCaller
- Uploads GVCF
2: Genotype VCF
- Uploads VCF
3: Filter Variants using either "hard filters" or VQSR
- Uploads filtered VCF
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list[GermlineSample] samples: List of GermlineSample namedtuples
:param Namespace config: Input parameters and reference FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.genome_dict FilesStoreID for reference genome sequence dictionary file
config.cores Number of cores for each job
config.xmx Java heap size in bytes
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.joint_genotype If True, then joint genotype and filter cohort
config.hc_output URL or local path to HaplotypeCaller output for testing
:return: Dictionary of filtered VCF FileStoreIDs
:rtype: dict
"""
require(len(samples) > 0, 'No samples were provided!')
# Get total size of genome reference files. This is used for configuring disk size.
genome_ref_size = config.genome_fasta.size + config.genome_fai.size + config.genome_dict.size
# 0: Generate processed BAM and BAI files for each sample
# group preprocessing and variant calling steps in empty Job instance
group_bam_jobs = Job()
gvcfs = {}
for sample in samples:
# 0: Generate processed BAM and BAI files for each sample
get_bam = group_bam_jobs.addChildJobFn(prepare_bam,
sample.uuid,
sample.url,
config,
paired_url=sample.paired_url,
rg_line=sample.rg_line)
# 1: Generate per sample gvcfs {uuid: gvcf_id}
# The HaplotypeCaller disk requirement depends on the input bam, bai, the genome reference
# files, and the output GVCF file. The output GVCF is smaller than the input BAM file.
hc_disk = PromisedRequirement(lambda bam, bai, ref_size:
2 * bam.size + bai.size + ref_size,
get_bam.rv(0),
get_bam.rv(1),
genome_ref_size)
get_gvcf = get_bam.addFollowOnJobFn(gatk_haplotype_caller,
get_bam.rv(0),
get_bam.rv(1),
config.genome_fasta, config.genome_fai, config.genome_dict,
annotations=config.annotations,
cores=config.cores,
disk=hc_disk,
memory=config.xmx,
hc_output=config.hc_output)
# Store cohort GVCFs in dictionary
gvcfs[sample.uuid] = get_gvcf.rv()
# Upload individual sample GVCF before genotyping to a sample specific output directory
vqsr_name = '{}{}.g.vcf'.format(sample.uuid, config.suffix)
get_gvcf.addChildJobFn(output_file_job,
vqsr_name,
get_gvcf.rv(),
os.path.join(config.output_dir, sample.uuid),
s3_key_path=config.ssec,
disk=PromisedRequirement(lambda x: x.size, get_gvcf.rv()))
# VQSR requires many variants in order to train a decent model. GATK recommends a minimum of
# 30 exomes or one large WGS sample:
# https://software.broadinstitute.org/gatk/documentation/article?id=3225
filtered_vcfs = {}
if config.joint_genotype:
# Need to configure joint genotype in a separate function to resolve promises
filtered_vcfs = group_bam_jobs.addFollowOnJobFn(joint_genotype_and_filter,
gvcfs,
config).rv()
# If not joint genotyping, then iterate over cohort and genotype and filter individually.
else:
for uuid, gvcf_id in gvcfs.iteritems():
filtered_vcfs[uuid] = group_bam_jobs.addFollowOnJobFn(genotype_and_filter,
{uuid: gvcf_id},
config).rv()
job.addChild(group_bam_jobs)
return filtered_vcfs
|
[
"def",
"gatk_germline_pipeline",
"(",
"job",
",",
"samples",
",",
"config",
")",
":",
"require",
"(",
"len",
"(",
"samples",
")",
">",
"0",
",",
"'No samples were provided!'",
")",
"# Get total size of genome reference files. This is used for configuring disk size.",
"genome_ref_size",
"=",
"config",
".",
"genome_fasta",
".",
"size",
"+",
"config",
".",
"genome_fai",
".",
"size",
"+",
"config",
".",
"genome_dict",
".",
"size",
"# 0: Generate processed BAM and BAI files for each sample",
"# group preprocessing and variant calling steps in empty Job instance",
"group_bam_jobs",
"=",
"Job",
"(",
")",
"gvcfs",
"=",
"{",
"}",
"for",
"sample",
"in",
"samples",
":",
"# 0: Generate processed BAM and BAI files for each sample",
"get_bam",
"=",
"group_bam_jobs",
".",
"addChildJobFn",
"(",
"prepare_bam",
",",
"sample",
".",
"uuid",
",",
"sample",
".",
"url",
",",
"config",
",",
"paired_url",
"=",
"sample",
".",
"paired_url",
",",
"rg_line",
"=",
"sample",
".",
"rg_line",
")",
"# 1: Generate per sample gvcfs {uuid: gvcf_id}",
"# The HaplotypeCaller disk requirement depends on the input bam, bai, the genome reference",
"# files, and the output GVCF file. The output GVCF is smaller than the input BAM file.",
"hc_disk",
"=",
"PromisedRequirement",
"(",
"lambda",
"bam",
",",
"bai",
",",
"ref_size",
":",
"2",
"*",
"bam",
".",
"size",
"+",
"bai",
".",
"size",
"+",
"ref_size",
",",
"get_bam",
".",
"rv",
"(",
"0",
")",
",",
"get_bam",
".",
"rv",
"(",
"1",
")",
",",
"genome_ref_size",
")",
"get_gvcf",
"=",
"get_bam",
".",
"addFollowOnJobFn",
"(",
"gatk_haplotype_caller",
",",
"get_bam",
".",
"rv",
"(",
"0",
")",
",",
"get_bam",
".",
"rv",
"(",
"1",
")",
",",
"config",
".",
"genome_fasta",
",",
"config",
".",
"genome_fai",
",",
"config",
".",
"genome_dict",
",",
"annotations",
"=",
"config",
".",
"annotations",
",",
"cores",
"=",
"config",
".",
"cores",
",",
"disk",
"=",
"hc_disk",
",",
"memory",
"=",
"config",
".",
"xmx",
",",
"hc_output",
"=",
"config",
".",
"hc_output",
")",
"# Store cohort GVCFs in dictionary",
"gvcfs",
"[",
"sample",
".",
"uuid",
"]",
"=",
"get_gvcf",
".",
"rv",
"(",
")",
"# Upload individual sample GVCF before genotyping to a sample specific output directory",
"vqsr_name",
"=",
"'{}{}.g.vcf'",
".",
"format",
"(",
"sample",
".",
"uuid",
",",
"config",
".",
"suffix",
")",
"get_gvcf",
".",
"addChildJobFn",
"(",
"output_file_job",
",",
"vqsr_name",
",",
"get_gvcf",
".",
"rv",
"(",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"output_dir",
",",
"sample",
".",
"uuid",
")",
",",
"s3_key_path",
"=",
"config",
".",
"ssec",
",",
"disk",
"=",
"PromisedRequirement",
"(",
"lambda",
"x",
":",
"x",
".",
"size",
",",
"get_gvcf",
".",
"rv",
"(",
")",
")",
")",
"# VQSR requires many variants in order to train a decent model. GATK recommends a minimum of",
"# 30 exomes or one large WGS sample:",
"# https://software.broadinstitute.org/gatk/documentation/article?id=3225",
"filtered_vcfs",
"=",
"{",
"}",
"if",
"config",
".",
"joint_genotype",
":",
"# Need to configure joint genotype in a separate function to resolve promises",
"filtered_vcfs",
"=",
"group_bam_jobs",
".",
"addFollowOnJobFn",
"(",
"joint_genotype_and_filter",
",",
"gvcfs",
",",
"config",
")",
".",
"rv",
"(",
")",
"# If not joint genotyping, then iterate over cohort and genotype and filter individually.",
"else",
":",
"for",
"uuid",
",",
"gvcf_id",
"in",
"gvcfs",
".",
"iteritems",
"(",
")",
":",
"filtered_vcfs",
"[",
"uuid",
"]",
"=",
"group_bam_jobs",
".",
"addFollowOnJobFn",
"(",
"genotype_and_filter",
",",
"{",
"uuid",
":",
"gvcf_id",
"}",
",",
"config",
")",
".",
"rv",
"(",
")",
"job",
".",
"addChild",
"(",
"group_bam_jobs",
")",
"return",
"filtered_vcfs"
] |
Runs the GATK best practices pipeline for germline SNP and INDEL discovery.
Steps in Pipeline
0: Generate and preprocess BAM
- Uploads processed BAM to output directory
1: Call Variants using HaplotypeCaller
- Uploads GVCF
2: Genotype VCF
- Uploads VCF
3: Filter Variants using either "hard filters" or VQSR
- Uploads filtered VCF
:param JobFunctionWrappingJob job: passed automatically by Toil
:param list[GermlineSample] samples: List of GermlineSample namedtuples
:param Namespace config: Input parameters and reference FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.genome_dict FilesStoreID for reference genome sequence dictionary file
config.cores Number of cores for each job
config.xmx Java heap size in bytes
config.suffix Suffix added to output filename
config.output_dir URL or local path to output directory
config.ssec Path to key file for SSE-C encryption
config.joint_genotype If True, then joint genotype and filter cohort
config.hc_output URL or local path to HaplotypeCaller output for testing
:return: Dictionary of filtered VCF FileStoreIDs
:rtype: dict
|
[
"Runs",
"the",
"GATK",
"best",
"practices",
"pipeline",
"for",
"germline",
"SNP",
"and",
"INDEL",
"discovery",
"."
] |
python
|
train
|
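A back-of-the-envelope version of the HaplotypeCaller disk estimate computed by the PromisedRequirement above (2 * bam + bai + reference); the byte counts are invented.

bam_size = 30 * 1024**3      # 30 GiB processed BAM
bai_size = 8 * 1024**2       # 8 MiB index
ref_size = 3 * 1024**3       # fasta + fai + dict combined

hc_disk = 2 * bam_size + bai_size + ref_size
print(hc_disk / 1024**3)     # ~63 GiB of scratch disk requested for the job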
jbittel/django-mama-cas
|
mama_cas/services/__init__.py
|
https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L82-L91
|
def get_logout_url(service):
"""Get the configured logout URL for a given service identifier, if any."""
for backend in _get_backends():
try:
return backend.get_logout_url(service)
except AttributeError:
raise NotImplementedError("%s.%s.get_logout_url() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return None
|
[
"def",
"get_logout_url",
"(",
"service",
")",
":",
"for",
"backend",
"in",
"_get_backends",
"(",
")",
":",
"try",
":",
"return",
"backend",
".",
"get_logout_url",
"(",
"service",
")",
"except",
"AttributeError",
":",
"raise",
"NotImplementedError",
"(",
"\"%s.%s.get_logout_url() not implemented\"",
"%",
"(",
"backend",
".",
"__class__",
".",
"__module__",
",",
"backend",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"None"
] |
Get the configured logout URL for a given service identifier, if any.
|
[
"Get",
"the",
"configured",
"logout",
"URL",
"for",
"a",
"given",
"service",
"identifier",
"if",
"any",
"."
] |
python
|
train
|
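A hypothetical backend showing the interface the loop in get_logout_url above relies on; the mapping and URLs are invented.

class SettingsLogoutBackend(object):
    """Backend that looks the logout URL up in a static mapping."""

    _urls = {'https://example.com/': 'https://example.com/logout'}

    def get_logout_url(self, service):
        # Return None when no logout URL is configured for this service.
        return self._urls.get(service)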
openstack/horizon
|
openstack_auth/policy.py
|
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/policy.py#L86-L186
|
def check(actions, request, target=None):
"""Check user permission.
Check if the user has permission to the action according
to policy setting.
:param actions: list of scope and action to do policy checks on,
the composition of which is (scope, action). Multiple actions
are treated as a logical AND.
* scope: service type managing the policy for action
* action: string representing the action to be checked
this should be colon separated for clarity.
i.e.
| compute:create_instance
| compute:attach_volume
| volume:attach_volume
for a policy action that requires a single action, actions
should look like
| "(("compute", "compute:create_instance"),)"
for a multiple action check, actions should look like
| "(("identity", "identity:list_users"),
| ("identity", "identity:list_roles"))"
:param request: django http request object. If not specified, credentials
must be passed.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary
representing the location of the object e.g.
{'project_id': object.project_id}
:returns: boolean if the user has permission or not for the actions.
"""
if target is None:
target = {}
user = auth_utils.get_user(request)
# Several service policy engines default to a project id check for
# ownership. Since the user is already scoped to a project, if a
# different project id has not been specified use the currently scoped
# project's id.
#
# The reason is the operator can edit the local copies of the service
# policy file. If a rule is removed, then the default rule is used. We
# don't want to block all actions because the operator did not fully
# understand the implication of editing the policy file. Additionally,
# the service APIs will correct us if we are too permissive.
if target.get('project_id') is None:
target['project_id'] = user.project_id
if target.get('tenant_id') is None:
target['tenant_id'] = target['project_id']
# same for user_id
if target.get('user_id') is None:
target['user_id'] = user.id
domain_id_keys = [
'domain_id',
'project.domain_id',
'user.domain_id',
'group.domain_id'
]
# populates domain id keys with user's current domain id
for key in domain_id_keys:
if target.get(key) is None:
target[key] = user.user_domain_id
credentials = _user_to_credentials(user)
domain_credentials = _domain_to_credentials(request, user)
# if there is a domain token use the domain_id instead of the user's domain
if domain_credentials:
credentials['domain_id'] = domain_credentials.get('domain_id')
enforcer = _get_enforcer()
for action in actions:
scope, action = action[0], action[1]
if scope in enforcer:
# this is for handling the v3 policy file and will only be
# needed when a domain scoped token is present
if scope == 'identity' and domain_credentials:
# use domain credentials
if not _check_credentials(enforcer[scope],
action,
target,
domain_credentials):
return False
# use project credentials
if not _check_credentials(enforcer[scope],
action, target, credentials):
return False
# if no policy for scope, allow action, underlying API will
# ultimately block the action if not permitted, treat as though
# allowed
return True
|
[
"def",
"check",
"(",
"actions",
",",
"request",
",",
"target",
"=",
"None",
")",
":",
"if",
"target",
"is",
"None",
":",
"target",
"=",
"{",
"}",
"user",
"=",
"auth_utils",
".",
"get_user",
"(",
"request",
")",
"# Several service policy engines default to a project id check for",
"# ownership. Since the user is already scoped to a project, if a",
"# different project id has not been specified use the currently scoped",
"# project's id.",
"#",
"# The reason is the operator can edit the local copies of the service",
"# policy file. If a rule is removed, then the default rule is used. We",
"# don't want to block all actions because the operator did not fully",
"# understand the implication of editing the policy file. Additionally,",
"# the service APIs will correct us if we are too permissive.",
"if",
"target",
".",
"get",
"(",
"'project_id'",
")",
"is",
"None",
":",
"target",
"[",
"'project_id'",
"]",
"=",
"user",
".",
"project_id",
"if",
"target",
".",
"get",
"(",
"'tenant_id'",
")",
"is",
"None",
":",
"target",
"[",
"'tenant_id'",
"]",
"=",
"target",
"[",
"'project_id'",
"]",
"# same for user_id",
"if",
"target",
".",
"get",
"(",
"'user_id'",
")",
"is",
"None",
":",
"target",
"[",
"'user_id'",
"]",
"=",
"user",
".",
"id",
"domain_id_keys",
"=",
"[",
"'domain_id'",
",",
"'project.domain_id'",
",",
"'user.domain_id'",
",",
"'group.domain_id'",
"]",
"# populates domain id keys with user's current domain id",
"for",
"key",
"in",
"domain_id_keys",
":",
"if",
"target",
".",
"get",
"(",
"key",
")",
"is",
"None",
":",
"target",
"[",
"key",
"]",
"=",
"user",
".",
"user_domain_id",
"credentials",
"=",
"_user_to_credentials",
"(",
"user",
")",
"domain_credentials",
"=",
"_domain_to_credentials",
"(",
"request",
",",
"user",
")",
"# if there is a domain token use the domain_id instead of the user's domain",
"if",
"domain_credentials",
":",
"credentials",
"[",
"'domain_id'",
"]",
"=",
"domain_credentials",
".",
"get",
"(",
"'domain_id'",
")",
"enforcer",
"=",
"_get_enforcer",
"(",
")",
"for",
"action",
"in",
"actions",
":",
"scope",
",",
"action",
"=",
"action",
"[",
"0",
"]",
",",
"action",
"[",
"1",
"]",
"if",
"scope",
"in",
"enforcer",
":",
"# this is for handling the v3 policy file and will only be",
"# needed when a domain scoped token is present",
"if",
"scope",
"==",
"'identity'",
"and",
"domain_credentials",
":",
"# use domain credentials",
"if",
"not",
"_check_credentials",
"(",
"enforcer",
"[",
"scope",
"]",
",",
"action",
",",
"target",
",",
"domain_credentials",
")",
":",
"return",
"False",
"# use project credentials",
"if",
"not",
"_check_credentials",
"(",
"enforcer",
"[",
"scope",
"]",
",",
"action",
",",
"target",
",",
"credentials",
")",
":",
"return",
"False",
"# if no policy for scope, allow action, underlying API will",
"# ultimately block the action if not permitted, treat as though",
"# allowed",
"return",
"True"
] |
Check user permission.
Check if the user has permission to the action according
to policy setting.
:param actions: list of scope and action to do policy checks on,
the composition of which is (scope, action). Multiple actions
are treated as a logical AND.
* scope: service type managing the policy for action
* action: string representing the action to be checked
this should be colon separated for clarity.
i.e.
| compute:create_instance
| compute:attach_volume
| volume:attach_volume
for a policy action that requires a single action, actions
should look like
| "(("compute", "compute:create_instance"),)"
for a multiple action check, actions should look like
| "(("identity", "identity:list_users"),
| ("identity", "identity:list_roles"))"
:param request: django http request object. If not specified, credentials
must be passed.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary
representing the location of the object e.g.
{'project_id': object.project_id}
:returns: boolean if the user has permission or not for the actions.
|
[
"Check",
"user",
"permission",
"."
] |
python
|
train
|
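Hedged examples of how callers typically invoke check() from the record above; the action tuples follow its docstring, and request is assumed to be a Django request object.

def can_launch_instance(request):
    # single (scope, action) pair
    return check((("compute", "compute:create_instance"),), request)

def can_manage_users(request):
    # multiple pairs are ANDed together
    return check((("identity", "identity:list_users"),
                  ("identity", "identity:list_roles")), request)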
hhatto/autopep8
|
autopep8.py
|
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1579-L1599
|
def _get_logical(source_lines, result, logical_start, logical_end):
"""Return the logical line corresponding to the result.
Assumes input is already E702-clean.
"""
row = result['line'] - 1
col = result['column'] - 1
ls = None
le = None
for i in range(0, len(logical_start), 1):
assert logical_end
x = logical_end[i]
if x[0] > row or (x[0] == row and x[1] > col):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = source_lines[ls[0]:le[0] + 1]
return ls, le, original
|
[
"def",
"_get_logical",
"(",
"source_lines",
",",
"result",
",",
"logical_start",
",",
"logical_end",
")",
":",
"row",
"=",
"result",
"[",
"'line'",
"]",
"-",
"1",
"col",
"=",
"result",
"[",
"'column'",
"]",
"-",
"1",
"ls",
"=",
"None",
"le",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"logical_start",
")",
",",
"1",
")",
":",
"assert",
"logical_end",
"x",
"=",
"logical_end",
"[",
"i",
"]",
"if",
"x",
"[",
"0",
"]",
">",
"row",
"or",
"(",
"x",
"[",
"0",
"]",
"==",
"row",
"and",
"x",
"[",
"1",
"]",
">",
"col",
")",
":",
"le",
"=",
"x",
"ls",
"=",
"logical_start",
"[",
"i",
"]",
"break",
"if",
"ls",
"is",
"None",
":",
"return",
"None",
"original",
"=",
"source_lines",
"[",
"ls",
"[",
"0",
"]",
":",
"le",
"[",
"0",
"]",
"+",
"1",
"]",
"return",
"ls",
",",
"le",
",",
"original"
] |
Return the logical line corresponding to the result.
Assumes input is already E702-clean.
|
[
"Return",
"the",
"logical",
"line",
"corresponding",
"to",
"the",
"result",
"."
] |
python
|
train
|
cltk/cltk
|
cltk/corpus/sanskrit/itrans/langinfo.py
|
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/sanskrit/itrans/langinfo.py#L161-L166
|
def is_retroflex(c,lang):
"""
Is the character a retroflex
"""
o=get_offset(c,lang)
return (o>=RETROFLEX_RANGE[0] and o<=RETROFLEX_RANGE[1])
|
[
"def",
"is_retroflex",
"(",
"c",
",",
"lang",
")",
":",
"o",
"=",
"get_offset",
"(",
"c",
",",
"lang",
")",
"return",
"(",
"o",
">=",
"RETROFLEX_RANGE",
"[",
"0",
"]",
"and",
"o",
"<=",
"RETROFLEX_RANGE",
"[",
"1",
"]",
")"
] |
Is the character a retroflex
|
[
"Is",
"the",
"character",
"a",
"retroflex"
] |
python
|
train
|
lextoumbourou/txstripe
|
txstripe/resource.py
|
https://github.com/lextoumbourou/txstripe/blob/a69e67f524258026fd1840655a0578311bba3b89/txstripe/resource.py#L187-L192
|
def all(cls, api_key=None, idempotency_key=None,
stripe_account=None, **params):
"""Return a deferred."""
url = cls.class_url()
return make_request(
cls, 'get', url, stripe_account=None, params=params)
|
[
"def",
"all",
"(",
"cls",
",",
"api_key",
"=",
"None",
",",
"idempotency_key",
"=",
"None",
",",
"stripe_account",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"url",
"=",
"cls",
".",
"class_url",
"(",
")",
"return",
"make_request",
"(",
"cls",
",",
"'get'",
",",
"url",
",",
"stripe_acconut",
"=",
"None",
",",
"params",
"=",
"params",
")"
] |
Return a deferred.
|
[
"Return",
"a",
"deferred",
"."
] |
python
|
train
|
DarkEnergySurvey/ugali
|
ugali/analysis/search.py
|
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/search.py#L73-L84
|
def createLabels2D(self):
""" 2D labeling at zmax """
logger.debug(" Creating 2D labels...")
self.zmax = np.argmax(self.values,axis=1)
self.vmax = self.values[np.arange(len(self.pixels),dtype=int),self.zmax]
kwargs=dict(pixels=self.pixels,values=self.vmax,nside=self.nside,
threshold=self.threshold,xsize=self.xsize)
labels,nlabels = CandidateSearch.labelHealpix(**kwargs)
self.nlabels = nlabels
self.labels = np.repeat(labels,len(self.distances)).reshape(len(labels),len(self.distances))
return self.labels, self.nlabels
|
[
"def",
"createLabels2D",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\" Creating 2D labels...\"",
")",
"self",
".",
"zmax",
"=",
"np",
".",
"argmax",
"(",
"self",
".",
"values",
",",
"axis",
"=",
"1",
")",
"self",
".",
"vmax",
"=",
"self",
".",
"values",
"[",
"np",
".",
"arange",
"(",
"len",
"(",
"self",
".",
"pixels",
")",
",",
"dtype",
"=",
"int",
")",
",",
"self",
".",
"zmax",
"]",
"kwargs",
"=",
"dict",
"(",
"pixels",
"=",
"self",
".",
"pixels",
",",
"values",
"=",
"self",
".",
"vmax",
",",
"nside",
"=",
"self",
".",
"nside",
",",
"threshold",
"=",
"self",
".",
"threshold",
",",
"xsize",
"=",
"self",
".",
"xsize",
")",
"labels",
",",
"nlabels",
"=",
"CandidateSearch",
".",
"labelHealpix",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"nlabels",
"=",
"nlabels",
"self",
".",
"labels",
"=",
"np",
".",
"repeat",
"(",
"labels",
",",
"len",
"(",
"self",
".",
"distances",
")",
")",
".",
"reshape",
"(",
"len",
"(",
"labels",
")",
",",
"len",
"(",
"self",
".",
"distances",
")",
")",
"return",
"self",
".",
"labels",
",",
"self",
".",
"nlabels"
] |
2D labeling at zmax
|
[
"2D",
"labeling",
"at",
"zmax"
] |
python
|
train
|
inveniosoftware/invenio-files-rest
|
invenio_files_rest/models.py
|
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L1648-L1653
|
def get_or_create(cls, mp, part_number):
"""Get or create a part."""
obj = cls.get_or_none(mp, part_number)
if obj:
return obj
return cls.create(mp, part_number)
|
[
"def",
"get_or_create",
"(",
"cls",
",",
"mp",
",",
"part_number",
")",
":",
"obj",
"=",
"cls",
".",
"get_or_none",
"(",
"mp",
",",
"part_number",
")",
"if",
"obj",
":",
"return",
"obj",
"return",
"cls",
".",
"create",
"(",
"mp",
",",
"part_number",
")"
] |
Get or create a part.
|
[
"Get",
"or",
"create",
"a",
"part",
"."
] |
python
|
train
|
MisterWil/abodepy
|
abodepy/devices/alarm.py
|
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/alarm.py#L111-L115
|
def mode(self):
"""Get alarm mode."""
mode = self.get_value('mode').get(self.device_id, None)
return mode.lower()
|
[
"def",
"mode",
"(",
"self",
")",
":",
"mode",
"=",
"self",
".",
"get_value",
"(",
"'mode'",
")",
".",
"get",
"(",
"self",
".",
"device_id",
",",
"None",
")",
"return",
"mode",
".",
"lower",
"(",
")"
] |
Get alarm mode.
|
[
"Get",
"alarm",
"mode",
"."
] |
python
|
train
|
materialsproject/pymatgen
|
pymatgen/core/tensors.py
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/tensors.py#L569-L610
|
def from_values_indices(cls, values, indices, populate=False,
structure=None, voigt_rank=None,
vsym=True, verbose=False):
"""
Creates a tensor from values and indices, with options
for populating the remainder of the tensor.
Args:
values (floats): numbers to place at indices
indices (array-likes): indices to place values at
populate (bool): whether to populate the tensor
structure (Structure): structure to base population
or fit_to_structure on
voigt_rank (int): full tensor rank to indicate the
shape of the resulting tensor. This is necessary
if one provides a set of indices more minimal than
the shape of the tensor they want, e.g.
Tensor.from_values_indices((0, 0), 100)
vsym (bool): whether to voigt symmetrize during the
optimization procedure
verbose (bool): whether to populate verbosely
"""
# auto-detect voigt notation
# TODO: refactor rank inheritance to make this easier
indices = np.array(indices)
if voigt_rank:
shape = ([3]*(voigt_rank % 2) + [6]*(voigt_rank // 2))
else:
shape = np.ceil(np.max(indices+1, axis=0) / 3.) * 3
base = np.zeros(shape.astype(int))
for v, idx in zip(values, indices):
base[tuple(idx)] = v
if 6 in shape:
obj = cls.from_voigt(base)
else:
obj = cls(base)
if populate:
assert structure, "Populate option must include structure input"
obj = obj.populate(structure, vsym=vsym, verbose=verbose)
elif structure:
obj = obj.fit_to_structure(structure)
return obj
|
[
"def",
"from_values_indices",
"(",
"cls",
",",
"values",
",",
"indices",
",",
"populate",
"=",
"False",
",",
"structure",
"=",
"None",
",",
"voigt_rank",
"=",
"None",
",",
"vsym",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
":",
"# auto-detect voigt notation",
"# TODO: refactor rank inheritance to make this easier",
"indices",
"=",
"np",
".",
"array",
"(",
"indices",
")",
"if",
"voigt_rank",
":",
"shape",
"=",
"(",
"[",
"3",
"]",
"*",
"(",
"voigt_rank",
"%",
"2",
")",
"+",
"[",
"6",
"]",
"*",
"(",
"voigt_rank",
"//",
"2",
")",
")",
"else",
":",
"shape",
"=",
"np",
".",
"ceil",
"(",
"np",
".",
"max",
"(",
"indices",
"+",
"1",
",",
"axis",
"=",
"0",
")",
"/",
"3.",
")",
"*",
"3",
"base",
"=",
"np",
".",
"zeros",
"(",
"shape",
".",
"astype",
"(",
"int",
")",
")",
"for",
"v",
",",
"idx",
"in",
"zip",
"(",
"values",
",",
"indices",
")",
":",
"base",
"[",
"tuple",
"(",
"idx",
")",
"]",
"=",
"v",
"if",
"6",
"in",
"shape",
":",
"obj",
"=",
"cls",
".",
"from_voigt",
"(",
"base",
")",
"else",
":",
"obj",
"=",
"cls",
"(",
"base",
")",
"if",
"populate",
":",
"assert",
"structure",
",",
"\"Populate option must include structure input\"",
"obj",
"=",
"obj",
".",
"populate",
"(",
"structure",
",",
"vsym",
"=",
"vsym",
",",
"verbose",
"=",
"verbose",
")",
"elif",
"structure",
":",
"obj",
"=",
"obj",
".",
"fit_to_structure",
"(",
"structure",
")",
"return",
"obj"
] |
Creates a tensor from values and indices, with options
for populating the remainder of the tensor.
Args:
values (floats): numbers to place at indices
indices (array-likes): indices to place values at
populate (bool): whether to populate the tensor
structure (Structure): structure to base population
or fit_to_structure on
voigt_rank (int): full tensor rank to indicate the
shape of the resulting tensor. This is necessary
if one provides a set of indices more minimal than
the shape of the tensor they want, e.g.
Tensor.from_values_indices((0, 0), 100)
vsym (bool): whether to voigt symmetrize during the
optimization procedure
verbose (bool): whether to populate verbosely
|
[
"Creates",
"a",
"tensor",
"from",
"values",
"and",
"indices",
"with",
"options",
"for",
"populating",
"the",
"remainder",
"of",
"the",
"tensor",
"."
] |
python
|
train
|
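A hedged usage sketch for from_values_indices above, loosely following its docstring; the value, index and printed shape are illustrative.

import numpy as np
from pymatgen.core.tensors import Tensor

# Place a single value at index (0, 0); the inferred full shape is 3x3.
t = Tensor.from_values_indices([100.0], np.array([[0, 0]]))
print(t.shape)   # (3, 3)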
pyviz/holoviews
|
holoviews/plotting/mpl/plot.py
|
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/mpl/plot.py#L154-L176
|
def _init_axis(self, fig, axis):
"""
Return an axis which may need to be initialized from
a new figure.
"""
if not fig and self._create_fig:
fig = plt.figure()
l, b, r, t = self.fig_bounds
inches = self.fig_inches
fig.subplots_adjust(left=l, bottom=b, right=r, top=t)
fig.patch.set_alpha(self.fig_alpha)
if isinstance(inches, (tuple, list)):
inches = list(inches)
if inches[0] is None:
inches[0] = inches[1]
elif inches[1] is None:
inches[1] = inches[0]
fig.set_size_inches(list(inches))
else:
fig.set_size_inches([inches, inches])
axis = fig.add_subplot(111, projection=self.projection)
axis.set_aspect('auto')
return fig, axis
|
[
"def",
"_init_axis",
"(",
"self",
",",
"fig",
",",
"axis",
")",
":",
"if",
"not",
"fig",
"and",
"self",
".",
"_create_fig",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"l",
",",
"b",
",",
"r",
",",
"t",
"=",
"self",
".",
"fig_bounds",
"inches",
"=",
"self",
".",
"fig_inches",
"fig",
".",
"subplots_adjust",
"(",
"left",
"=",
"l",
",",
"bottom",
"=",
"b",
",",
"right",
"=",
"r",
",",
"top",
"=",
"t",
")",
"fig",
".",
"patch",
".",
"set_alpha",
"(",
"self",
".",
"fig_alpha",
")",
"if",
"isinstance",
"(",
"inches",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"inches",
"=",
"list",
"(",
"inches",
")",
"if",
"inches",
"[",
"0",
"]",
"is",
"None",
":",
"inches",
"[",
"0",
"]",
"=",
"inches",
"[",
"1",
"]",
"elif",
"inches",
"[",
"1",
"]",
"is",
"None",
":",
"inches",
"[",
"1",
"]",
"=",
"inches",
"[",
"0",
"]",
"fig",
".",
"set_size_inches",
"(",
"list",
"(",
"inches",
")",
")",
"else",
":",
"fig",
".",
"set_size_inches",
"(",
"[",
"inches",
",",
"inches",
"]",
")",
"axis",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
",",
"projection",
"=",
"self",
".",
"projection",
")",
"axis",
".",
"set_aspect",
"(",
"'auto'",
")",
"return",
"fig",
",",
"axis"
] |
Return an axis which may need to be initialized from
a new figure.
|
[
"Return",
"an",
"axis",
"which",
"may",
"need",
"to",
"be",
"initialized",
"from",
"a",
"new",
"figure",
"."
] |
python
|
train
|
sleepyfran/itunespy
|
itunespy/ebook_artist.py
|
https://github.com/sleepyfran/itunespy/blob/0e7e931b135b5e0daae49ba68e9167ff4ac73eb5/itunespy/ebook_artist.py#L29-L34
|
def get_books(self):
"""
Retrieves all the books published by the artist
:return: List. Books published by the artist
"""
return itunespy.lookup(id=self.artist_id, entity=itunespy.entities['ebook'])[1:]
|
[
"def",
"get_books",
"(",
"self",
")",
":",
"return",
"itunespy",
".",
"lookup",
"(",
"id",
"=",
"self",
".",
"artist_id",
",",
"entity",
"=",
"itunespy",
".",
"entities",
"[",
"'ebook'",
"]",
")",
"[",
"1",
":",
"]"
] |
Retrieves all the books published by the artist
:return: List. Books published by the artist
|
[
"Retrieves",
"all",
"the",
"books",
"published",
"by",
"the",
"artist",
":",
"return",
":",
"List",
".",
"Books",
"published",
"by",
"the",
"artist"
] |
python
|
train
|
trailofbits/manticore
|
manticore/utils/fallback_emulator.py
|
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/utils/fallback_emulator.py#L123-L136
|
def _hook_unmapped(self, uc, access, address, size, value, data):
"""
We hit an unmapped region; map it into unicorn.
"""
try:
m = self._create_emulated_mapping(uc, address)
except MemoryException as e:
self._to_raise = e
self._should_try_again = False
return False
self._should_try_again = True
return False
|
[
"def",
"_hook_unmapped",
"(",
"self",
",",
"uc",
",",
"access",
",",
"address",
",",
"size",
",",
"value",
",",
"data",
")",
":",
"try",
":",
"m",
"=",
"self",
".",
"_create_emulated_mapping",
"(",
"uc",
",",
"address",
")",
"except",
"MemoryException",
"as",
"e",
":",
"self",
".",
"_to_raise",
"=",
"e",
"self",
".",
"_should_try_again",
"=",
"False",
"return",
"False",
"self",
".",
"_should_try_again",
"=",
"True",
"return",
"False"
] |
We hit an unmapped region; map it into unicorn.
|
[
"We",
"hit",
"an",
"unmapped",
"region",
";",
"map",
"it",
"into",
"unicorn",
"."
] |
python
|
valid
|