id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
43,691 |
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
..UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
:math:`01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle}` and the graph :math:`G`,
the `edge_driver` method will output the following Hamiltonian:
..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
If either of the states :math:`\01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
..UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
:math:`01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle}` and the graph :math:`G`,
the `edge_driver` method will output the following Hamiltonian:
..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
If either of the states :math:`|01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
57,682 |
def main():
global fields_to_hash, unpopulate_fields, populate_fields
fields_to_hash = frozenset([x for x in argToList(demisto.args().get('fieldsToHash', '')) if x]) # type: ignore
unpopulate_fields = frozenset([x for x in argToList(demisto.args().get('dontPopulateFields', ''))]) # type: ignore
populate_fields = frozenset([x for x in argToList(demisto.args().get('populateFields', ''))]) # type: ignore
limit = int(demisto.args().get('limit', PAGE_SIZE))
query = demisto.args().get('query', '')
offset = int(demisto.args().get('offset', 0))
indicators = find_indicators_with_limit(query, limit, offset)
entry = fileResult("indicators.json", json.dumps(indicators).encode('utf8'))
entry['Contents'] = indicators
entry['ContentsFormat'] = formats['json']
entry['HumanReadable'] = "Fetched %d indicators successfully by the query: %s" % (len(indicators), query)
return entry
|
def main():
global fields_to_hash, unpopulate_fields, populate_fields
args = demisto.args()
fields_to_hash = frozenset([x for x in argToList(args.get('fieldsToHash', '')) if x]) # type: ignore
unpopulate_fields = frozenset([x for x in argToList(args.get('dontPopulateFields', ''))]) # type: ignore
populate_fields = frozenset([x for x in argToList(args.get('populateFields', ''))]) # type: ignore
limit = int(args.get('limit', PAGE_SIZE))
query = args.get('query', '')
offset = int(args.get('offset', 0))
indicators = find_indicators_with_limit(query, limit, offset)
entry = fileResult("indicators.json", json.dumps(indicators).encode('utf8'))
entry['Contents'] = indicators
entry['ContentsFormat'] = formats['json']
entry['HumanReadable'] = "Fetched %d indicators successfully by the query: %s" % (len(indicators), query)
return entry
|
8,525 |
def status_show(context: Context, data_dict: DataDict) -> ActionResult.StatusShow:
'''Return a dictionary with information about the site's configuration.
:rtype: dictionary
'''
extensions = config.get_value('ckan.plugins')
site_info = {
'site_title': config.get_value('ckan.site_title'),
'site_description': config.get_value('ckan.site_description'),
'site_url': config.get_value('ckan.site_url'),
'error_emails_to': config.get_value('email_to'),
'locale_default': config.get_value('ckan.locale_default'),
'extensions': extensions,
}
if not asbool(config.get('ckan.hide_version')):
site_info['ckan_version'] = ckan.__version__
return site_info
|
def status_show(context: Context, data_dict: DataDict) -> ActionResult.StatusShow:
'''Return a dictionary with information about the site's configuration.
:rtype: dictionary
'''
extensions = config.get_value('ckan.plugins')
site_info = {
'site_title': config.get_value('ckan.site_title'),
'site_description': config.get_value('ckan.site_description'),
'site_url': config.get_value('ckan.site_url'),
'error_emails_to': config.get_value('email_to'),
'locale_default': config.get_value('ckan.locale_default'),
'extensions': extensions,
}
if not config.get_value('ckan.hide_version'):
site_info['ckan_version'] = ckan.__version__
return site_info
|
40,694 |
def Fbeta(
beta: float,
average: bool = True,
precision: Optional[Precision] = None,
recall: Optional[Recall] = None,
output_transform: Optional[Callable] = None,
device: Union[str, torch.device] = torch.device("cpu"),
) -> MetricsLambda:
"""Calculates F-beta score
Args:
beta (float): weight of precision in harmonic mean
average (bool, optional): if True, F-beta score is computed as the unweighted average (across all classes
in multiclass case), otherwise, returns a tensor with F-beta score for each class in multiclass case.
precision (Precision, optional): precision object metric with `average=False` to compute F-beta score
recall (Precision, optional): recall object metric with `average=False` to compute F-beta score
output_transform (callable, optional): a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. It is used only if precision or recall are not provided.
device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's
device to be the same as your `update` arguments ensures the `update` method is non-blocking. By
default, CPU.
Returns:
MetricsLambda, F-beta metric
"""
if not (beta > 0):
raise ValueError("Beta should be a positive integer, but given {}".format(beta))
if precision is not None and output_transform is not None:
raise ValueError("If precision argument is provided, output_transform should be None")
if recall is not None and output_transform is not None:
raise ValueError("If recall argument is provided, output_transform should be None")
if precision is None:
precision = Precision(
output_transform=(lambda x: x) if output_transform is None else output_transform,
average=False,
device=device,
)
elif precision._average:
raise ValueError("Input precision metric should have average=False")
if recall is None:
recall = Recall(
output_transform=(lambda x: x) if output_transform is None else output_transform,
average=False,
device=device,
)
elif recall._average:
raise ValueError("Input recall metric should have average=False")
fbeta = (1.0 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall + 1e-15)
if average:
fbeta = fbeta.mean().item()
return fbeta
|
def Fbeta(
beta: float,
average: bool = True,
precision: Optional[Precision] = None,
recall: Optional[Recall] = None,
output_transform: Optional[Callable] = None,
device: Union[str, torch.device] = torch.device("cpu"),
) -> MetricsLambda:
"""Calculates F-beta score
Args:
beta (float): weight of precision in harmonic mean
average (bool, optional): if True, F-beta score is computed as the unweighted average (across all classes
in multiclass case), otherwise, returns a tensor with F-beta score for each class in multiclass case.
precision (Precision, optional): precision object metric with `average=False` to compute F-beta score
recall (Precision, optional): recall object metric with `average=False` to compute F-beta score
output_transform (callable, optional): a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. It is used only if precision or recall are not provided.
device (str or torch.device): specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Returns:
MetricsLambda, F-beta metric
"""
if not (beta > 0):
raise ValueError("Beta should be a positive integer, but given {}".format(beta))
if precision is not None and output_transform is not None:
raise ValueError("If precision argument is provided, output_transform should be None")
if recall is not None and output_transform is not None:
raise ValueError("If recall argument is provided, output_transform should be None")
if precision is None:
precision = Precision(
output_transform=(lambda x: x) if output_transform is None else output_transform,
average=False,
device=device,
)
elif precision._average:
raise ValueError("Input precision metric should have average=False")
if recall is None:
recall = Recall(
output_transform=(lambda x: x) if output_transform is None else output_transform,
average=False,
device=device,
)
elif recall._average:
raise ValueError("Input recall metric should have average=False")
fbeta = (1.0 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall + 1e-15)
if average:
fbeta = fbeta.mean().item()
return fbeta
|
5,299 |
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss != 1 and m.loss != 2:
raise ValueError("Loss paramter value can be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model != 1 and m.model != 2:
raise ValueError(
"Model parameter value can be either 1 (for Continous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss != 1 and m.loss != 2:
raise ValueError("Loss paramter value can be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model != 1 and m.model != 2:
raise ValueError(
"The fasttext `model` parameter must be either 1 (for Continuous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
3,359 |
def _path_to_regex(pattern: str) -> Pattern[str]:
"""
ported from https://github.com/hmarr/codeowners/blob/d0452091447bd2a29ee508eebc5a79874fb5d4ff/match.go#L33
ported from https://github.com/sbdchd/codeowners/blob/6c5e8563f4c675abb098df704e19f4c6b95ff9aa/codeowners/__init__.py#L16
There are some special cases like backslash that was added
MIT License
Copyright (c) 2020 Harry Marr
Copyright (c) 2019-2020 Steve Dignam
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
regex = ""
# Special case backslash can match a backslash file or directory
if pattern[0] == "\\":
return re.compile(r"\\(?:\Z|/)")
slash_pos = pattern.find("/")
anchored = slash_pos > -1 and slash_pos != len(pattern) - 1
regex += r"\A" if anchored else r"(?:\A|/)"
matches_dir = pattern[-1] == "/"
if matches_dir:
pattern = pattern.rstrip("/")
# patterns ending with "/*" are special. They only match items directly in the directory
# not deeper
trailing_slash_star = pattern[-1] == "*" and len(pattern) > 1 and pattern[-2] == "/"
iterator = enumerate(pattern)
# Anchored paths may or may not start with a slash
if anchored and pattern[0] == "/":
next(iterator, None)
regex += r"/?"
for i, ch in iterator:
if ch == "*":
# Handle double star (**) case properly
if i + 1 < len(pattern) and pattern[i + 1] == "*":
left_anchored = i == 0
leading_slash = i > 0 and pattern[i - 1] == "/"
right_anchored = i + 2 == len(pattern)
trailing_slash = i + 2 < len(pattern) and pattern[i + 2] == "/"
if (left_anchored or leading_slash) and (right_anchored or trailing_slash):
regex += ".*"
next(iterator, None)
next(iterator, None)
continue
regex += "[^/]*"
elif ch == "?":
regex += "[^/]"
else:
regex += re.escape(ch)
if matches_dir:
regex += "/"
elif trailing_slash_star:
regex += r"\Z"
else:
regex += r"(?:\Z|/)"
return re.compile(regex)
|
def _path_to_regex(pattern: str) -> Pattern[str]:
"""
ported from https://github.com/hmarr/codeowners/blob/d0452091447bd2a29ee508eebc5a79874fb5d4ff/match.go#L33
ported from https://github.com/sbdchd/codeowners/blob/6c5e8563f4c675abb098df704e19f4c6b95ff9aa/codeowners/__init__.py#L16
There are some special cases like backslash that were added
MIT License
Copyright (c) 2020 Harry Marr
Copyright (c) 2019-2020 Steve Dignam
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
regex = ""
# Special case backslash can match a backslash file or directory
if pattern[0] == "\\":
return re.compile(r"\\(?:\Z|/)")
slash_pos = pattern.find("/")
anchored = slash_pos > -1 and slash_pos != len(pattern) - 1
regex += r"\A" if anchored else r"(?:\A|/)"
matches_dir = pattern[-1] == "/"
if matches_dir:
pattern = pattern.rstrip("/")
# patterns ending with "/*" are special. They only match items directly in the directory
# not deeper
trailing_slash_star = pattern[-1] == "*" and len(pattern) > 1 and pattern[-2] == "/"
iterator = enumerate(pattern)
# Anchored paths may or may not start with a slash
if anchored and pattern[0] == "/":
next(iterator, None)
regex += r"/?"
for i, ch in iterator:
if ch == "*":
# Handle double star (**) case properly
if i + 1 < len(pattern) and pattern[i + 1] == "*":
left_anchored = i == 0
leading_slash = i > 0 and pattern[i - 1] == "/"
right_anchored = i + 2 == len(pattern)
trailing_slash = i + 2 < len(pattern) and pattern[i + 2] == "/"
if (left_anchored or leading_slash) and (right_anchored or trailing_slash):
regex += ".*"
next(iterator, None)
next(iterator, None)
continue
regex += "[^/]*"
elif ch == "?":
regex += "[^/]"
else:
regex += re.escape(ch)
if matches_dir:
regex += "/"
elif trailing_slash_star:
regex += r"\Z"
else:
regex += r"(?:\Z|/)"
return re.compile(regex)
|
32,357 |
def handle_prevalence_command(client: Client, command: str, args: dict):
key_names_in_response = {
'ip': 'ip_address',
'domain': 'domain_name',
'process': 'process_name',
'cmd': 'process_command_line',
'hash': 'sha256',
'registry': 'key_name'
}
args.pop('integration_context_brand', None)
args.pop('integration_name', None)
if command == 'core-get-registry-analytics-prevalence':
# arg list should in the following structure:
# args: [
# {"key_name": "some_key1", "value_name": "some_value1"},
# {"key_name": "some_key2", "value_name": "some_value2"}
# ]
args_list = []
keys = argToList(args.get('key_name'))
values = argToList(args.get('value_name'))
if len(keys) != len(values):
raise DemistoException('Number of elements in key_name argument should be equal to the number '
'of elements in value_name argument.')
for i in range(len(keys)):
args_list.append({'key_name': keys[i], 'value_name': values[i]})
else:
args_list = []
for key, value in args.items():
values = argToList(value)
for val in values:
args_list.append({key: val})
request_body = {
'api_id': command,
'args': args_list
}
res = client.get_prevalence(request_body).get('results', [])
command_type = PREVALENCE_COMMANDS[command]
return CommandResults(
readable_output=tableToMarkdown(string_to_table_header(f'{command_type} Prevalence'),
[{
key_names_in_response[command_type]: item.get('args', {}).get(
key_names_in_response[command_type]),
'Prevalence': item.get('value')
} for item in res],
headerTransform=string_to_table_header),
outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.AnalyticsPrevalence.{command_type.title()}',
outputs=res,
raw_response=res,
)
|
def handle_prevalence_command(client: Client, command: str, args: dict):
key_names_in_response = {
'ip': 'ip_address',
'domain': 'domain_name',
'process': 'process_name',
'cmd': 'process_command_line',
'hash': 'sha256',
'registry': 'key_name',
}
args.pop('integration_context_brand', None)
args.pop('integration_name', None)
if command == 'core-get-registry-analytics-prevalence':
# arg list should in the following structure:
# args: [
# {"key_name": "some_key1", "value_name": "some_value1"},
# {"key_name": "some_key2", "value_name": "some_value2"}
# ]
args_list = []
keys = argToList(args.get('key_name'))
values = argToList(args.get('value_name'))
if len(keys) != len(values):
raise DemistoException('Number of elements in key_name argument should be equal to the number '
'of elements in value_name argument.')
for i in range(len(keys)):
args_list.append({'key_name': keys[i], 'value_name': values[i]})
else:
args_list = []
for key, value in args.items():
values = argToList(value)
for val in values:
args_list.append({key: val})
request_body = {
'api_id': command,
'args': args_list
}
res = client.get_prevalence(request_body).get('results', [])
command_type = PREVALENCE_COMMANDS[command]
return CommandResults(
readable_output=tableToMarkdown(string_to_table_header(f'{command_type} Prevalence'),
[{
key_names_in_response[command_type]: item.get('args', {}).get(
key_names_in_response[command_type]),
'Prevalence': item.get('value')
} for item in res],
headerTransform=string_to_table_header),
outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.AnalyticsPrevalence.{command_type.title()}',
outputs=res,
raw_response=res,
)
|
42,466 |
def test_ipynb_diff_with_no_change_dir(tmp_path: tmp_path) -> None:
jupyter_dependencies_are_installed.cache_clear()
runner = CliRunner()
nb = os.path.join("tests", "data", "notebook_trailing_newline.ipynb")
tmp_nb = tmp_path / "notebook.ipynb"
with open(nb) as src, open(tmp_nb, "w") as dst:
dst.write(src.read())
result = runner.invoke(main, [str(tmp_path)])
expected_output = (
"Skipping .ipynb files as Jupyter dependencies are not installed.\n"
"You can fix this by running ``pip install black[jupyter]``\n"
)
assert expected_output in result.output
|
def test_ipynb_diff_with_no_change_dir(tmp_path: pathlib.Path) -> None:
jupyter_dependencies_are_installed.cache_clear()
runner = CliRunner()
nb = os.path.join("tests", "data", "notebook_trailing_newline.ipynb")
tmp_nb = tmp_path / "notebook.ipynb"
with open(nb) as src, open(tmp_nb, "w") as dst:
dst.write(src.read())
result = runner.invoke(main, [str(tmp_path)])
expected_output = (
"Skipping .ipynb files as Jupyter dependencies are not installed.\n"
"You can fix this by running ``pip install black[jupyter]``\n"
)
assert expected_output in result.output
|
8,120 |
def sample_at_coords(smap, coordinates):
"""
Samples the data in a map at given series of coordinates.
Parameters
----------
smap : `~sunpy.map.GenericMap`
A SunPy map.
coordinates : `~astropy.coordinates.SkyCoord`
A list of input coordinates
Returns
-------
`~numpy.array`
A `numpy.array` corresponding to the data obtained from the map,
at the input coordinates.
"""
return smap.data[smap.wcs.world_to_array_index(coordinates)]
|
def sample_at_coords(smap, coordinates):
"""
Samples the data in a map at given series of coordinates.
Parameters
----------
smap : `~sunpy.map.GenericMap`
A SunPy map.
coordinates : `~astropy.coordinates.SkyCoord`
A list of input coordinates.
Returns
-------
`~numpy.array`
A `numpy.array` corresponding to the data obtained from the map,
at the input coordinates.
"""
return smap.data[smap.wcs.world_to_array_index(coordinates)]
|
5,315 |
def list_tags_for_resource(Type, Id, region=None, key=None, keyid=None, profile=None):
'''
Lists all tags attadhed to given resource.
Type
Type of resource to lookup.
Id
The Id of resource to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.list_tags_for_resource TYPE ID \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
args = {'ResourceType': Type, 'ResourceId': Id}
return _collect_results(conn.list_tags_for_resource, None, args)
|
def list_tags_for_resource(Type, Id, region=None, key=None, keyid=None, profile=None):
'''
Lists all tags attached to given resource.
Type
Type of resource to lookup.
Id
The Id of resource to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.list_tags_for_resource TYPE ID \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
args = {'ResourceType': Type, 'ResourceId': Id}
return _collect_results(conn.list_tags_for_resource, None, args)
|
12,895 |
def patch_pagination_args(field: DjangoConnectionField):
"""Add descriptions to pagination arguments in a connection field.
By default Graphene's connection fields comes without description for pagination
arguments. This functions patches those fields to add the descriptions.
"""
field.args["first"].description = "Returns the first n elements from the list."
field.args["last"].description = "Returns the last n elements from the list."
field.args[
"before"
].description = (
"Returns the elements in the list that come before the specified cursor."
)
field.args[
"after"
].description = (
"Returns the elements in the list that come after the specified cursor."
)
|
def patch_pagination_args(field: DjangoConnectionField):
"""Add descriptions to pagination arguments in a connection field.
By default Graphene's connection fields comes without description for pagination
arguments. This functions patches those fields to add the descriptions.
"""
field.args["first"].description = "Returns the first n elements from the list."
field.args["last"].description = "Returns the last n elements from the list."
field.args[
"before"
].description = (
"Returns the elements in the list that come before the specified cursor."
)
field.args[
"after"
].description = (
"Return the elements in the list that come after the specified cursor."
)
|
13,872 |
def test_gif_fps_error(test_images, tmp_path):
im = iio.v3.imread(
test_images / "newtonscradle.gif",
plugin="pillow",
mode="L",
)
with pytest.raises(NotImplementedError, match="The keyword, `fps`"):
iio.v3.imwrite(
tmp_path / "test.gif",
im[..., 0],
plugin="pillow",
fps=60,
mode="L",
)
|
def test_gif_fps_error(test_images, tmp_path):
im = iio.v3.imread(
test_images / "newtonscradle.gif",
plugin="pillow",
mode="L",
)
with pytest.raises(TypeError, match="The keyword, `fps`"):
iio.v3.imwrite(
tmp_path / "test.gif",
im[..., 0],
plugin="pillow",
fps=60,
mode="L",
)
|
49,031 |
def deferred_to_future(d: Deferred) -> Future:
""" Wraps a Deferred into a Future. Requires the asyncio reactor.
"""
return d.asFuture(asyncio.get_event_loop())
|
def deferred_to_future(d: Deferred) -> Future:
"""Return an :class:`asyncio.Future` object that wraps *d*.
When :ref:`using the asyncio reactor <install-asyncio>`, you cannot await
on :class:`~twisted.internet.defer.Deferred` objects from :ref:`Scrapy
callables defined as coroutines <coroutine-support>`, you can only await on
``Future`` objects. Wrapping ``Deferred`` objects into ``Future`` objects
allows you to wait on them::
class MySpider(Spider):
...
async def parse(self, response):
d = treq.get('https://example.com/additional')
additional_response = await deferred_to_future(d)
"""
return d.asFuture(asyncio.get_event_loop())
|
30,321 |
def get_alarm_events(data_args):
id = data_args.get('alarm-id')
count = int(data_args.get('count'))
fields = data_args.get('fields')
show_log_message = data_args.get('get-log-message') == 'True'
res = http_request('GET', 'lr-drilldown-cache-api/drilldown/' + id)
res = res['Data']['DrillDownResults']['RuleBlocks']
events = []
for block in res:
if not block['DrillDownLogs']:
continue
logs = json.loads(block['DrillDownLogs'])
for log in logs:
fix_date_values(log)
if not show_log_message:
del log['logMessage']
events.append((log))
events = events[:count]
human_readable = tableToMarkdown('Events information for alarm ' + id, events)
if fields:
fields = string.split(fields, ',')
for event in events:
for key in event.keys():
if key not in fields:
del event[key]
ec = {"ID": int(id), "Event": events}
context = createContext(ec, removeNull=True)
outputs = {'Logrhythm.Alarm(val.ID === obj.ID)': context}
return_outputs(readable_output=human_readable, outputs=outputs, raw_response=res)
|
def get_alarm_events(data_args):
id = data_args.get('alarm-id')
count = int(data_args.get('count'))
fields = data_args.get('fields')
show_log_message = data_args.get('get-log-message') == 'True'
res = http_request('GET', 'lr-drilldown-cache-api/drilldown/' + id)
res = res['Data']['DrillDownResults']['RuleBlocks']
events = []
for block in res:
if not block.get('DrillDownLogs'):
continue
logs = json.loads(block['DrillDownLogs'])
for log in logs:
fix_date_values(log)
if not show_log_message:
del log['logMessage']
events.append((log))
events = events[:count]
human_readable = tableToMarkdown('Events information for alarm ' + id, events)
if fields:
fields = string.split(fields, ',')
for event in events:
for key in event.keys():
if key not in fields:
del event[key]
ec = {"ID": int(id), "Event": events}
context = createContext(ec, removeNull=True)
outputs = {'Logrhythm.Alarm(val.ID === obj.ID)': context}
return_outputs(readable_output=human_readable, outputs=outputs, raw_response=res)
|
7,405 |
def farid(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
"""Find the edge magnitude using the Scharr transform.
Parameters
----------
image : array
The input image.
mask : array of bool, optional
Clip the output image to this mask. (Values where mask=0 will be set
to 0.)
axis : int or sequence of int, optional
Compute the edge filter along this axis. If not provided, the edge
magnitude is computed. This is defined as::
farid_mag = np.sqrt(sum([farid(image, axis=i)**2
for i in range(image.ndim)]) / image.ndim)
The magnitude is also computed if axis is a sequence.
mode : str or sequence of str, optional
The boundary mode for the convolution. See `scipy.ndimage.convolve`
for a description of the modes. This can be either a single boundary
mode or one boundary mode per axis.
cval : float, optional
When `mode` is ``'constant'``, this is the constant used in values
outside the boundary of the image data.
Returns
-------
output : array of float
The Scharr edge map.
See also
--------
farid_h, farid_v : horizontal and vertical edge detection.
scharr, sobel, prewitt, skimage.feature.canny
Notes
-----
Take the square root of the sum of the squares of the horizontal and
vertical derivatives to get a magnitude that is somewhat insensitive to
direction. Similar to the Scharr operator, this operator is designed with
a rotation invariance constraint.
References
----------
.. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
multidimensional signals", IEEE Transactions on Image Processing
13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
.. [2] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
<https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives>
Examples
--------
>>> from skimage import data
>>> camera = data.camera()
>>> from skimage import filters
>>> edges = filters.farid(camera)
"""
output = _generic_edge_filter(image, smooth_weights=farid_smooth,
edge_weights=farid_edge, axis=axis,
mode=mode, cval=cval)
output = _mask_filter_result(output, mask)
return output
|
def farid(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
"""Find the edge magnitude using the Farid transform.
Parameters
----------
image : array
The input image.
mask : array of bool, optional
Clip the output image to this mask. (Values where mask=0 will be set
to 0.)
axis : int or sequence of int, optional
Compute the edge filter along this axis. If not provided, the edge
magnitude is computed. This is defined as::
farid_mag = np.sqrt(sum([farid(image, axis=i)**2
for i in range(image.ndim)]) / image.ndim)
The magnitude is also computed if axis is a sequence.
mode : str or sequence of str, optional
The boundary mode for the convolution. See `scipy.ndimage.convolve`
for a description of the modes. This can be either a single boundary
mode or one boundary mode per axis.
cval : float, optional
When `mode` is ``'constant'``, this is the constant used in values
outside the boundary of the image data.
Returns
-------
output : array of float
The Scharr edge map.
See also
--------
farid_h, farid_v : horizontal and vertical edge detection.
scharr, sobel, prewitt, skimage.feature.canny
Notes
-----
Take the square root of the sum of the squares of the horizontal and
vertical derivatives to get a magnitude that is somewhat insensitive to
direction. Similar to the Scharr operator, this operator is designed with
a rotation invariance constraint.
References
----------
.. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
multidimensional signals", IEEE Transactions on Image Processing
13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
.. [2] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
<https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives>
Examples
--------
>>> from skimage import data
>>> camera = data.camera()
>>> from skimage import filters
>>> edges = filters.farid(camera)
"""
output = _generic_edge_filter(image, smooth_weights=farid_smooth,
edge_weights=farid_edge, axis=axis,
mode=mode, cval=cval)
output = _mask_filter_result(output, mask)
return output
|
24,856 |
def my_func(self):
"""This is a docstring.
Returns:
bool: Always False
"""
return False
|
def my_func(self):
"""find_google_returns
Returns:
bool: Always False
"""
return False
|
4,252 |
def _set_sfreq(ft_struct):
"""Set the sample frequency."""
try:
sfreq = ft_struct['fsample']
except KeyError:
try:
time = ft_struct['time']
except KeyError:
raise ValueError('No Source for sfreq found')
else:
t1, t2 = float(time[0]), float(time[1])
sfreq = 1 / abs(t1 - t2)
try:
sfreq = float(sfreq)
except TypeError:
warn('FieldTrip structure contained multiple sample rates, trying the '
f'first of:\n{sfreq}')
sfreq = float(sfreq.ravel()[0])
return sfreq
|
def _set_sfreq(ft_struct):
"""Set the sample frequency."""
try:
sfreq = ft_struct['fsample']
except KeyError:
try:
time = ft_struct['time']
except KeyError:
raise ValueError('No Source for sfreq found')
else:
t1, t2 = float(time[0]), float(time[1])
sfreq = 1 / (t2 - t1)
try:
sfreq = float(sfreq)
except TypeError:
warn('FieldTrip structure contained multiple sample rates, trying the '
f'first of:\n{sfreq}')
sfreq = float(sfreq.ravel()[0])
return sfreq
|
53,790 |
def parse_html(content):
try:
# We need the reference to the HTMLParser later to get the document encoding,
# so we can't use the html5lib.parse convenience function here.
tb = html5lib.treebuilders.getTreeBuilder("etree")
p = html5lib.HTMLParser(tb, namespaceHTMLElements=False)
document = p.parse(content)
if not document:
# Could not parse
return content
for parent in document.findall(".//script/.."):
for script in parent.findall("script"):
replace_script(parent, script)
# Because html5lib parses like a browser, it will
# always create head and body tags if they are missing.
head = document.find("head")
SubElement(
head,
"script",
attrib={
"src": static(
"content/{filename}".format(filename=get_hashi_filename())
)
},
)
# Currently, html5lib strips the doctype, but it's important for correct rendering, so check the original
# content for the doctype and, if found, prepend it to the content serialized by html5lib
doctype = None
try:
encoding = p.documentEncoding
if not encoding:
encoding = "utf-8"
doctype = re.match("(?i)<!DOCTYPE[^<>]*(?:<!ENTITY[^<>]*>[^<>]*)?>", content.decode(encoding))
except:
# Losing the doctype could lead to some rendering issues, but they are usually not severe enough
# to be worth stopping the content from loading entirely.
pass
html = html5lib.serialize(
document,
quote_attr_values="always",
omit_optional_tags=False,
minimize_boolean_attributes=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
)
if doctype:
html = doctype.group() + html
return html
except html5lib.html5parser.ParseError:
return content
|
def parse_html(content):
try:
# We need the reference to the HTMLParser later to get the document encoding,
# so we can't use the html5lib.parse convenience function here.
tree_builder = html5lib.treebuilders.getTreeBuilder("etree")
parser = html5lib.HTMLParser(tree_builder, namespaceHTMLElements=False)
document = p.parse(content)
if not document:
# Could not parse
return content
for parent in document.findall(".//script/.."):
for script in parent.findall("script"):
replace_script(parent, script)
# Because html5lib parses like a browser, it will
# always create head and body tags if they are missing.
head = document.find("head")
SubElement(
head,
"script",
attrib={
"src": static(
"content/{filename}".format(filename=get_hashi_filename())
)
},
)
# Currently, html5lib strips the doctype, but it's important for correct rendering, so check the original
# content for the doctype and, if found, prepend it to the content serialized by html5lib
doctype = None
try:
encoding = p.documentEncoding
if not encoding:
encoding = "utf-8"
doctype = re.match("(?i)<!DOCTYPE[^<>]*(?:<!ENTITY[^<>]*>[^<>]*)?>", content.decode(encoding))
except:
# Losing the doctype could lead to some rendering issues, but they are usually not severe enough
# to be worth stopping the content from loading entirely.
pass
html = html5lib.serialize(
document,
quote_attr_values="always",
omit_optional_tags=False,
minimize_boolean_attributes=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
)
if doctype:
html = doctype.group() + html
return html
except html5lib.html5parser.ParseError:
return content
|
58,077 |
def entries_to_markdown(entry_list: List[str]):
"""
Args:
entry_list (List[str]): the _return_value array from demisto context
Returns:
str: a markdown table to be displayed on the layout.
"""
process_list = []
if not entry_list:
return ""
for entry in entry_list:
start_cpu = entry.find('CPU')
end_cpu = start_cpu + 5
start_memory = entry.find('Memory')
end_memory = start_memory + 8
process_list.append({
'Name': entry[6:start_cpu - 2],
'CPU': entry[end_cpu:start_memory - 2],
'Memory': entry[end_memory: len(entry)]
})
md = tableToMarkdown('', process_list, ['Name', 'CPU', 'Memory'])
return md
|
def entries_to_markdown(entry_list: List[str]):
"""
Args:
entry_list (List[str]): the _return_value array from demisto context
Returns:
str: a markdown table to be displayed on the layout.
"""
if not entry_list:
return ''
process_list = []
for entry in entry_list:
start_cpu = entry.find('CPU')
end_cpu = start_cpu + 5
start_memory = entry.find('Memory')
end_memory = start_memory + 8
process_list.append({
'Name': entry[6:start_cpu - 2],
'CPU': entry[end_cpu:start_memory - 2],
'Memory': entry[end_memory: len(entry)]
})
md = tableToMarkdown('', process_list, ['Name', 'CPU', 'Memory'])
return md
|
6,591 |
def get_data(filters):
data = []
# Validate if vat settings exist
company = filters.get('company')
if frappe.db.exists('KSA VAT Setting', company) is None:
url = get_url_to_list('KSA VAT Setting')
frappe.msgprint(f'Create <a href="{url}">KSA VAT Setting</a> for this company')
return data
ksa_vat_setting = frappe.get_doc('KSA VAT Setting', company)
# Sales Heading
append_data(data, 'VAT on Sales', '', '', '')
grand_total_taxable_amount = 0
grand_total_taxable_adjustment_amount = 0
grand_total_tax = 0
for vat_setting in ksa_vat_setting.ksa_vat_sales_accounts:
total_taxable_amount, total_taxable_adjustment_amount, \
total_tax = get_tax_data_for_each_vat_setting(vat_setting, filters, 'Sales Invoice')
# Adding results to data
append_data(data, vat_setting.title, total_taxable_amount,
total_taxable_adjustment_amount, total_tax)
grand_total_taxable_amount += total_taxable_amount
grand_total_taxable_adjustment_amount += total_taxable_adjustment_amount
grand_total_tax += total_tax
# Sales Grand Total
append_data(data, 'Grand Total', grand_total_taxable_amount,
grand_total_taxable_adjustment_amount, grand_total_tax )
# Blank Line
append_data(data, '', '', '', '')
# Purchase Heading
append_data(data, 'VAT on Purchases', '', '', '')
grand_total_taxable_amount = 0
grand_total_taxable_adjustment_amount = 0
grand_total_tax = 0
for vat_setting in ksa_vat_setting.ksa_vat_purchase_accounts:
total_taxable_amount, total_taxable_adjustment_amount, \
total_tax = get_tax_data_for_each_vat_setting(vat_setting, filters, 'Purchase Invoice')
# Adding results to data
append_data(data, vat_setting.title, total_taxable_amount,
total_taxable_adjustment_amount, total_tax)
grand_total_taxable_amount += total_taxable_amount
grand_total_taxable_adjustment_amount += total_taxable_adjustment_amount
grand_total_tax += total_tax
# Purchase Grand Total
append_data(data, 'Grand Total', grand_total_taxable_amount,
grand_total_taxable_adjustment_amount, grand_total_tax )
return data
|
def get_data(filters):
data = []
# Validate if vat settings exist
company = filters.get('company')
if frappe.db.exists('KSA VAT Setting', company) is None:
url = get_url_to_list('KSA VAT Setting')
frappe.msgprint(_('Create <a href="{}">KSA VAT Setting</a> for this company').format(url))
return data
ksa_vat_setting = frappe.get_doc('KSA VAT Setting', company)
# Sales Heading
append_data(data, 'VAT on Sales', '', '', '')
grand_total_taxable_amount = 0
grand_total_taxable_adjustment_amount = 0
grand_total_tax = 0
for vat_setting in ksa_vat_setting.ksa_vat_sales_accounts:
total_taxable_amount, total_taxable_adjustment_amount, \
total_tax = get_tax_data_for_each_vat_setting(vat_setting, filters, 'Sales Invoice')
# Adding results to data
append_data(data, vat_setting.title, total_taxable_amount,
total_taxable_adjustment_amount, total_tax)
grand_total_taxable_amount += total_taxable_amount
grand_total_taxable_adjustment_amount += total_taxable_adjustment_amount
grand_total_tax += total_tax
# Sales Grand Total
append_data(data, 'Grand Total', grand_total_taxable_amount,
grand_total_taxable_adjustment_amount, grand_total_tax )
# Blank Line
append_data(data, '', '', '', '')
# Purchase Heading
append_data(data, 'VAT on Purchases', '', '', '')
grand_total_taxable_amount = 0
grand_total_taxable_adjustment_amount = 0
grand_total_tax = 0
for vat_setting in ksa_vat_setting.ksa_vat_purchase_accounts:
total_taxable_amount, total_taxable_adjustment_amount, \
total_tax = get_tax_data_for_each_vat_setting(vat_setting, filters, 'Purchase Invoice')
# Adding results to data
append_data(data, vat_setting.title, total_taxable_amount,
total_taxable_adjustment_amount, total_tax)
grand_total_taxable_amount += total_taxable_amount
grand_total_taxable_adjustment_amount += total_taxable_adjustment_amount
grand_total_tax += total_tax
# Purchase Grand Total
append_data(data, 'Grand Total', grand_total_taxable_amount,
grand_total_taxable_adjustment_amount, grand_total_tax )
return data
|
14,073 |
def geom_equals_mask(this, that):
"""
Test for geometric equality. Empty or missing geometries are considered
equal.
Parameters
----------
this, that : arrays of Geo objects (or anything that has an `is_empty`
attribute)
Returns
-------
Series
boolean Series, True if geometries in left equal geometries in right
"""
return (
this.geom_equals(that)
| (this.is_empty & that.is_empty)
| (_isna(this) & _isna(that))
)
|
def _geom_equals_mask(this, that):
"""
Test for geometric equality. Empty or missing geometries are considered
equal.
Parameters
----------
this, that : arrays of Geo objects (or anything that has an `is_empty`
attribute)
Returns
-------
Series
boolean Series, True if geometries in left equal geometries in right
"""
return (
this.geom_equals(that)
| (this.is_empty & that.is_empty)
| (_isna(this) & _isna(that))
)
|
17,471 |
def test_xarray_ufuncs_deprecation():
with pytest.warns(DeprecationWarning, match="xarray.ufuncs"):
xu.cos(xr.DataArray([0, 1]))
with pytest.warns(None) as record:
xu.angle(xr.DataArray([0, 1]))
assert len(record) == 0
|
def test_xarray_ufuncs_deprecation():
with pytest.warns(FutureWarning, match="xarray.ufuncs"):
xu.cos(xr.DataArray([0, 1]))
with pytest.warns(None) as record:
xu.angle(xr.DataArray([0, 1]))
assert len(record) == 0
|
5,704 |
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
hessp=None, bounds=None, constraints=(), tol=None,
callback=None, options=None):
"""Minimization of scalar function of one or more variables.
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (n,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where 'n' is the number of independent variables.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` and `hess` functions).
method : str or callable, optional
Type of solver. Should be one of
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
- 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
- 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
- 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
- custom - a callable object (added in version 0.14.0),
see below for description.
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
depending if the problem has constraints or bounds.
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
Method for computing the gradient vector. Only for CG, BFGS,
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
trust-exact and trust-constr.
If it is a callable, it should be a function that returns the gradient
vector:
``jac(x, *args) -> array_like, shape (n,)``
where ``x`` is an array with shape (n,) and ``args`` is a tuple with
the fixed parameters. If `jac` is a Boolean and is True, `fun` is
assumed to return and objective and gradient as an ``(f, g)`` tuple.
Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
'trust-krylov' require that either a callable be supplied, or that
`fun` return the objective and gradient.
If None or False, the gradient will be estimated using 2-point finite
difference estimation with an absolute step size.
Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
to select a finite difference scheme for numerical estimation of the
gradient with a relative step size. These finite difference schemes
obey any specified `bounds`.
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
trust-ncg, trust-krylov, trust-exact and trust-constr. If it is
callable, it should return the Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
where x is a (n,) ndarray and `args` is a tuple with the fixed
parameters. LinearOperator and sparse matrix returns are only allowed
for 'trust-constr' method. Alternatively, the keywords
{'2-point', '3-point', 'cs'} select a finite difference scheme
for numerical estimation. Or, objects implementing the
`HessianUpdateStrategy` interface can be used to approximate
the Hessian. Available quasi-Newton methods implementing
this interface are:
- `BFGS`;
- `SR1`.
Whenever the gradient is estimated via finite-differences,
the Hessian cannot be estimated with options
{'2-point', '3-point', 'cs'} and needs to be
estimated using one of the quasi-Newton strategies.
'trust-exact' cannot use a finite-difference scheme, and must be used
with a callable returning an (n, n) array.
hessp : callable, optional
Hessian of objective function times an arbitrary vector p. Only for
Newton-CG, trust-ncg, trust-krylov, trust-constr.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. `hessp` must compute the
Hessian times an arbitrary vector:
``hessp(x, p, *args) -> ndarray shape (n,)``
where x is a (n,) ndarray, p is an arbitrary vector with
dimension (n,) and `args` is a tuple with the fixed
parameters.
bounds : sequence or `Bounds`, optional
Bounds on variables for L-BFGS-B, TNC, SLSQP, Powell, and
trust-constr methods. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
Constraints definition (only for COBYLA, SLSQP and trust-constr).
Constraints for 'trust-constr' are defined as a single object or a
list of objects specifying constraints to the optimization problem.
Available constraints are:
- `LinearConstraint`
- `NonlinearConstraint`
Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
Each dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. When `tol` is specified, the selected
minimization algorithm sets some relevant solver-specific tolerance(s)
equal to `tol`. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform. Depending on the
method each iteration may use several function evaluations.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options()`.
callback : callable, optional
Called after each iteration. For 'trust-constr' it is a callable with
the signature:
``callback(xk, OptimizeResult state) -> bool``
where ``xk`` is the current parameter vector. and ``state``
is an `OptimizeResult` object, with the same fields
as the ones from the return. If callback returns True
the algorithm execution is terminated.
For all the other methods, the signature is:
``callback(xk)``
where ``xk`` is the current parameter vector.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize_scalar : Interface to minimization algorithms for scalar
univariate functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *BFGS*.
**Unconstrained minimization**
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
applications. However, if numerical computation of derivative can be
trusted, other algorithms using the first and/or second derivatives
information might be preferred for their better performance in
general.
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
gradient algorithm by Polak and Ribiere, a variant of the
Fletcher-Reeves method described in [5]_ pp.120-122. Only the
first derivatives are used.
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
pp. 136. It uses the first derivatives only. BFGS has proven good
performance even for non-smooth optimizations. This method also
returns an approximation of the Hessian inverse, stored as
`hess_inv` in the OptimizeResult object.
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
Newton method). It uses a CG method to the compute the search
direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm. Suitable for large-scale
problems.
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
trust-region algorithm [5]_ for unconstrained minimization. This
algorithm requires the gradient and Hessian; furthermore the
Hessian is required to be positive definite.
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
Newton conjugate gradient trust-region algorithm [5]_ for
unconstrained minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
On indefinite problems it requires usually less iterations than the
`trust-ncg` method and is recommended for medium and large-scale problems.
Method :ref:`trust-exact <optimize.minimize-trustexact>`
is a trust-region method for unconstrained minimization in which
quadratic subproblems are solved almost exactly [13]_. This
algorithm requires the gradient and the Hessian (which is
*not* required to be positive definite). It is, in many
situations, the Newton method to converge in fewer iteraction
and the most recommended for small and medium-size problems.
**Bound-Constrained minimization**
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
algorithm [6]_, [7]_ for bound constrained minimization.
Method :ref:`Powell <optimize.minimize-powell>` is a modification
of Powell's method [3]_, [4]_ which is a conjugate direction
method. It performs sequential one-dimensional minimizations along
each vector of the directions set (`direc` field in `options` and
`info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken. If bounds are not provided, then an
unbounded line search will be used. If bounds are provided and
the initial guess is within the bounds, then every function
evaluation throughout the minimization procedure will be within
the bounds. If bounds are provided, the initial guess is outside
the bounds, and `direc` is full rank (default has full rank), then
some function evaluations during the first iteration may be
outside the bounds, but every function evaluation after the first
iteration will be within the bounds. If `direc` is not full rank,
then some parameters may not be optimized and the solution is not
guaranteed to be within the bounds.
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
algorithm [5]_, [8]_ to minimize a function with variables subject
to bounds. This algorithm uses gradient information; it is also
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
method described above as it wraps a C implementation and allows
each variable to be given upper and lower bounds.
**Constrained Minimization**
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
Constrained Optimization BY Linear Approximation (COBYLA) method
[9]_, [10]_, [11]_. The algorithm is based on linear
approximations to the objective function and each constraint. The
method wraps a FORTRAN implementation of the algorithm. The
constraints functions 'fun' may return either a single number
or an array or list of numbers.
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
Least SQuares Programming to minimize a function of several
variables with any combination of bounds, equality and inequality
constraints. The method wraps the SLSQP Optimization subroutine
originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into
large floating values.
Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
trust-region algorithm for constrained optimization. It swiches
between two implementations depending on the problem definition.
It is the most versatile constrained minimization algorithm
implemented in SciPy and the most appropriate for large-scale problems.
For equality constrained problems it is an implementation of Byrd-Omojokun
Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
inequality constraints are imposed as well, it swiches to the trust-region
interior point method described in [16]_. This interior point algorithm,
in turn, solves inequality constraints by introducing slack variables
and solving a sequence of equality-constrained barrier problems
for progressively smaller values of the barrier parameter.
The previously described equality constrained SQP method is
used to solve the subproblems with increasing levels of accuracy
as the iterate gets closer to a solution.
**Finite-Difference Options**
For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
the gradient and the Hessian may be approximated using
three finite-difference schemes: {'2-point', '3-point', 'cs'}.
The scheme 'cs' is, potentially, the most accurate but it
requires the function to correctly handles complex inputs and to
be differentiable in the complex plane. The scheme '3-point' is more
accurate than '2-point' but requires twice as many operations.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using a frontend to this method such as `scipy.optimize.basinhopping`
or a different library. You can simply pass a callable as the ``method``
parameter.
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `callback`, `hess`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. Also, if
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
`fun` returns just the function values and `jac` is converted to a function
returning the Jacobian. The method shall return an `OptimizeResult`
object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
References
----------
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
Minimization. The Computer Journal 7: 308-13.
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
respectable, in Numerical Analysis 1995: Proceedings of the 1995
Dundee Biennial Conference in Numerical Analysis (Eds. D F
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
191-208.
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
a function of several variables without calculating derivatives. The
Computer Journal 7: 155-162.
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
Numerical Recipes (any edition), Cambridge University Press.
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
Springer New York.
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
Algorithm for Bound Constrained Optimization. SIAM Journal on
Scientific and Statistical Computing 16 (5): 1190-1208.
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
optimization. ACM Transactions on Mathematical Software 23 (4):
550-560.
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
1984. SIAM Journal of Numerical Analysis 21: 770-778.
.. [9] Powell, M J D. A direct search optimization method that models
the objective and constraint functions by linear interpolation.
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
.. [10] Powell M J D. Direct search algorithms for optimization
calculations. 1998. Acta Numerica 7: 287-336.
.. [11] Powell M J D. A view of algorithms for optimization without
derivatives. 2007.Cambridge University Technical Report DAMTP
2007/NA03
.. [12] Kraft, D. A software package for sequential quadratic
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
Center -- Institute for Flight Mechanics, Koln, Germany.
.. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
Trust region methods. 2000. Siam. pp. 169-200.
.. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
implementation of the GLTR method for iterative solution of
the trust region problem", :arxiv:`1611.04718`
.. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
Trust-Region Subproblem using the Lanczos Method",
SIAM J. Optim., 9(2), 504--525, (1999).
.. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
An interior point algorithm for large-scale nonlinear programming.
SIAM Journal on Optimization 9.4: 877-900.
.. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
implementation of an algorithm for large-scale equality constrained
optimization. SIAM Journal on Optimization 8.3: 682-706.
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function (and its respective derivatives) is implemented in `rosen`
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
>>> from scipy.optimize import minimize, rosen, rosen_der
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
>>> res.x
array([ 1., 1., 1., 1., 1.])
Now using the *BFGS* algorithm, using the first derivative and a few
options:
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
... options={'gtol': 1e-6, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 26
Function evaluations: 31
Gradient evaluations: 31
>>> res.x
array([ 1., 1., 1., 1., 1.])
>>> print(res.message)
Optimization terminated successfully.
>>> res.hess_inv
array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]])
Next, consider a minimization problem with several constraints (namely
Example 16.4 from [5]_). The objective function is:
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
There are three constraints defined as:
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
And variables must be positive, hence the following bounds:
>>> bnds = ((0, None), (0, None))
The optimization problem is solved using the SLSQP method as:
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
... constraints=cons)
It should converge to the theoretical solution (1.4 ,1.7).
"""
x0 = np.asarray(x0)
if x0.dtype.kind in np.typecodes["AllInteger"]:
x0 = np.asarray(x0, dtype=float)
if not isinstance(args, tuple):
args = (args,)
if method is None:
# Select automatically
if constraints:
method = 'SLSQP'
elif bounds is not None:
method = 'L-BFGS-B'
else:
method = 'BFGS'
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
# check if optional parameters are supported by the selected method
# - jac
if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
warn('Method %s does not use gradient information (jac).' % method,
RuntimeWarning)
# - hess
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', 'trust-exact', '_custom') and hess is not None:
warn('Method %s does not use Hessian information (hess).' % method,
RuntimeWarning)
# - hessp
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', '_custom') \
and hessp is not None:
warn('Method %s does not use Hessian-vector product '
'information (hessp).' % method, RuntimeWarning)
# - constraints or bounds
if (meth in ('nelder-mead', 'cg', 'bfgs', 'newton-cg', 'dogleg',
'trust-ncg') and (bounds is not None or np.any(constraints))):
warn('Method %s cannot handle constraints nor bounds.' % method,
RuntimeWarning)
if meth in ('l-bfgs-b', 'tnc', 'powell') and np.any(constraints):
warn('Method %s cannot handle constraints.' % method,
RuntimeWarning)
if meth == 'cobyla' and bounds is not None:
warn('Method %s cannot handle bounds.' % method,
RuntimeWarning)
# - callback
if (meth in ('cobyla',) and callback is not None):
warn('Method %s does not support callback.' % method, RuntimeWarning)
# - return_all
if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
options.get('return_all', False)):
warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# check gradient vector
if callable(jac):
pass
elif jac is True:
# fun returns func and grad
fun = MemoizeJac(fun)
jac = fun.derivative
elif (jac in FD_METHODS and
meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
# finite differences with relative step
pass
elif meth in ['trust-constr']:
# default jac calculation for this method
jac = '2-point'
elif jac is None or bool(jac) is False:
# this will cause e.g. LBFGS to use forward difference, absolute step
jac = None
else:
# default if jac option is not understood
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth == 'nelder-mead':
options.setdefault('xatol', tol)
options.setdefault('fatol', tol)
if meth in ('newton-cg', 'powell', 'tnc'):
options.setdefault('xtol', tol)
if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
options.setdefault('ftol', tol)
if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
'trust-ncg', 'trust-exact', 'trust-krylov'):
options.setdefault('gtol', tol)
if meth in ('cobyla', '_custom'):
options.setdefault('tol', tol)
if meth == 'trust-constr':
options.setdefault('xtol', tol)
options.setdefault('gtol', tol)
options.setdefault('barrier_tol', tol)
if meth == '_custom':
# custom method called before bounds and constraints are 'standardised'
# custom method should be able to accept whatever bounds/constraints
# are provided to it.
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
bounds=bounds, constraints=constraints,
callback=callback, **options)
if bounds is not None:
bounds = standardize_bounds(bounds, x0, meth)
if constraints is not None:
constraints = standardize_constraints(constraints, x0, meth)
if meth == 'nelder-mead':
return _minimize_neldermead(fun, x0, args, callback, **options)
elif meth == 'powell':
return _minimize_powell(fun, x0, args, callback, bounds, **options)
elif meth == 'cg':
return _minimize_cg(fun, x0, args, jac, callback, **options)
elif meth == 'bfgs':
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
**options)
elif meth == 'l-bfgs-b':
return _minimize_lbfgsb(fun, x0, args, jac, bounds,
callback=callback, **options)
elif meth == 'tnc':
return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
**options)
elif meth == 'cobyla':
return _minimize_cobyla(fun, x0, args, constraints, **options)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
constraints, callback=callback, **options)
elif meth == 'trust-constr':
return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
bounds, constraints,
callback=callback, **options)
elif meth == 'dogleg':
return _minimize_dogleg(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'trust-ncg':
return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-krylov':
return _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-exact':
return _minimize_trustregion_exact(fun, x0, args, jac, hess,
callback=callback, **options)
else:
raise ValueError('Unknown solver %s' % method)
|
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
hessp=None, bounds=None, constraints=(), tol=None,
callback=None, options=None):
"""Minimization of scalar function of one or more variables.
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (n,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where 'n' is the number of independent variables.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` and `hess` functions).
method : str or callable, optional
Type of solver. Should be one of
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
- 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
- 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
- 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
- custom - a callable object (added in version 0.14.0),
see below for description.
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
depending if the problem has constraints or bounds.
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
Method for computing the gradient vector. Only for CG, BFGS,
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
trust-exact and trust-constr.
If it is a callable, it should be a function that returns the gradient
vector:
``jac(x, *args) -> array_like, shape (n,)``
where ``x`` is an array with shape (n,) and ``args`` is a tuple with
the fixed parameters. If `jac` is a Boolean and is True, `fun` is
assumed to return and objective and gradient as an ``(f, g)`` tuple.
Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
'trust-krylov' require that either a callable be supplied, or that
`fun` return the objective and gradient.
If None or False, the gradient will be estimated using 2-point finite
difference estimation with an absolute step size.
Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
to select a finite difference scheme for numerical estimation of the
gradient with a relative step size. These finite difference schemes
obey any specified `bounds`.
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
trust-ncg, trust-krylov, trust-exact and trust-constr. If it is
callable, it should return the Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
where x is a (n,) ndarray and `args` is a tuple with the fixed
parameters. `~scipy.sparse.linalg.LinearOperator` and sparse matrix returns are only allowed
for 'trust-constr' method. Alternatively, the keywords
{'2-point', '3-point', 'cs'} select a finite difference scheme
for numerical estimation. Or, objects implementing the
`HessianUpdateStrategy` interface can be used to approximate
the Hessian. Available quasi-Newton methods implementing
this interface are:
- `BFGS`;
- `SR1`.
Whenever the gradient is estimated via finite-differences,
the Hessian cannot be estimated with options
{'2-point', '3-point', 'cs'} and needs to be
estimated using one of the quasi-Newton strategies.
'trust-exact' cannot use a finite-difference scheme, and must be used
with a callable returning an (n, n) array.
hessp : callable, optional
Hessian of objective function times an arbitrary vector p. Only for
Newton-CG, trust-ncg, trust-krylov, trust-constr.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. `hessp` must compute the
Hessian times an arbitrary vector:
``hessp(x, p, *args) -> ndarray shape (n,)``
where x is a (n,) ndarray, p is an arbitrary vector with
dimension (n,) and `args` is a tuple with the fixed
parameters.
bounds : sequence or `Bounds`, optional
Bounds on variables for L-BFGS-B, TNC, SLSQP, Powell, and
trust-constr methods. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
Constraints definition (only for COBYLA, SLSQP and trust-constr).
Constraints for 'trust-constr' are defined as a single object or a
list of objects specifying constraints to the optimization problem.
Available constraints are:
- `LinearConstraint`
- `NonlinearConstraint`
Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
Each dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. When `tol` is specified, the selected
minimization algorithm sets some relevant solver-specific tolerance(s)
equal to `tol`. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform. Depending on the
method each iteration may use several function evaluations.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options()`.
callback : callable, optional
Called after each iteration. For 'trust-constr' it is a callable with
the signature:
``callback(xk, OptimizeResult state) -> bool``
where ``xk`` is the current parameter vector. and ``state``
is an `OptimizeResult` object, with the same fields
as the ones from the return. If callback returns True
the algorithm execution is terminated.
For all the other methods, the signature is:
``callback(xk)``
where ``xk`` is the current parameter vector.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize_scalar : Interface to minimization algorithms for scalar
univariate functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *BFGS*.
**Unconstrained minimization**
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
applications. However, if numerical computation of derivative can be
trusted, other algorithms using the first and/or second derivatives
information might be preferred for their better performance in
general.
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
gradient algorithm by Polak and Ribiere, a variant of the
Fletcher-Reeves method described in [5]_ pp.120-122. Only the
first derivatives are used.
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
pp. 136. It uses the first derivatives only. BFGS has proven good
performance even for non-smooth optimizations. This method also
returns an approximation of the Hessian inverse, stored as
`hess_inv` in the OptimizeResult object.
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
Newton method). It uses a CG method to the compute the search
direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm. Suitable for large-scale
problems.
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
trust-region algorithm [5]_ for unconstrained minimization. This
algorithm requires the gradient and Hessian; furthermore the
Hessian is required to be positive definite.
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
Newton conjugate gradient trust-region algorithm [5]_ for
unconstrained minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
On indefinite problems it requires usually less iterations than the
`trust-ncg` method and is recommended for medium and large-scale problems.
Method :ref:`trust-exact <optimize.minimize-trustexact>`
is a trust-region method for unconstrained minimization in which
quadratic subproblems are solved almost exactly [13]_. This
algorithm requires the gradient and the Hessian (which is
*not* required to be positive definite). It is, in many
situations, the Newton method to converge in fewer iteraction
and the most recommended for small and medium-size problems.
**Bound-Constrained minimization**
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
algorithm [6]_, [7]_ for bound constrained minimization.
Method :ref:`Powell <optimize.minimize-powell>` is a modification
of Powell's method [3]_, [4]_ which is a conjugate direction
method. It performs sequential one-dimensional minimizations along
each vector of the directions set (`direc` field in `options` and
`info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken. If bounds are not provided, then an
unbounded line search will be used. If bounds are provided and
the initial guess is within the bounds, then every function
evaluation throughout the minimization procedure will be within
the bounds. If bounds are provided, the initial guess is outside
the bounds, and `direc` is full rank (default has full rank), then
some function evaluations during the first iteration may be
outside the bounds, but every function evaluation after the first
iteration will be within the bounds. If `direc` is not full rank,
then some parameters may not be optimized and the solution is not
guaranteed to be within the bounds.
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
algorithm [5]_, [8]_ to minimize a function with variables subject
to bounds. This algorithm uses gradient information; it is also
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
method described above as it wraps a C implementation and allows
each variable to be given upper and lower bounds.
**Constrained Minimization**
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
Constrained Optimization BY Linear Approximation (COBYLA) method
[9]_, [10]_, [11]_. The algorithm is based on linear
approximations to the objective function and each constraint. The
method wraps a FORTRAN implementation of the algorithm. The
constraints functions 'fun' may return either a single number
or an array or list of numbers.
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
Least SQuares Programming to minimize a function of several
variables with any combination of bounds, equality and inequality
constraints. The method wraps the SLSQP Optimization subroutine
originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into
large floating values.
Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
trust-region algorithm for constrained optimization. It swiches
between two implementations depending on the problem definition.
It is the most versatile constrained minimization algorithm
implemented in SciPy and the most appropriate for large-scale problems.
For equality constrained problems it is an implementation of Byrd-Omojokun
Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
inequality constraints are imposed as well, it swiches to the trust-region
interior point method described in [16]_. This interior point algorithm,
in turn, solves inequality constraints by introducing slack variables
and solving a sequence of equality-constrained barrier problems
for progressively smaller values of the barrier parameter.
The previously described equality constrained SQP method is
used to solve the subproblems with increasing levels of accuracy
as the iterate gets closer to a solution.
**Finite-Difference Options**
For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
the gradient and the Hessian may be approximated using
three finite-difference schemes: {'2-point', '3-point', 'cs'}.
The scheme 'cs' is, potentially, the most accurate but it
requires the function to correctly handles complex inputs and to
be differentiable in the complex plane. The scheme '3-point' is more
accurate than '2-point' but requires twice as many operations.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using a frontend to this method such as `scipy.optimize.basinhopping`
or a different library. You can simply pass a callable as the ``method``
parameter.
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `callback`, `hess`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. Also, if
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
`fun` returns just the function values and `jac` is converted to a function
returning the Jacobian. The method shall return an `OptimizeResult`
object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
References
----------
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
Minimization. The Computer Journal 7: 308-13.
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
respectable, in Numerical Analysis 1995: Proceedings of the 1995
Dundee Biennial Conference in Numerical Analysis (Eds. D F
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
191-208.
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
a function of several variables without calculating derivatives. The
Computer Journal 7: 155-162.
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
Numerical Recipes (any edition), Cambridge University Press.
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
Springer New York.
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
Algorithm for Bound Constrained Optimization. SIAM Journal on
Scientific and Statistical Computing 16 (5): 1190-1208.
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
optimization. ACM Transactions on Mathematical Software 23 (4):
550-560.
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
1984. SIAM Journal of Numerical Analysis 21: 770-778.
.. [9] Powell, M J D. A direct search optimization method that models
the objective and constraint functions by linear interpolation.
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
.. [10] Powell M J D. Direct search algorithms for optimization
calculations. 1998. Acta Numerica 7: 287-336.
.. [11] Powell M J D. A view of algorithms for optimization without
derivatives. 2007.Cambridge University Technical Report DAMTP
2007/NA03
.. [12] Kraft, D. A software package for sequential quadratic
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
Center -- Institute for Flight Mechanics, Koln, Germany.
.. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
Trust region methods. 2000. Siam. pp. 169-200.
.. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
implementation of the GLTR method for iterative solution of
the trust region problem", :arxiv:`1611.04718`
.. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
Trust-Region Subproblem using the Lanczos Method",
SIAM J. Optim., 9(2), 504--525, (1999).
.. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
An interior point algorithm for large-scale nonlinear programming.
SIAM Journal on Optimization 9.4: 877-900.
.. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
implementation of an algorithm for large-scale equality constrained
optimization. SIAM Journal on Optimization 8.3: 682-706.
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function (and its respective derivatives) is implemented in `rosen`
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
>>> from scipy.optimize import minimize, rosen, rosen_der
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
>>> res.x
array([ 1., 1., 1., 1., 1.])
Now using the *BFGS* algorithm, using the first derivative and a few
options:
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
... options={'gtol': 1e-6, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 26
Function evaluations: 31
Gradient evaluations: 31
>>> res.x
array([ 1., 1., 1., 1., 1.])
>>> print(res.message)
Optimization terminated successfully.
>>> res.hess_inv
array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]])
Next, consider a minimization problem with several constraints (namely
Example 16.4 from [5]_). The objective function is:
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
There are three constraints defined as:
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
And variables must be positive, hence the following bounds:
>>> bnds = ((0, None), (0, None))
The optimization problem is solved using the SLSQP method as:
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
... constraints=cons)
It should converge to the theoretical solution (1.4 ,1.7).
"""
x0 = np.asarray(x0)
if x0.dtype.kind in np.typecodes["AllInteger"]:
x0 = np.asarray(x0, dtype=float)
if not isinstance(args, tuple):
args = (args,)
if method is None:
# Select automatically
if constraints:
method = 'SLSQP'
elif bounds is not None:
method = 'L-BFGS-B'
else:
method = 'BFGS'
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
# check if optional parameters are supported by the selected method
# - jac
if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
warn('Method %s does not use gradient information (jac).' % method,
RuntimeWarning)
# - hess
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', 'trust-exact', '_custom') and hess is not None:
warn('Method %s does not use Hessian information (hess).' % method,
RuntimeWarning)
# - hessp
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', '_custom') \
and hessp is not None:
warn('Method %s does not use Hessian-vector product '
'information (hessp).' % method, RuntimeWarning)
# - constraints or bounds
if (meth in ('nelder-mead', 'cg', 'bfgs', 'newton-cg', 'dogleg',
'trust-ncg') and (bounds is not None or np.any(constraints))):
warn('Method %s cannot handle constraints nor bounds.' % method,
RuntimeWarning)
if meth in ('l-bfgs-b', 'tnc', 'powell') and np.any(constraints):
warn('Method %s cannot handle constraints.' % method,
RuntimeWarning)
if meth == 'cobyla' and bounds is not None:
warn('Method %s cannot handle bounds.' % method,
RuntimeWarning)
# - callback
if (meth in ('cobyla',) and callback is not None):
warn('Method %s does not support callback.' % method, RuntimeWarning)
# - return_all
if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
options.get('return_all', False)):
warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# check gradient vector
if callable(jac):
pass
elif jac is True:
# fun returns func and grad
fun = MemoizeJac(fun)
jac = fun.derivative
elif (jac in FD_METHODS and
meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
# finite differences with relative step
pass
elif meth in ['trust-constr']:
# default jac calculation for this method
jac = '2-point'
elif jac is None or bool(jac) is False:
# this will cause e.g. LBFGS to use forward difference, absolute step
jac = None
else:
# default if jac option is not understood
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth == 'nelder-mead':
options.setdefault('xatol', tol)
options.setdefault('fatol', tol)
if meth in ('newton-cg', 'powell', 'tnc'):
options.setdefault('xtol', tol)
if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
options.setdefault('ftol', tol)
if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
'trust-ncg', 'trust-exact', 'trust-krylov'):
options.setdefault('gtol', tol)
if meth in ('cobyla', '_custom'):
options.setdefault('tol', tol)
if meth == 'trust-constr':
options.setdefault('xtol', tol)
options.setdefault('gtol', tol)
options.setdefault('barrier_tol', tol)
if meth == '_custom':
# custom method called before bounds and constraints are 'standardised'
# custom method should be able to accept whatever bounds/constraints
# are provided to it.
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
bounds=bounds, constraints=constraints,
callback=callback, **options)
if bounds is not None:
bounds = standardize_bounds(bounds, x0, meth)
if constraints is not None:
constraints = standardize_constraints(constraints, x0, meth)
if meth == 'nelder-mead':
return _minimize_neldermead(fun, x0, args, callback, **options)
elif meth == 'powell':
return _minimize_powell(fun, x0, args, callback, bounds, **options)
elif meth == 'cg':
return _minimize_cg(fun, x0, args, jac, callback, **options)
elif meth == 'bfgs':
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
elif meth == 'newton-cg':
return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
**options)
elif meth == 'l-bfgs-b':
return _minimize_lbfgsb(fun, x0, args, jac, bounds,
callback=callback, **options)
elif meth == 'tnc':
return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
**options)
elif meth == 'cobyla':
return _minimize_cobyla(fun, x0, args, constraints, **options)
elif meth == 'slsqp':
return _minimize_slsqp(fun, x0, args, jac, bounds,
constraints, callback=callback, **options)
elif meth == 'trust-constr':
return _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
bounds, constraints,
callback=callback, **options)
elif meth == 'dogleg':
return _minimize_dogleg(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'trust-ncg':
return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-krylov':
return _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-exact':
return _minimize_trustregion_exact(fun, x0, args, jac, hess,
callback=callback, **options)
else:
raise ValueError('Unknown solver %s' % method)
|
36,102 |
def test_invalid_identifier(configure_caching): # pylint: disable=unused-argument
"""Test `get_use_cache` raises a `TypeError` if identifier is not a string."""
with configure_caching({}):
with pytest.raises(TypeError):
get_use_cache(identifier=int)
|
def test_invalid_identifier(configure_caching):
"""Test `get_use_cache` raises a `TypeError` if identifier is not a string."""
with configure_caching({}):
with pytest.raises(TypeError):
get_use_cache(identifier=int)
|
9,822 |
def get_vol_ids(module, array):
"""Retrieve ids of named volumes."""
volumes = module.params['volumes']
if not volumes:
module.fail_json(msg='Need at least one volume when creating '
'a volume group.')
vol_names = frozenset(volumes)
try:
all_vols = array.list_volumes()
except Exception:
module.fail_json(msg='Error while attempting to retrieve volumes.')
found_vols = filter(lambda vol: vol['name'] in vol_names, all_vols)
found_names = frozenset(vol['name'] for vol in found_vols)
missing_names = list(vol_names.difference(found_names))
if len(missing_names) > 0:
module.fail_json(msg='The following volume names were not found:'
'{0}'.format(missing_names))
# all present
return [vol['id'] for vol in found_vols]
|
def get_vol_ids(module, array):
"""Retrieve ids of named volumes."""
volumes = module.params['volumes']
if not volumes:
module.fail_json(msg='Need at least one volume when creating '
'a volume group.')
vol_names = frozenset(volumes)
try:
all_vols = array.list_volumes()
except Exception:
module.fail_json(msg='Error while attempting to retrieve volumes.')
found_vols = filter(lambda vol: vol['name'] in vol_names, all_vols)
found_names = frozenset(vol['name'] for vol in found_vols)
missing_names = list(vol_names.difference(found_names))
if len(missing_names) > 0:
module.fail_json(msg='The following volume names were not found: '
'{0}'.format(missing_names))
# all present
return [vol['id'] for vol in found_vols]
|
38,539 |
def match_1d(
new_g: pp.Grid,
old_g: pp.Grid,
tol: float,
scaling: Optional[Literal["averaged", "integrated"]] = None,
) -> sps.spmatrix:
"""Obtain mappings between the cells of non-matching 1d grids.
The overlaps are identified as a sparse matrix which maps from cells in the old to
the new grid.
It is asumed that the two grids are aligned, with common start and
endpoints.
Parameters:
new_g (pp.Grid). Target grid for the mapping. Should have dimension 1.
old_g (pp.Grid). Original grid. Should have dimension 1.
tol (float): Tolerance used to filter away false overlaps caused by
numerical errors. Should be scaled relative to the cell size.
scaling (str, optional): Control weights of the returned matrix, see return
values for specification.
Returns:
sps.spmatrix: Mapping from the cells in the old to the new grid. The values in
the matrix depends on the parameter scaling: If set to 'averaged', a mapping
fit for intensive quantities (e.g., pressure) is returned (all rows sum to
unity). If set to 'integrated', the matrix is a mapping for extensive
quantities (column sum is 1). If not provided, the matrix elements are 1
for cell-pairs (new and old grid) that overlap; overlaps with areas less
than the parameter tol will be ignored.
"""
# Cell-node relation between grids - we know there are two nodes per cell
cell_nodes_new = new_g.cell_nodes()
cell_nodes_old = old_g.cell_nodes()
nodes_new = pp.utils.mcolon.mcolon(
cell_nodes_new.indptr[0:-1], cell_nodes_new.indptr[1:]
)
nodes_old = pp.utils.mcolon.mcolon(
cell_nodes_old.indptr[0:-1], cell_nodes_old.indptr[1:]
)
# Reshape so that the nodes of cells are stored columnwise
lines_new = cell_nodes_new.indices[nodes_new].reshape((2, -1), order="F")
lines_old = cell_nodes_old.indices[nodes_old].reshape((2, -1), order="F")
p_new = new_g.nodes
p_old = old_g.nodes
# Compute the intersection between the two tessalations.
# intersect is a list, every list member is a tuple with overlapping
# cells in grid 1 and 2, and their common area.
intersect = pp.intersections.line_tesselation(p_new, p_old, lines_new, lines_old)
num = len(intersect)
new_g_ind = np.zeros(num, dtype=int)
old_g_ind = np.zeros(num, dtype=int)
weights = np.zeros(num)
for ind, i in enumerate(intersect):
new_g_ind[ind] = i[0]
old_g_ind[ind] = i[1]
weights[ind] = i[2]
# The weights as computed from the intersection algorithm gives the volumes of the
# intersected cells. Depending on the specified scaling, the weights should be
# modified.
if scaling == "averaged":
weights /= new_g.cell_volumes[new_g_ind]
elif scaling == "integrated":
weights /= old_g.cell_volumes[old_g_ind]
elif scaling is None:
mask = weights > tol
new_g_ind = new_g_ind[mask]
old_g_ind = old_g_ind[mask]
weights = np.ones_like(new_g_ind)
return sps.coo_matrix(
(weights, (new_g_ind, old_g_ind)), shape=(new_g.num_cells, old_g.num_cells)
).tocsr()
|
def match_1d(
new_g: pp.Grid,
old_g: pp.Grid,
tol: float,
scaling: Optional[Literal["averaged", "integrated"]] = None,
) -> sps.spmatrix:
"""Obtain mappings between the cells of non-matching 1d grids.
The overlaps are identified as a sparse matrix which maps from cells in the old to
the new grid.
It is asumed that the two grids are aligned, with common start and
endpoints.
Parameters:
new_g (pp.Grid). Target grid for the mapping. Should have dimension 1.
old_g (pp.Grid). Original grid. Should have dimension 1.
tol (float): Tolerance used to filter away false overlaps caused by
numerical errors. Should be scaled relative to the cell size.
scaling (str, optional): Control weights of the returned matrix, see return
values for specification.
Returns:
sps.spmatrix: Mapping from the cells in the old to the new grid. The values in
the matrix depend on the parameter scaling: If set to 'averaged', a mapping
fit for intensive quantities (e.g., pressure) is returned (all rows sum to
unity). If set to 'integrated', the matrix is a mapping for extensive
quantities (column sum is 1). If not provided, the matrix elements are 1
for cell-pairs (new and old grid) that overlap; overlaps with areas less
than the parameter tol will be ignored.
"""
# Cell-node relation between grids - we know there are two nodes per cell
cell_nodes_new = new_g.cell_nodes()
cell_nodes_old = old_g.cell_nodes()
nodes_new = pp.utils.mcolon.mcolon(
cell_nodes_new.indptr[0:-1], cell_nodes_new.indptr[1:]
)
nodes_old = pp.utils.mcolon.mcolon(
cell_nodes_old.indptr[0:-1], cell_nodes_old.indptr[1:]
)
# Reshape so that the nodes of cells are stored columnwise
lines_new = cell_nodes_new.indices[nodes_new].reshape((2, -1), order="F")
lines_old = cell_nodes_old.indices[nodes_old].reshape((2, -1), order="F")
p_new = new_g.nodes
p_old = old_g.nodes
# Compute the intersection between the two tessalations.
# intersect is a list, every list member is a tuple with overlapping
# cells in grid 1 and 2, and their common area.
intersect = pp.intersections.line_tesselation(p_new, p_old, lines_new, lines_old)
num = len(intersect)
new_g_ind = np.zeros(num, dtype=int)
old_g_ind = np.zeros(num, dtype=int)
weights = np.zeros(num)
for ind, i in enumerate(intersect):
new_g_ind[ind] = i[0]
old_g_ind[ind] = i[1]
weights[ind] = i[2]
# The weights as computed from the intersection algorithm gives the volumes of the
# intersected cells. Depending on the specified scaling, the weights should be
# modified.
if scaling == "averaged":
weights /= new_g.cell_volumes[new_g_ind]
elif scaling == "integrated":
weights /= old_g.cell_volumes[old_g_ind]
elif scaling is None:
mask = weights > tol
new_g_ind = new_g_ind[mask]
old_g_ind = old_g_ind[mask]
weights = np.ones_like(new_g_ind)
return sps.coo_matrix(
(weights, (new_g_ind, old_g_ind)), shape=(new_g.num_cells, old_g.num_cells)
).tocsr()
|
48,173 |
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
2017 - Visual Studio 2017 (15)
2019 - Visual Studio 2019 (16)
2022 - Visual Studio 2022 (17)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
"8.0": "2005",
"9.0": "2008",
"10.0": "2010",
"11.0": "2012",
"12.0": "2013",
"14.0": "2015",
"15.0": "2017",
"16.0": "2019",
"17.0": "2022",
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [
r"HKLM\Software\Microsoft\VisualStudio\%s" % version,
r"HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s" % version,
r"HKLM\Software\Microsoft\VCExpress\%s" % version,
r"HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s" % version,
]
for key_ in keys:
path = _RegistryGetValue(key_, "InstallDir")
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, "devenv.exe")
express_path = os.path.join(path, "*express.exe")
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(
_CreateVersion(
version_to_year[version], os.path.join(path, "..", "..")
)
)
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(
_CreateVersion(
version_to_year[version] + "e", os.path.join(path, "..", "..")
)
)
# The old method above does not work when only SDK is installed.
keys = [
r"HKLM\Software\Microsoft\VisualStudio\SxS\VC7",
r"HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7",
r"HKLM\Software\Microsoft\VisualStudio\SxS\VS7",
r"HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VS7",
]
for key in keys:
path = _RegistryGetValue(key, version)
if not path:
continue
path = _ConvertToCygpath(path)
if version == "15.0":
if os.path.exists(path):
versions.append(_CreateVersion("2017", path))
elif version != "14.0": # There is no Express edition for 2015.
versions.append(
_CreateVersion(
version_to_year[version] + "e",
os.path.join(path, ".."),
sdk_based=True,
)
)
return versions
|
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
2017 - Visual Studio 2017 (15)
2019 - Visual Studio 2019 (16)
2022 - Visual Studio 2022 (17)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
"8.0": "2005",
"9.0": "2008",
"10.0": "2010",
"11.0": "2012",
"12.0": "2013",
"14.0": "2015",
"15.0": "2017",
"16.0": "2019",
"17.0": "2022",
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [
r"HKLM\Software\Microsoft\VisualStudio\%s" % version,
r"HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s" % version,
r"HKLM\Software\Microsoft\VCExpress\%s" % version,
r"HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s" % version,
]
for key in keys:
path = _RegistryGetValue(key, "InstallDir")
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, "devenv.exe")
express_path = os.path.join(path, "*express.exe")
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(
_CreateVersion(
version_to_year[version], os.path.join(path, "..", "..")
)
)
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(
_CreateVersion(
version_to_year[version] + "e", os.path.join(path, "..", "..")
)
)
# The old method above does not work when only SDK is installed.
keys = [
r"HKLM\Software\Microsoft\VisualStudio\SxS\VC7",
r"HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7",
r"HKLM\Software\Microsoft\VisualStudio\SxS\VS7",
r"HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VS7",
]
for key in keys:
path = _RegistryGetValue(key, version)
if not path:
continue
path = _ConvertToCygpath(path)
if version == "15.0":
if os.path.exists(path):
versions.append(_CreateVersion("2017", path))
elif version != "14.0": # There is no Express edition for 2015.
versions.append(
_CreateVersion(
version_to_year[version] + "e",
os.path.join(path, ".."),
sdk_based=True,
)
)
return versions
|
53,777 |
def get_active_assets(n, c, investment_period, snapshots):
"""
Getter function. Get the index of elements which are active of a given
component c, depending on lifetime and build year for a investment_period
investment_period (assuming that component is already used in build year)
"""
# component only active during lifetime
df = n.df(c).copy()
index_active = ((df["build_year"]<= investment_period) &
(investment_period<df[["build_year", "lifetime"]].sum(axis=1)))
return index_active
|
def get_active_assets(n, c, investment_period, snapshots):
"""
Getter function. Get the index of elements which are active of a given
component c, depending on lifetime and build year for a investment_period
investment_period (assuming that component is already used in build year)
"""
# component only active during lifetime
df = n.df(c).copy()
index_active = ((df["build_year"] <= investment_period) &
(investment_period < df[["build_year", "lifetime"]].sum(axis=1)))
return index_active
|
53,634 |
def _create_basic_elements(value: Any, node: List | Set | Tuple) -> list[NodeNG]:
"""Create a list of nodes to function as the elements of a new node."""
elements: list[NodeNG] = []
for elem in value:
elem_node = const_factory(elem)
elem_node.parent = node
elements.append(elem_node)
return elements
|
def _create_basic_elements(value: Any, node: List | Set | Tuple) -> list[NodeNG]:
"""Create a list of nodes to function as the elements of a new node."""
elements: list[NodeNG] = []
for element in value:
element_node = const_factory(element )
element_node .parent = node
elements.append(element_node )
return elements
|
12,786 |
def _generate_and_write_metadata(rolename, metadata_filename,
targets_directory, metadata_directory, storage_backend,
consistent_snapshot=False, filenames=None, allow_partially_signed=False,
increment_version_number=True, repository_name='default',
use_existing_fileinfo=False, use_timestamp_length=True,
use_timestamp_hashes=True, use_snapshot_length=False,
use_snapshot_hashes=False, snapshot_merkle=False):
"""
Non-public function that can generate and write the metadata for the
specified 'rolename'. It also increments the version number of 'rolename' if
the 'increment_version_number' argument is True.
"""
metadata = None
# Retrieve the roleinfo of 'rolename' to extract the needed metadata
# attributes, such as version number, expiration, etc.
roleinfo = tuf.roledb.get_roleinfo(rolename, repository_name)
previous_keyids = roleinfo.get('previous_keyids', [])
previous_threshold = roleinfo.get('previous_threshold', 1)
signing_keyids = list(set(roleinfo['signing_keyids']))
# Generate the appropriate role metadata for 'rolename'.
if rolename == 'root':
metadata = generate_root_metadata(roleinfo['version'], roleinfo['expires'],
consistent_snapshot, repository_name)
_log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'],
ROOT_EXPIRES_WARN_SECONDS)
elif rolename == 'snapshot':
if (snapshot_merkle):
root, leaves, metadata = generate_snapshot_metadata(metadata_directory,
roleinfo['version'], roleinfo['expires'],
storage_backend, consistent_snapshot, repository_name,
use_length=use_snapshot_length, use_hashes=use_snapshot_hashes,
snapshot_merkle=True)
# Add the merkle tree root hash to the timestamp roleinfo
timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp', repository_name)
timestamp_roleinfo['merkle_root'] = root.hash()
tuf.roledb.update_roleinfo('timestamp', timestamp_roleinfo,
repository_name=repository_name)
write_merkle_paths(root, leaves, storage_backend, metadata_directory)
else:
metadata = generate_snapshot_metadata(metadata_directory,
roleinfo['version'], roleinfo['expires'],
storage_backend, consistent_snapshot, repository_name,
use_length=use_snapshot_length, use_hashes=use_snapshot_hashes)
_log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'],
SNAPSHOT_EXPIRES_WARN_SECONDS)
elif rolename == 'timestamp':
# If filenames don't have "snapshot_filename" key, defaults to "snapshot.json"
snapshot_file_path = (filenames and filenames['snapshot']) \
or SNAPSHOT_FILENAME
metadata = generate_timestamp_metadata(snapshot_file_path, roleinfo['version'],
roleinfo['expires'], storage_backend, repository_name,
use_length=use_timestamp_length, use_hashes=use_timestamp_hashes,
roleinfo=roleinfo)
_log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'],
TIMESTAMP_EXPIRES_WARN_SECONDS)
# All other roles are either the top-level 'targets' role, or
# a delegated role.
else:
# Only print a warning if the top-level 'targets' role expires soon.
if rolename == 'targets':
_log_warning_if_expires_soon(TARGETS_FILENAME, roleinfo['expires'],
TARGETS_EXPIRES_WARN_SECONDS)
# Don't hash-prefix consistent target files if they are handled out of band
consistent_targets = consistent_snapshot and not use_existing_fileinfo
metadata = generate_targets_metadata(targets_directory,
roleinfo['paths'], roleinfo['version'], roleinfo['expires'],
roleinfo['delegations'], consistent_targets, use_existing_fileinfo,
storage_backend)
# Before writing 'rolename' to disk, automatically increment its version
# number (if 'increment_version_number' is True) so that the caller does not
# have to manually perform this action. The version number should be
# incremented in both the metadata file and roledb (required so that Snapshot
# references the latest version).
# Store the 'current_version' in case the version number must be restored
# (e.g., if 'rolename' cannot be written to disk because its metadata is not
# properly signed).
current_version = metadata['version']
if increment_version_number:
roleinfo = tuf.roledb.get_roleinfo(rolename, repository_name)
metadata['version'] = metadata['version'] + 1
roleinfo['version'] = roleinfo['version'] + 1
tuf.roledb.update_roleinfo(rolename, roleinfo,
repository_name=repository_name)
else:
logger.debug('Not incrementing ' + repr(rolename) + '\'s version number.')
if rolename in tuf.roledb.TOP_LEVEL_ROLES and not allow_partially_signed:
# Verify that the top-level 'rolename' is fully signed. Only a delegated
# role should not be written to disk without full verification of its
# signature(s), since it can only be considered fully signed depending on
# the delegating role.
signable = sign_metadata(metadata, signing_keyids, metadata_filename,
repository_name)
def should_write():
# Root must be signed by its previous keys and threshold.
if rolename == 'root' and len(previous_keyids) > 0:
if not tuf.sig.verify(signable, rolename, repository_name,
previous_threshold, previous_keyids):
return False
else:
logger.debug('Root is signed by a threshold of its previous keyids.')
# In the normal case, we should write metadata if the threshold is met.
return tuf.sig.verify(signable, rolename, repository_name,
roleinfo['threshold'], roleinfo['signing_keyids'])
if should_write():
_remove_invalid_and_duplicate_signatures(signable, repository_name)
# Root should always be written as if consistent_snapshot is True (i.e.,
# write <version>.root.json and root.json to disk).
if rolename == 'root':
consistent_snapshot = True
filename = write_metadata_file(signable, metadata_filename,
metadata['version'], consistent_snapshot, storage_backend)
# 'signable' contains an invalid threshold of signatures.
else:
# Since new metadata cannot be successfully written, restore the current
# version number.
roleinfo = tuf.roledb.get_roleinfo(rolename, repository_name)
roleinfo['version'] = current_version
tuf.roledb.update_roleinfo(rolename, roleinfo,
repository_name=repository_name)
# Note that 'signable' is an argument to tuf.UnsignedMetadataError().
raise tuf.exceptions.UnsignedMetadataError('Not enough'
' signatures for ' + repr(metadata_filename), signable)
# 'rolename' is a delegated role or a top-level role that is partially
# signed, and thus its signatures should not be verified.
else:
signable = sign_metadata(metadata, signing_keyids, metadata_filename,
repository_name)
_remove_invalid_and_duplicate_signatures(signable, repository_name)
# Root should always be written as if consistent_snapshot is True (i.e.,
# <version>.root.json and root.json).
if rolename == 'root':
filename = write_metadata_file(signable, metadata_filename,
metadata['version'], consistent_snapshot=True,
storage_backend=storage_backend)
else:
filename = write_metadata_file(signable, metadata_filename,
metadata['version'], consistent_snapshot, storage_backend)
return signable, filename
|
def _generate_and_write_metadata(rolename, metadata_filename,
targets_directory, metadata_directory, storage_backend,
consistent_snapshot=False, filenames=None, allow_partially_signed=False,
increment_version_number=True, repository_name='default',
use_existing_fileinfo=False, use_timestamp_length=True,
use_timestamp_hashes=True, use_snapshot_length=False,
use_snapshot_hashes=False, snapshot_merkle=False):
"""
Non-public function that can generate and write the metadata for the
specified 'rolename'. It also increments the version number of 'rolename' if
the 'increment_version_number' argument is True.
"""
metadata = None
# Retrieve the roleinfo of 'rolename' to extract the needed metadata
# attributes, such as version number, expiration, etc.
roleinfo = tuf.roledb.get_roleinfo(rolename, repository_name)
previous_keyids = roleinfo.get('previous_keyids', [])
previous_threshold = roleinfo.get('previous_threshold', 1)
signing_keyids = list(set(roleinfo['signing_keyids']))
# Generate the appropriate role metadata for 'rolename'.
if rolename == 'root':
metadata = generate_root_metadata(roleinfo['version'], roleinfo['expires'],
consistent_snapshot, repository_name)
_log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'],
ROOT_EXPIRES_WARN_SECONDS)
elif rolename == 'snapshot':
if snapshot_merkle:
root, leaves, metadata = generate_snapshot_metadata(metadata_directory,
roleinfo['version'], roleinfo['expires'],
storage_backend, consistent_snapshot, repository_name,
use_length=use_snapshot_length, use_hashes=use_snapshot_hashes,
snapshot_merkle=True)
# Add the merkle tree root hash to the timestamp roleinfo
timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp', repository_name)
timestamp_roleinfo['merkle_root'] = root.hash()
tuf.roledb.update_roleinfo('timestamp', timestamp_roleinfo,
repository_name=repository_name)
write_merkle_paths(root, leaves, storage_backend, metadata_directory)
else:
metadata = generate_snapshot_metadata(metadata_directory,
roleinfo['version'], roleinfo['expires'],
storage_backend, consistent_snapshot, repository_name,
use_length=use_snapshot_length, use_hashes=use_snapshot_hashes)
_log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'],
SNAPSHOT_EXPIRES_WARN_SECONDS)
elif rolename == 'timestamp':
# If filenames don't have "snapshot_filename" key, defaults to "snapshot.json"
snapshot_file_path = (filenames and filenames['snapshot']) \
or SNAPSHOT_FILENAME
metadata = generate_timestamp_metadata(snapshot_file_path, roleinfo['version'],
roleinfo['expires'], storage_backend, repository_name,
use_length=use_timestamp_length, use_hashes=use_timestamp_hashes,
roleinfo=roleinfo)
_log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'],
TIMESTAMP_EXPIRES_WARN_SECONDS)
# All other roles are either the top-level 'targets' role, or
# a delegated role.
else:
# Only print a warning if the top-level 'targets' role expires soon.
if rolename == 'targets':
_log_warning_if_expires_soon(TARGETS_FILENAME, roleinfo['expires'],
TARGETS_EXPIRES_WARN_SECONDS)
# Don't hash-prefix consistent target files if they are handled out of band
consistent_targets = consistent_snapshot and not use_existing_fileinfo
metadata = generate_targets_metadata(targets_directory,
roleinfo['paths'], roleinfo['version'], roleinfo['expires'],
roleinfo['delegations'], consistent_targets, use_existing_fileinfo,
storage_backend)
# Before writing 'rolename' to disk, automatically increment its version
# number (if 'increment_version_number' is True) so that the caller does not
# have to manually perform this action. The version number should be
# incremented in both the metadata file and roledb (required so that Snapshot
# references the latest version).
# Store the 'current_version' in case the version number must be restored
# (e.g., if 'rolename' cannot be written to disk because its metadata is not
# properly signed).
current_version = metadata['version']
if increment_version_number:
roleinfo = tuf.roledb.get_roleinfo(rolename, repository_name)
metadata['version'] = metadata['version'] + 1
roleinfo['version'] = roleinfo['version'] + 1
tuf.roledb.update_roleinfo(rolename, roleinfo,
repository_name=repository_name)
else:
logger.debug('Not incrementing ' + repr(rolename) + '\'s version number.')
if rolename in tuf.roledb.TOP_LEVEL_ROLES and not allow_partially_signed:
# Verify that the top-level 'rolename' is fully signed. Only a delegated
# role should not be written to disk without full verification of its
# signature(s), since it can only be considered fully signed depending on
# the delegating role.
signable = sign_metadata(metadata, signing_keyids, metadata_filename,
repository_name)
def should_write():
# Root must be signed by its previous keys and threshold.
if rolename == 'root' and len(previous_keyids) > 0:
if not tuf.sig.verify(signable, rolename, repository_name,
previous_threshold, previous_keyids):
return False
else:
logger.debug('Root is signed by a threshold of its previous keyids.')
# In the normal case, we should write metadata if the threshold is met.
return tuf.sig.verify(signable, rolename, repository_name,
roleinfo['threshold'], roleinfo['signing_keyids'])
if should_write():
_remove_invalid_and_duplicate_signatures(signable, repository_name)
# Root should always be written as if consistent_snapshot is True (i.e.,
# write <version>.root.json and root.json to disk).
if rolename == 'root':
consistent_snapshot = True
filename = write_metadata_file(signable, metadata_filename,
metadata['version'], consistent_snapshot, storage_backend)
# 'signable' contains an invalid threshold of signatures.
else:
# Since new metadata cannot be successfully written, restore the current
# version number.
roleinfo = tuf.roledb.get_roleinfo(rolename, repository_name)
roleinfo['version'] = current_version
tuf.roledb.update_roleinfo(rolename, roleinfo,
repository_name=repository_name)
# Note that 'signable' is an argument to tuf.UnsignedMetadataError().
raise tuf.exceptions.UnsignedMetadataError('Not enough'
' signatures for ' + repr(metadata_filename), signable)
# 'rolename' is a delegated role or a top-level role that is partially
# signed, and thus its signatures should not be verified.
else:
signable = sign_metadata(metadata, signing_keyids, metadata_filename,
repository_name)
_remove_invalid_and_duplicate_signatures(signable, repository_name)
# Root should always be written as if consistent_snapshot is True (i.e.,
# <version>.root.json and root.json).
if rolename == 'root':
filename = write_metadata_file(signable, metadata_filename,
metadata['version'], consistent_snapshot=True,
storage_backend=storage_backend)
else:
filename = write_metadata_file(signable, metadata_filename,
metadata['version'], consistent_snapshot, storage_backend)
return signable, filename
|
59,572 |
def plot_histogram(
data,
figsize=(7, 5),
color=None,
number_to_keep=None,
sort="asc",
target_string=None,
legend=None,
bar_labels=True,
title=None,
ax=None,
filename=None,
):
"""Plot a histogram of input counts data.
Args:
data (list or dict): This is either a list of dictionaries or a single
dict containing the values to represent (ex {'001': 130})
figsize (tuple): Figure size in inches.
color (list or str): String or list of strings for histogram bar colors.
number_to_keep (int): The number of terms to plot per dataset. The rest is made into a
single bar called 'rest'. If multiple datasets are given, the ``number_to_keep``
applies to each dataset individually, which may result in more bars than
``number_to_keep + 1``. The ``number_to_keep`` applies to the total values, rather than
the x-axis sort.
sort (string): Could be `'asc'`, `'desc'`, `'hamming'`, `'value'`, or
`'value_desc'`. If set to `'value'` or `'value_desc'` the x axis
will be sorted by the number of counts for each bitstring.
Defaults to `'asc'`.
target_string (str): Target string if 'sort' is a distance measure.
legend(list): A list of strings to use for labels of the data.
The number of entries must match the length of data (if data is a
list or 1 if it's a dict)
bar_labels (bool): Label each bar in histogram with counts value.
title (str): A string to use for the plot title
ax (matplotlib.axes.Axes): An optional Axes object to be used for
the visualization output. If none is specified a new matplotlib
Figure will be created and used. Additionally, if specified there
will be no returned Figure since it is redundant.
filename (str): file path to save image to.
Returns:
matplotlib.Figure:
A figure for the rendered histogram, if the ``ax``
kwarg is not set.
Raises:
MissingOptionalLibraryError: Matplotlib not available.
VisualizationError: When legend is provided and the length doesn't
match the input data.
VisualizationError: Input must be Counts or a dict
Examples:
.. jupyter-execute::
# Plot two counts in the same figure with legends and colors specified.
from qiskit.visualization import plot_histogram
counts1 = {'00': 525, '11': 499}
counts2 = {'00': 511, '11': 514}
legend = ['First execution', 'Second execution']
plot_histogram([counts1, counts2], legend=legend, color=['crimson','midnightblue'],
title="New Histogram")
.. jupyter-execute::
# You can sort the bitstrings using different methods.
counts = {'001': 596, '011': 211, '010': 50, '000': 117, '101': 33, '111': 8,
'100': 6, '110': 3}
# Sort by the counts in descending order
hist1 = plot_histogram(counts, sort='value_desc')
# Sort by the hamming distance (the number of bit flips to change from
# one bitstring to the other) from a target string.
hist2 = plot_histogram(counts, sort='hamming', target_string='001')
display(hist1, hist2)
"""
if not isinstance(data, list):
data = [data]
for dat in data:
if isinstance(dat, (QuasiDistribution, ProbDistribution)):
raise VisualizationError(
"Input must be Counts or a dict, " "consider using 'plot_distribution'"
)
return _plotting_core(
data,
figsize,
color,
number_to_keep,
sort,
target_string,
legend,
bar_labels,
title,
ax,
filename,
kind="counts",
)
|
def plot_histogram(
data,
figsize=(7, 5),
color=None,
number_to_keep=None,
sort="asc",
target_string=None,
legend=None,
bar_labels=True,
title=None,
ax=None,
filename=None,
):
"""Plot a histogram of input counts data.
Args:
data (list or dict): This is either a list of dictionaries or a single
dict containing the values to represent (ex {'001': 130})
figsize (tuple): Figure size in inches.
color (list or str): String or list of strings for histogram bar colors.
number_to_keep (int): The number of terms to plot per dataset. The rest is made into a
single bar called 'rest'. If multiple datasets are given, the ``number_to_keep``
applies to each dataset individually, which may result in more bars than
``number_to_keep + 1``. The ``number_to_keep`` applies to the total values, rather than
the x-axis sort.
sort (string): Could be `'asc'`, `'desc'`, `'hamming'`, `'value'`, or
`'value_desc'`. If set to `'value'` or `'value_desc'` the x axis
will be sorted by the number of counts for each bitstring.
Defaults to `'asc'`.
target_string (str): Target string if 'sort' is a distance measure.
legend(list): A list of strings to use for labels of the data.
The number of entries must match the length of data (if data is a
list or 1 if it's a dict)
bar_labels (bool): Label each bar in histogram with counts value.
title (str): A string to use for the plot title
ax (matplotlib.axes.Axes): An optional Axes object to be used for
the visualization output. If none is specified a new matplotlib
Figure will be created and used. Additionally, if specified there
will be no returned Figure since it is redundant.
filename (str): file path to save image to.
Returns:
matplotlib.Figure:
A figure for the rendered histogram, if the ``ax``
kwarg is not set.
Raises:
MissingOptionalLibraryError: Matplotlib not available.
VisualizationError: When legend is provided and the length doesn't
match the input data.
VisualizationError: Input must be Counts or a dict
Examples:
.. jupyter-execute::
# Plot two counts in the same figure with legends and colors specified.
from qiskit.visualization import plot_histogram
counts1 = {'00': 525, '11': 499}
counts2 = {'00': 511, '11': 514}
legend = ['First execution', 'Second execution']
plot_histogram([counts1, counts2], legend=legend, color=['crimson','midnightblue'],
title="New Histogram")
.. jupyter-execute::
# You can sort the bitstrings using different methods.
counts = {'001': 596, '011': 211, '010': 50, '000': 117, '101': 33, '111': 8,
'100': 6, '110': 3}
# Sort by the counts in descending order
hist1 = plot_histogram(counts, sort='value_desc')
# Sort by the hamming distance (the number of bit flips to change from
# one bitstring to the other) from a target string.
hist2 = plot_histogram(counts, sort='hamming', target_string='001')
display(hist1, hist2)
"""
if not isinstance(data, list):
data = [data]
for dat in data:
kind = "counts"
if isinstance(dat, (QuasiDistribution, ProbDistribution)) or isinstance(next(data.values()), float):
kind = "distribution"
return _plotting_core(
data,
figsize,
color,
number_to_keep,
sort,
target_string,
legend,
bar_labels,
title,
ax,
filename,
kind=kind,
)
|
30,355 |
def create_branch_command():
args = demisto.args()
branch_name = args.get('branch_name')
sha = args.get('sha')
response = create_branch(branch_name, sha)
ec_object = {
'Ref': response.get('ref'),
'NodeID': response.get('node_id')
}
ec = {
'GitHub.Branch(val.Ref === obj.Ref && val.NodeID === obj.NodeID)': ec_object
}
human_readable = tableToMarkdown('Created Branch Details', ec_object, removeNull=True)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=response)
|
def create_branch_command():
args = demisto.args()
branch_name = args.get('branch_name')
commit_sha = args.get('commit_sha')
response = create_branch(branch_name, sha)
ec_object = {
'Ref': response.get('ref'),
'NodeID': response.get('node_id')
}
ec = {
'GitHub.Branch(val.Ref === obj.Ref && val.NodeID === obj.NodeID)': ec_object
}
human_readable = tableToMarkdown('Created Branch Details', ec_object, removeNull=True)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=response)
|
5,280 |
def g():
page = 1
done = False
while not done:
get = requests.get(
'https://api.github.com/repos/RaRe-Technologies/gensim/pulls',
params={'state': 'closed', 'page': page},
)
get.raise_for_status()
pulls = get.json()
if not pulls:
break
for i, pr in enumerate(pulls):
if pr['created_at'] < last_release_date:
done = True
break
if pr['merged_at'] and pr['merged_at'] > last_release_date:
yield pr
page += 1
|
def iter_prs(newer_than):
page = 1
done = False
while not done:
get = requests.get(
'https://api.github.com/repos/RaRe-Technologies/gensim/pulls',
params={'state': 'closed', 'page': page},
)
get.raise_for_status()
pulls = get.json()
if not pulls:
break
for i, pr in enumerate(pulls):
if pr['created_at'] < last_release_date:
done = True
break
if pr['merged_at'] and pr['merged_at'] > last_release_date:
yield pr
page += 1
|
33,072 |
def print_version(ctx, param, value):
"""[Deprecated] Print out the version of opsdroid that is installed.
Args:
ctx (:obj:`click.Context`): The current click cli context
param (dict): a dictionary of all parameters pass to the click
context when invoking this function as a callback.
value (bool): the value of this parameter after invocation.
Defaults to False, set to true when this flag is called.
Returns:
int: the exit code. Always returns 0 in this case.
"""
if not value or ctx.resilient_parsing:
return
if ctx.command.name == "cli":
warn_deprecated_cli_option(
"The flag --version has been deprecated. "
"Please run `opsdroid version` instead."
)
click.echo("opsdroid {version}".format(version=__version__))
ctx.exit(0)
|
def print_version(ctx, param, value):
"""[Deprecated] Print out the version of opsdroid that is installed.
Args:
ctx (:obj:`click.Context`): The current click cli context.
param (dict): a dictionary of all parameters pass to the click
context when invoking this function as a callback.
value (bool): the value of this parameter after invocation.
Defaults to False, set to true when this flag is called.
Returns:
int: the exit code. Always returns 0 in this case.
"""
if not value or ctx.resilient_parsing:
return
if ctx.command.name == "cli":
warn_deprecated_cli_option(
"The flag --version has been deprecated. "
"Please run `opsdroid version` instead."
)
click.echo("opsdroid {version}".format(version=__version__))
ctx.exit(0)
|
5,923 |
def test_check_integrity_errors_on_missing_files(data, script, tmpdir):
"""
Work-in-progress failing test for a flag that detects broken packages
"""
to_install = data.packages.joinpath("pip-test-package-0.1.tar.gz")
result = script.pip_install_local(to_install)
assert 'Successfully installed pip-test-package' in result.stdout
target = script.site_packages_path / "piptestpackage/__init__.py"
target.unlink()
result = script.pip('check --integrity')
expected_lines = (
"piptestpackage is missing the __init__.py file",
)
assert matches_expected_lines(result.stdout, expected_lines)
assert result.returncode == 1
|
def test_check_integrity_errors_on_missing_files(data, script, tmpdir):
"""Ensure that pip check detects a missing file post-install."""
to_install = data.packages.joinpath("pip-test-package-0.1.tar.gz")
result = script.pip_install_local(to_install)
assert 'Successfully installed pip-test-package' in result.stdout
target = script.site_packages_path / "piptestpackage/__init__.py"
target.unlink()
result = script.pip('check --integrity')
expected_lines = (
"piptestpackage is missing the __init__.py file",
)
assert matches_expected_lines(result.stdout, expected_lines)
assert result.returncode == 1
|
44,502 |
def build_importer_spec(
input_type_schema: str,
pipeline_param_name: str = None,
constant_value: str = None
) -> pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec:
"""Build an importer executor spec.
Args:
input_type_schema: The type of the input artifact.
pipeline_param_name: The name of the pipeline parameter if the importer gets
its artifacts_uri via a pipeline parameter.
constant_value: The value of artifact_uri in case a contant value is passed
directly into the compoent op.
Returns:
An importer spec.
"""
assert (
bool(pipeline_param_name) != bool(constant_value),
'importer spec should be built using either pipeline_param_name or'
'constant_value.'
)
importer_spec = pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec()
importer_spec.type_schema.instance_schema = input_type_schema
if pipeline_param_name:
importer_spec.artifact_uri.runtime_parameter = pipeline_param_name
elif constant_value:
importer_spec.artifact_uri.constant_value.string_value = constant_value
return importer_spec
|
def build_importer_spec(
input_type_schema: str,
pipeline_param_name: str = None,
constant_value: str = None
) -> pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec:
"""Builds an importer executor spec.
Args:
input_type_schema: The type of the input artifact.
pipeline_param_name: The name of the pipeline parameter if the importer gets
its artifacts_uri via a pipeline parameter.
constant_value: The value of artifact_uri in case a contant value is passed
directly into the compoent op.
Returns:
An importer spec.
"""
assert (
bool(pipeline_param_name) != bool(constant_value),
'importer spec should be built using either pipeline_param_name or'
'constant_value.'
)
importer_spec = pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec()
importer_spec.type_schema.instance_schema = input_type_schema
if pipeline_param_name:
importer_spec.artifact_uri.runtime_parameter = pipeline_param_name
elif constant_value:
importer_spec.artifact_uri.constant_value.string_value = constant_value
return importer_spec
|
45,729 |
def l2_param(df_obs, df_forc, minref, maxref, mindiff, minsize, minmax, mindis):
"""This function calculates the second parameter of location component for SAL based on Wernli et al (2008).
Parameters
----------
df_obs: 2-d ndarray for the observation data.
df_pre: 2-d ndarray for the prediction data.
minref: minimum precipitation value for detecting object(s), If r star is lower than this threshold.
The default is 0.1 mm.
Returns
-------
l2:
The first parameter of location component which has a value between 0 to 1.
"""
maximum_distance = sqrt(((df_obs.shape[0]) ** 2) + ((df_obs.shape[1]) ** 2))
obs_r = (weighted_r(df_obs, minref, maxref, mindiff, minsize, minmax, mindis)) * (
df_obs.mean()
)
forc_r = (weighted_r(df_forc, minref, maxref, mindiff, minsize, minmax, mindis)) * (
df_forc.mean()
)
l2 = 2 * ((abs(obs_r - forc_r)) / maximum_distance)
return float(l2)
|
def l2_param(df_obs, df_forc, minref, maxref, mindiff, minsize, minmax, mindis):
"""Calculate the second parameter of location component for SAL based on Wernli et al (2008).
Parameters
----------
df_obs: 2-d ndarray for the observation data.
df_pre: 2-d ndarray for the prediction data.
minref: minimum precipitation value for detecting object(s), If r star is lower than this threshold.
The default is 0.1 mm.
Returns
-------
l2:
The first parameter of location component which has a value between 0 to 1.
"""
maximum_distance = sqrt(((df_obs.shape[0]) ** 2) + ((df_obs.shape[1]) ** 2))
obs_r = (weighted_r(df_obs, minref, maxref, mindiff, minsize, minmax, mindis)) * (
df_obs.mean()
)
forc_r = (weighted_r(df_forc, minref, maxref, mindiff, minsize, minmax, mindis)) * (
df_forc.mean()
)
l2 = 2 * ((abs(obs_r - forc_r)) / maximum_distance)
return float(l2)
|
30,842 |
def list_indicators_command(client: MsClient, args: Dict[str, str]) -> Tuple[str, Optional[Dict], Optional[List]]:
"""
Args:
client: MsClient
args: arguments from CortexSOAR. May include 'indicator_id'
Returns:
human_readable, outputs.
"""
indicators = client.list_indicators(args.get('indicator_id'))
limit = int(args.get('limit', 50))
indicators = indicators[:limit]
if indicators:
human_readable = tableToMarkdown(
'Indicators from Microsoft ATP:',
indicators,
headers=[
'id',
'action',
'threatType',
'severity',
'fileName',
'fileHashType',
'fileHashValue',
'domainName',
'networkIPv4',
'url'
],
removeNull=True
)
return human_readable, {'MicrosoftATP.Indicators(val.id == obj.id)': indicators}, indicators
else:
return 'No indicators found', None, None
|
def list_indicators_command(client: MsClient, args: Dict[str, str]) -> Tuple[str, Optional[Dict], Optional[List]]:
"""
Args:
client: MsClient
args: arguments from CortexSOAR. May include 'indicator_id'
Returns:
human_readable, outputs.
"""
indicators = client.list_indicators(args.get('indicator_id'))
limit = int(args.get('limit', 50))
indicators = indicators[:limit]
if indicators:
human_readable = tableToMarkdown(
'Microsoft Defender ATP Indicators',
indicators,
headers=[
'id',
'action',
'threatType',
'severity',
'fileName',
'fileHashType',
'fileHashValue',
'domainName',
'networkIPv4',
'url'
],
removeNull=True
)
return human_readable, {'MicrosoftATP.Indicators(val.id == obj.id)': indicators}, indicators
else:
return 'No indicators found', None, None
|
19,558 |
def fix_filename(path):
r"""Fix filenames for use in LaTeX.
Latex has problems if there are one or more points in the filename, thus
'abc.def.jpg' will be changed to '{abc.def}.jpg'
Windows gets angry about the brackets that resolve the above issue on linux
Latex distributions. MikTeX however, has no qualms about multiple dots in the
filename so the behavior is different for posix vs nt when the length of
file_parts is greater than two.
Args
----
filename : str
The file name to be changed.
Returns
-------
str
The new filename.
Examples
--------
>>> fix_filename("foo.bar.pdf")
'{foo.bar}.pdf'
>>> fix_filename("/etc/local/foo.bar.pdf")
'/etc/local/{foo.bar}.pdf'
>>> fix_filename("/etc/local/foo.bar.baz/document.pdf")
'/etc/local/foo.bar.baz/document.pdf'
>>> fix_filename("/etc/local/foo.bar.baz/foo~1/document.pdf")
'\detokenize{/etc/local/foo.bar.baz/foo~1/document.pdf}'
"""
path_parts = path.split('/' if os.name == 'posix' else '\\')
dir_parts = path_parts[:-1]
filename = path_parts[-1]
file_parts = filename.split('.')
if len(file_parts) > 2:
if os.name == 'posix':
filename = '{' + '.'.join(file_parts[0:-1]) + '}.' + file_parts[-1]
else:
filename = '.'.join(file_parts[0:-1]) + '.' + file_parts[-1]
dir_parts.append(filename)
fixed_path = '/'.join(dir_parts)
if '~' in fixed_path:
fixed_path = r'\detokenize{' + fixed_path + '}'
return fixed_path
|
def fix_filename(path):
r"""Fix filenames for use in LaTeX.
Latex has problems if there are one or more points in the filename, thus
'abc.def.jpg' will be changed to '{abc.def}.jpg'
Windows gets angry about the curly braces that resolve the above issue on linux
Latex distributions. MikTeX however, has no qualms about multiple dots in the
filename so the behavior is different for posix vs nt when the length of
file_parts is greater than two.
Args
----
filename : str
The file name to be changed.
Returns
-------
str
The new filename.
Examples
--------
>>> fix_filename("foo.bar.pdf")
'{foo.bar}.pdf'
>>> fix_filename("/etc/local/foo.bar.pdf")
'/etc/local/{foo.bar}.pdf'
>>> fix_filename("/etc/local/foo.bar.baz/document.pdf")
'/etc/local/foo.bar.baz/document.pdf'
>>> fix_filename("/etc/local/foo.bar.baz/foo~1/document.pdf")
'\detokenize{/etc/local/foo.bar.baz/foo~1/document.pdf}'
"""
path_parts = path.split('/' if os.name == 'posix' else '\\')
dir_parts = path_parts[:-1]
filename = path_parts[-1]
file_parts = filename.split('.')
if len(file_parts) > 2:
if os.name == 'posix':
filename = '{' + '.'.join(file_parts[0:-1]) + '}.' + file_parts[-1]
else:
filename = '.'.join(file_parts[0:-1]) + '.' + file_parts[-1]
dir_parts.append(filename)
fixed_path = '/'.join(dir_parts)
if '~' in fixed_path:
fixed_path = r'\detokenize{' + fixed_path + '}'
return fixed_path
|
42,559 |
def test_all_locations_are_present(database):
"""
Test that all Location are present in db
"""
cursor = database.conn.cursor()
for location in Location:
r = cursor.execute("SELECT EXISTS(SELECT 1 FROM location WHERE seq=?)", (location.value,))
assert r.fetchone() == (1,)
|
def test_all_locations_are_present(database):
"""
Test that all Location are present in db
"""
cursor = database.conn.cursor()
for location in Location:
r = cursor.execute("SELECT COUNT(*) FROM location WHERE seq=?", (location.value,))
assert r.fetchone() == (1,)
|
54,200 |
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning('cuda is not available')
# get input and output dimension info
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
utts = list(valid_json.keys())
idim = int(valid_json[utts[0]]['input'][0]['shape'][-1])
odim = int(valid_json[utts[0]]['output'][0]['shape'][-1])
logging.info('#input dims : ' + str(idim))
logging.info('#output dims: ' + str(odim))
# specify attention, CTC, hybrid mode
if args.mtlalpha == 1.0:
mtl_mode = 'ctc'
logging.info('Pure CTC mode')
elif args.mtlalpha == 0.0:
mtl_mode = 'att'
logging.info('Pure attention mode')
else:
mtl_mode = 'mtl'
logging.info('Multitask learning mode')
if args.enc_init is not None or args.dec_init is not None:
model = load_trained_modules(idim, odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim, odim, args)
assert isinstance(model, ASRInterface)
subsampling_factor = model.subsample[0]
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch.load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + '/model.json'
with open(model_conf, 'wb') as f:
logging.info('writing a model config file to ' + model_conf)
f.write(json.dumps((idim, odim, vars(args)),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
for key in sorted(vars(args).keys()):
logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.info('batch size is automatically increased (%d -> %d)' % (
args.batch_size, args.batch_size * args.ngpu))
args.batch_size *= args.ngpu
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
# Setup an optimizer
if args.opt == 'adadelta':
optimizer = torch.optim.Adadelta(
model.parameters(), rho=0.95, eps=args.eps,
weight_decay=args.weight_decay)
elif args.opt == 'adam':
optimizer = torch.optim.Adam(model.parameters(),
weight_decay=args.weight_decay)
elif args.opt == 'noam':
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux")
raise e
if args.opt == 'noam':
model, optimizer.optimizer = amp.initialize(model, optimizer.optimizer, opt_level=args.train_dtype)
else:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype)
use_apex = True
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
converter = CustomConverter(subsampling_factor=subsampling_factor, dtype=dtype)
# read json data
with open(args.train_json, 'rb') as f:
train_json = json.load(f)['utts']
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(train_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout)
valid = make_batchset(valid_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout)
load_tr = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': True} # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': False} # Switch the mode of preprocessing
)
# hack to make batchsize argument as 1
# actual bathsize is included in a list
# default collate function converts numpy array to pytorch tensor
# we used an empty collate function instead which returns list
train_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1, num_workers=args.n_iter_processes, pin_memory=True,
shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}
valid_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1, pin_memory=True, shuffle=False, collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes)}
# Set up a trainer
updater = CustomUpdater(
model, args.grad_clip, train_iter, optimizer,
device, args.ngpu, args.grad_noise, args.accum_grad, use_apex=use_apex)
trainer = training.Trainer(
updater, (args.epochs, 'epoch'), out=args.outdir)
if use_sortagrad:
trainer.extend(ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))
# Resume from a snapshot
if args.resume:
logging.info('resumed from %s' % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu))
if args.save_interval_iters > 0:
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, 'iteration'))
# Save attention weight each epoch
if args.num_save_attention > 0 and args.mtlalpha != 1.0:
data = sorted(list(valid_json.items())[:args.num_save_attention],
key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn, data, args.outdir + "/att_ws",
converter=converter, transform=load_cv, device=device)
trainer.extend(att_reporter, trigger=(1, 'epoch'))
else:
att_reporter = None
# Make a plot for training and validation values
trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss',
'main/loss_ctc', 'validation/main/loss_ctc',
'main/loss_att', 'validation/main/loss_att'],
'epoch', file_name='loss.png'))
trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc'],
'epoch', file_name='acc.png'))
trainer.extend(extensions.PlotReport(['main/cer_ctc', 'validation/main/cer_ctc'],
'epoch', file_name='cer.png'))
# Save best models
trainer.extend(snapshot_object(model, 'model.loss.best'),
trigger=training.triggers.MinValueTrigger('validation/main/loss'))
if mtl_mode != 'ctc':
trainer.extend(snapshot_object(model, 'model.acc.best'),
trigger=training.triggers.MaxValueTrigger('validation/main/acc'))
# save snapshot which contains model and optimizer states
trainer.extend(torch_snapshot(), trigger=(1, 'epoch'))
if args.save_interval_iters > 0:
trainer.extend(torch_snapshot_iter(), trigger=(args.save_interval_iters, 'iteration'))
# epsilon decay in the optimizer
if args.opt == 'adadelta':
if args.criterion == 'acc' and mtl_mode != 'ctc':
trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
elif args.criterion == 'loss':
trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(trigger=(args.report_interval_iters, 'iteration')))
report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att',
'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att',
'main/acc', 'validation/main/acc', 'main/cer_ctc', 'validation/main/cer_ctc',
'elapsed_time']
if args.opt == 'adadelta':
trainer.extend(extensions.observe_value(
'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0]["eps"]),
trigger=(args.report_interval_iters, 'iteration'))
report_keys.append('eps')
if args.report_cer:
report_keys.append('validation/main/cer')
if args.report_wer:
report_keys.append('validation/main/wer')
trainer.extend(extensions.PrintReport(
report_keys), trigger=(args.report_interval_iters, 'iteration'))
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
trainer.extend(TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter),
trigger=(args.report_interval_iters, "iteration"))
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
|
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning('cuda is not available')
# get input and output dimension info
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
utts = list(valid_json.keys())
idim = int(valid_json[utts[0]]['input'][0]['shape'][-1])
odim = int(valid_json[utts[0]]['output'][0]['shape'][-1])
logging.info('#input dims : ' + str(idim))
logging.info('#output dims: ' + str(odim))
# specify attention, CTC, hybrid mode
if args.mtlalpha == 1.0:
mtl_mode = 'ctc'
logging.info('Pure CTC mode')
elif args.mtlalpha == 0.0:
mtl_mode = 'att'
logging.info('Pure attention mode')
else:
mtl_mode = 'mtl'
logging.info('Multitask learning mode')
if args.enc_init is not None or args.dec_init is not None:
model = load_trained_modules(idim, odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim, odim, args)
assert isinstance(model, ASRInterface)
subsampling_factor = model.subsample[0]
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch.load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + '/model.json'
with open(model_conf, 'wb') as f:
logging.info('writing a model config file to ' + model_conf)
f.write(json.dumps((idim, odim, vars(args)),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
for key in sorted(vars(args).keys()):
logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.info('batch size is automatically increased (%d -> %d)' % (
args.batch_size, args.batch_size * args.ngpu))
args.batch_size *= args.ngpu
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
# Setup an optimizer
if args.opt == 'adadelta':
optimizer = torch.optim.Adadelta(
model.parameters(), rho=0.95, eps=args.eps,
weight_decay=args.weight_decay)
elif args.opt == 'adam':
optimizer = torch.optim.Adam(model.parameters(),
weight_decay=args.weight_decay)
elif args.opt == 'noam':
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux")
raise e
if args.opt == 'noam':
model, optimizer.optimizer = amp.initialize(model, optimizer.optimizer, opt_level=args.train_dtype)
else:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype)
use_apex = True
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
converter = CustomConverter(subsampling_factor=subsampling_factor, dtype=dtype)
# read json data
with open(args.train_json, 'rb') as f:
train_json = json.load(f)['utts']
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(train_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout)
valid = make_batchset(valid_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout)
load_tr = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': True} # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': False} # Switch the mode of preprocessing
)
# hack to make batchsize argument as 1
# actual bathsize is included in a list
# default collate function converts numpy array to pytorch tensor
# we used an empty collate function instead which returns list
train_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1, num_workers=args.n_iter_processes, pin_memory=True,
shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}
valid_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1, pin_memory=True, shuffle=False, collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes)}
# Set up a trainer
updater = CustomUpdater(
model, args.grad_clip, train_iter, optimizer,
device, args.ngpu, args.grad_noise, args.accum_grad, use_apex=use_apex)
trainer = training.Trainer(
updater, (args.epochs, 'epoch'), out=args.outdir)
if use_sortagrad:
trainer.extend(ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))
# Resume from a snapshot
if args.resume:
logging.info('resumed from %s' % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu))
if args.save_interval_iters > 0:
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, 'iteration'))
# Save attention weight each epoch
if args.num_save_attention > 0 and args.mtlalpha != 1.0:
data = sorted(list(valid_json.items())[:args.num_save_attention],
key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn, data, args.outdir + "/att_ws",
converter=converter, transform=load_cv, device=device)
trainer.extend(att_reporter, trigger=(1, 'epoch'))
else:
att_reporter = None
# Make a plot for training and validation values
trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss',
'main/loss_ctc', 'validation/main/loss_ctc',
'main/loss_att', 'validation/main/loss_att'],
'epoch', file_name='loss.png'))
trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc'],
'epoch', file_name='acc.png'))
trainer.extend(extensions.PlotReport(['main/cer_ctc', 'validation/main/cer_ctc'],
'epoch', file_name='cer.png'))
# Save best models
trainer.extend(snapshot_object(model, 'model.loss.best'),
trigger=training.triggers.MinValueTrigger('validation/main/loss'))
if mtl_mode != 'ctc':
trainer.extend(snapshot_object(model, 'model.acc.best'),
trigger=training.triggers.MaxValueTrigger('validation/main/acc'))
# save snapshot which contains model and optimizer states
trainer.extend(torch_snapshot(), trigger=(1, 'epoch'))
if args.save_interval_iters > 0:
trainer.extend(torch_snapshot(filename='snapshot.iter.{.updater.iteration}'),
trigger=(args.save_interval_iters, 'iteration'))
# epsilon decay in the optimizer
if args.opt == 'adadelta':
if args.criterion == 'acc' and mtl_mode != 'ctc':
trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
elif args.criterion == 'loss':
trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(trigger=(args.report_interval_iters, 'iteration')))
report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att',
'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att',
'main/acc', 'validation/main/acc', 'main/cer_ctc', 'validation/main/cer_ctc',
'elapsed_time']
if args.opt == 'adadelta':
trainer.extend(extensions.observe_value(
'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0]["eps"]),
trigger=(args.report_interval_iters, 'iteration'))
report_keys.append('eps')
if args.report_cer:
report_keys.append('validation/main/cer')
if args.report_wer:
report_keys.append('validation/main/wer')
trainer.extend(extensions.PrintReport(
report_keys), trigger=(args.report_interval_iters, 'iteration'))
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
trainer.extend(TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter),
trigger=(args.report_interval_iters, "iteration"))
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
|
35,274 |
def _pad_by_zeros(tensor_slices):
"""Return zero-padded full tensor.
"""
I = len(tensor_slices)
J = max(tensor_slice.shape[0] for tensor_slice in tensor_slices)
K = tensor_slices[0].shape[1]
unfolded = T.zeros((I, J, K))
for i, tensor_slice in enumerate(tensor_slices):
J_i = len(tensor_slice)
unfolded[i, :J_i] = tensor_slice
return unfolded
|
def _pad_by_zeros(tensor_slices):
"""Return zero-padded full tensor.
"""
I = len(tensor_slices)
J = max(tensor_slice.shape[0] for tensor_slice in tensor_slices)
K = tensor_slices[0].shape[1]
unfolded = T.zeros((I, J, K), **T.context(tensor_slices[0]))
for i, tensor_slice in enumerate(tensor_slices):
J_i = len(tensor_slice)
unfolded[i, :J_i] = tensor_slice
return unfolded
|
13,524 |
def parse_value_name_collumn(value_name, value, signal_size, float_factory):
mini = maxi = offset = None
value_table = dict()
if ".." in value_name:
(mini, maxi) = value_name.strip().split("..", 2)
mini = float_factory(mini)
maxi = float_factory(maxi)
offset = mini
elif value_name.__len__() > 0:
if value.strip().__len__() > 0:
# Value Table
value = int(float(value))
value_table[value] = value_name
maxi = pow(2, signal_size) - 1
maxi = float_factory(maxi)
mini = 0
offset = 1
return mini, maxi, offset, value_table
|
def parse_value_name_collumn(value_name, value, signal_size, float_factory):
mini = maxi = offset = None
value_table = dict()
if ".." in value_name:
(mini, maxi) = value_name.strip().split("..", 2)
mini = float_factory(mini)
maxi = float_factory(maxi)
offset = mini
elif len(value_name) > 0:
if value.strip().__len__() > 0:
# Value Table
value = int(float(value))
value_table[value] = value_name
maxi = pow(2, signal_size) - 1
maxi = float_factory(maxi)
mini = 0
offset = 1
return mini, maxi, offset, value_table
|
12,424 |
def handle(name: str, cfg: dict, cloud: Cloud, log, args: list):
LOG.debug("Starting clound-init module %s", name)
wg_section = None
if "wireguard" in cfg:
LOG.debug("Found Wireguard section in config")
wg_section = cfg["wireguard"]
if wg_section is None:
LOG.debug(
"Skipping module named %s," " no 'wireguard' configuration found",
name,
)
raise RuntimeError("Skipping Wireguard module")
# install wireguard tools, enable kernel module
maybe_install_wireguard_tools(cloud)
try:
subp.subp("modprobe wireguard", capture=True, shell=True)
except subp.ProcessExecutionError as e:
util.logexc(LOG, f"Could not load wireguard module: {e}")
for wg_int in wg_section["interfaces"]:
# check schema
supplemental_schema_validation(wg_int)
# write wg config files
write_config(wg_int)
# enable wg interfaces
enable_wg(wg_int)
# parse and run readinessprobe parameters
if (
"readinessprobe" in wg_section
and wg_section["readinessprobe"] is not None
):
readinessprobe_command_validation(wg_section)
readinessprobe(wg_section)
else:
LOG.debug("Skipping readinessprobe - no checks defined")
|
def handle(name: str, cfg: dict, cloud: Cloud, log, args: list):
LOG.debug("Starting clound-init module %s", name)
wg_section = None
if "wireguard" in cfg:
LOG.debug("Found Wireguard section in config")
wg_section = cfg["wireguard"]
if wg_section is None:
LOG.debug(
"Skipping module named %s," " no 'wireguard' configuration found",
name,
)
return
# install wireguard tools, enable kernel module
maybe_install_wireguard_tools(cloud)
try:
subp.subp("modprobe wireguard", capture=True, shell=True)
except subp.ProcessExecutionError as e:
util.logexc(LOG, f"Could not load wireguard module: {e}")
for wg_int in wg_section["interfaces"]:
# check schema
supplemental_schema_validation(wg_int)
# write wg config files
write_config(wg_int)
# enable wg interfaces
enable_wg(wg_int)
# parse and run readinessprobe parameters
if (
"readinessprobe" in wg_section
and wg_section["readinessprobe"] is not None
):
readinessprobe_command_validation(wg_section)
readinessprobe(wg_section)
else:
LOG.debug("Skipping readinessprobe - no checks defined")
|
14,612 |
def check_generate_predictions_console(use_threshold=False, all_probs=False):
# create some simple classification data without feature hashing
train_fs, test_fs = make_classification_data(num_examples=1000,
num_features=5)
# save the test feature set to an NDJ file
input_file = join(_my_dir, 'test',
'test_generate_predictions.jsonlines')
writer = NDJWriter(input_file, test_fs)
writer.write()
proba = use_threshold or all_probs
# create a learner that uses an SGD classifier
learner = Learner('SGDClassifier', probability=proba)
# train the learner with grid search
learner.train(train_fs, grid_search=True)
# get the predictions on the test featureset
predictions = learner.predict(test_fs)
# if we asked for probabilities, then use the threshold
# to convert them into binary predictions
if use_threshold:
threshold = 0.6
predictions = [int(p[1] >= threshold) for p in predictions]
else:
predictions = predictions.tolist()
threshold = None
# save the learner to a file
model_file = join(_my_dir, 'output',
'test_generate_predictions_console.model')
learner.save(model_file)
# now call main() from generate_predictions.py
generate_cmd = []
if use_threshold:
generate_cmd.append('-t {}'.format(threshold))
elif all_probs:
generate_cmd.append('-a')
generate_cmd.extend([model_file, input_file])
# we need to capture stdout since that's what main() writes to
err = ''
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = mystdout = StringIO()
sys.stderr = mystderr = StringIO()
gp.main(generate_cmd)
out = mystdout.getvalue()
err = mystderr.getvalue()
output_lines = out.strip().split('\n')[1:] # Skip headers
if all_probs:
# Ignore the id (first column) in output.
predictions_after_saving = [[float(p) for p in x.split('\t')[1:]]
for x in output_lines]
else:
# Ignore the id (first column) in output.
predictions_after_saving = [int(x.split('\t')[1])
for x in output_lines]
if all_probs:
assert_array_almost_equal(predictions, predictions_after_saving)
else:
eq_(predictions, predictions_after_saving)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
print(err)
|
def check_generate_predictions_console(use_threshold=False, all_probs=False):
# create some simple classification data without feature hashing
train_fs, test_fs = make_classification_data(num_examples=1000,
num_features=5)
# save the test feature set to an NDJ file
input_file = join(_my_dir, 'test',
'test_generate_predictions.jsonlines')
writer = NDJWriter(input_file, test_fs)
writer.write()
enable_probabilities = use_threshold or all_probs
# create a learner that uses an SGD classifier
learner = Learner('SGDClassifier', probability=proba)
# train the learner with grid search
learner.train(train_fs, grid_search=True)
# get the predictions on the test featureset
predictions = learner.predict(test_fs)
# if we asked for probabilities, then use the threshold
# to convert them into binary predictions
if use_threshold:
threshold = 0.6
predictions = [int(p[1] >= threshold) for p in predictions]
else:
predictions = predictions.tolist()
threshold = None
# save the learner to a file
model_file = join(_my_dir, 'output',
'test_generate_predictions_console.model')
learner.save(model_file)
# now call main() from generate_predictions.py
generate_cmd = []
if use_threshold:
generate_cmd.append('-t {}'.format(threshold))
elif all_probs:
generate_cmd.append('-a')
generate_cmd.extend([model_file, input_file])
# we need to capture stdout since that's what main() writes to
err = ''
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = mystdout = StringIO()
sys.stderr = mystderr = StringIO()
gp.main(generate_cmd)
out = mystdout.getvalue()
err = mystderr.getvalue()
output_lines = out.strip().split('\n')[1:] # Skip headers
if all_probs:
# Ignore the id (first column) in output.
predictions_after_saving = [[float(p) for p in x.split('\t')[1:]]
for x in output_lines]
else:
# Ignore the id (first column) in output.
predictions_after_saving = [int(x.split('\t')[1])
for x in output_lines]
if all_probs:
assert_array_almost_equal(predictions, predictions_after_saving)
else:
eq_(predictions, predictions_after_saving)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
print(err)
|
41,218 |
def cphase(theta: float, q0: cirq.Qid, q1: cirq.Qid) -> cirq.OP_TREE:
"""Implement a cphase using the Ising gate generated from 2 Sycamore gates
A CPHASE gate has the matrix diag([1, 1, 1, exp(1j * theta)]) and can
be mapped to the Ising gate by prep and post rotations of Z-pi/4.
We drop the global phase shift of theta/4.
Args:
theta: exp(1j * theta )
q0: First qubit id to operate on
q1: Second qubit id to operate on
returns:
a cirq program implementing cphase
"""
yield rzz(-theta / 4, q0, q1)
yield cirq.rz(theta / 2).on(q0)
yield cirq.rz(theta / 2).on(q1)
|
def cphase(theta: float, q0: cirq.Qid, q1: cirq.Qid) -> cirq.OP_TREE:
"""Implements a cphase using the Ising gate generated from 2 Sycamore gates.
A CPHASE gate has the matrix diag([1, 1, 1, exp(1j * theta)]) and can
be mapped to the Ising gate by prep and post rotations of Z-pi/4.
We drop the global phase shift of theta/4.
Args:
theta: exp(1j * theta )
q0: First qubit id to operate on
q1: Second qubit id to operate on
returns:
a cirq program implementing cphase
"""
yield rzz(-theta / 4, q0, q1)
yield cirq.rz(theta / 2).on(q0)
yield cirq.rz(theta / 2).on(q1)
|
41,724 |
def objective(trial):
# Model and optimizer
model = L.Classifier(create_model(trial))
optimizer = create_optimizer(trial, model)
# Dataset
rng = np.random.RandomState(0)
train, test = chainer.datasets.get_mnist()
train = chainer.datasets.SubDataset(
train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train)))
test = chainer.datasets.SubDataset(test, 0, N_TEST_EXAMPLES, order=rng.permutation(len(test)))
train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE)
test_iter = chainer.iterators.SerialIterator(test, BATCHSIZE, repeat=False, shuffle=False)
# Trainer
updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (EPOCH, 'epoch'))
trainer.extend(chainer.training.extensions.Evaluator(test_iter, model))
log_report_extension = chainer.training.extensions.LogReport(log_name=None)
trainer.extend(
chainer.training.extensions.PrintReport([
'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
'validation/main/accuracy'
]))
trainer.extend(log_report_extension)
trainer.extend(
ChainerPruningExtension(trial, 'validation/main/accuracy', (1, 'epoch')))
# Run!
trainer.run(show_loop_exception_msg=False)
# Set the user attributes such as loss and accuracy for train and validation sets
log_last = log_report_extension.log[-1]
for key, value in log_last.items():
trial.set_user_attr(key, value)
# Return the validation accuracy
return log_report_extension.log[-1]['validation/main/accuracy']
|
def objective(trial):
# Model and optimizer
model = L.Classifier(create_model(trial))
optimizer = create_optimizer(trial, model)
# Dataset
rng = np.random.RandomState(0)
train, test = chainer.datasets.get_mnist()
train = chainer.datasets.SubDataset(
train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train)))
test = chainer.datasets.SubDataset(test, 0, N_TEST_EXAMPLES, order=rng.permutation(len(test)))
train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE)
test_iter = chainer.iterators.SerialIterator(test, BATCHSIZE, repeat=False, shuffle=False)
# Trainer
updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (EPOCH, 'epoch'))
trainer.extend(chainer.training.extensions.Evaluator(test_iter, model))
log_report_extension = chainer.training.extensions.LogReport(log_name=None)
trainer.extend(
chainer.training.extensions.PrintReport([
'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
'validation/main/accuracy'
]))
trainer.extend(log_report_extension)
trainer.extend(
ChainerPruningExtension(trial, 'validation/main/accuracy', (1, 'epoch')))
# Run!
trainer.run(show_loop_exception_msg=False)
# Set the user attributes such as loss and accuracy for train and validation sets
log_last = log_report_extension.log[-1]
for key, value in log_last.items():
trial.set_user_attr(key, value)
# Return the validation accuracy
return log_report_extension.log[-1]['validation/main/accuracy']
|
42,866 |
def triangular_decomposition(V, tol=1e-11):
r"""Triangular decomposition of unitary due to Reck et al.
See Reck et al. Phys. Rev. Lett. 73, 58 [10.1103/PhysRevLett.73.58]
for more details and Clements et al. Optica 3, 1460 (2016) for details
on notation.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,theta,phi,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary applied at the beginning of circuit
"""
localV = V
(nsize, _) = localV.shape
diffn = np.linalg.norm(V @ V.conj().T - np.identity(nsize))
if diffn >= tol:
raise ValueError("The input matrix is not unitary")
tlist = []
for i in range(nsize-2, -1, -1):
for j in range(i+1):
tlist.append(nullT(nsize-j-1, nsize-i-2, localV))
localV = T(*tlist[-1]) @ localV
return list(reversed(tlist)), np.diag(localV)
|
def triangular_decomposition(V, tol=1e-11):
r"""Triangular decomposition of unitary due to Reck et al.
See Reck et al. Phys. Rev. Lett. 73, 58 [10.1103/PhysRevLett.73.58]
for more details and Clements et al. Optica 3, 1460 (2016) for details
on notation.
Args:
V (array): Unitary matrix of size ``n_size``
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,theta,phi,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary applied at the beginning of circuit
"""
localV = V
(nsize, _) = localV.shape
diffn = np.linalg.norm(V @ V.conj().T - np.identity(nsize))
if diffn >= tol:
raise ValueError("The input matrix is not unitary")
tlist = []
for i in range(nsize-2, -1, -1):
for j in range(i+1):
tlist.append(nullT(nsize-j-1, nsize-i-2, localV))
localV = T(*tlist[-1]) @ localV
return list(reversed(tlist)), np.diag(localV)
|
31,748 |
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
message = 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e):
message = 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return message
|
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
message = 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e):
message = 'Authorization Error: make sure API Key is correctly set.'
else:
raise e
return message
|
32,584 |
def update_command_results(
xm: XM,
command_results: List[CommandResults],
xm_data_list: List[Dict[str, Any]],
readable_output,
entity: Dict[str, Any],
):
id_ = entity.get("entityId")
try:
ip = entity.get("ipv4Str", "")
domain = (
entity.get("customProperties", {})
.get("domainWorkgroup", {})
.get("data", "")
)
os = entity.get("os", {}).get("type", "")
os_version = entity.get("os", {}).get("name", "")
hostname = entity.get("displayName", "")
endpoint_standard_context = Common.Endpoint(
id_,
ip_address=ip,
domain=domain,
os=os,
os_version=os_version,
hostname=hostname,
)
except (TypeError, AttributeError, KeyError):
endpoint_standard_context = Common.Endpoint(id_)
command_results.append(
CommandResults(
indicator=endpoint_standard_context,
readable_output=f"Fetched Endpoint {id_} info",
raw_response=entity,
)
)
entity_data = entity_obj_to_data(xm, entity)
readable_output += pretty_print_entity(entity_data)
xm_data_list.append(entity_data)
return readable_output
|
def update_command_results(
xm: XM,
command_results: List[CommandResults],
xm_data_list: List[Dict[str, Any]],
readable_output,
entity: Dict[str, Any],
):
id_ = entity.get("entityId")
try:
ip = entity.get("ipv4Str", "")
domain = (
demisto.get(entity, 'customProperties.domainWorkgroup.data')
)
os = entity.get("os", {}).get("type", "")
os_version = entity.get("os", {}).get("name", "")
hostname = entity.get("displayName", "")
endpoint_standard_context = Common.Endpoint(
id_,
ip_address=ip,
domain=domain,
os=os,
os_version=os_version,
hostname=hostname,
)
except (TypeError, AttributeError, KeyError):
endpoint_standard_context = Common.Endpoint(id_)
command_results.append(
CommandResults(
indicator=endpoint_standard_context,
readable_output=f"Fetched Endpoint {id_} info",
raw_response=entity,
)
)
entity_data = entity_obj_to_data(xm, entity)
readable_output += pretty_print_entity(entity_data)
xm_data_list.append(entity_data)
return readable_output
|
35,648 |
def resnext101_32x8d(weights: Optional[ResNeXt101_32x8dWeights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNeXt101_32x8dWeights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNeXt101_32x8dWeights.verify(weights)
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet(BasicBlock, [3, 4, 23, 3], weights, progress, **kwargs)
|
def resnext101_32x8d(weights: Optional[ResNeXt101_32x8dWeights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNeXt101_32x8dWeights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNeXt101_32x8dWeights.verify(weights)
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
|
32,943 |
def _traced_query(func, args, kwargs):
pin = Pin.get_from(graphql)
if not pin or not pin.enabled():
return func(*args, **kwargs)
# set resource name
source = get_argument_value(args, kwargs, 1, "source")
resource = _get_source_str(source)
with pin.tracer.trace(
name="graphql.graphql",
resource=resource,
service=trace_utils.int_service(pin, config.graphql),
span_type=SpanTypes.GRAPHQL,
) as span:
# mark span as measured and set sample rate
span.set_tag(SPAN_MEASURED_KEY)
sample_rate = config.graphql.get_analytics_sample_rate()
if sample_rate is not None:
span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)
result = func(*args, **kwargs)
if isinstance(result, ExecutionResult):
# set error tags if the result contains a list of GraphqlErrors, skip if it's a promise
# If the wrapped validate and execute functions return a list of errors we will duplicate
# the span errors here.
_set_span_errors(result.errors, span)
return result
|
def _traced_query(func, args, kwargs):
pin = Pin.get_from(graphql)
if not pin or not pin.enabled():
return func(*args, **kwargs)
# set resource name
source = get_argument_value(args, kwargs, 1, "source")
resource = _get_source_str(source)
with pin.tracer.trace(
name="graphql.request",
resource=resource,
service=trace_utils.int_service(pin, config.graphql),
span_type=SpanTypes.GRAPHQL,
) as span:
# mark span as measured and set sample rate
span.set_tag(SPAN_MEASURED_KEY)
sample_rate = config.graphql.get_analytics_sample_rate()
if sample_rate is not None:
span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)
result = func(*args, **kwargs)
if isinstance(result, ExecutionResult):
# set error tags if the result contains a list of GraphqlErrors, skip if it's a promise
# If the wrapped validate and execute functions return a list of errors we will duplicate
# the span errors here.
_set_span_errors(result.errors, span)
return result
|
53,273 |
def get_paschen_constants (gas,electrode):
r"""
Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage
Parameters
----------
gas : 'str'
electrode : 'str'
String representing the gas and electrode material
Return
------
dictionary containing the constants A, B and townsend_gamma for calculation of the breakdwn voltage
References
---------
Paschen_constants contains the coefficents A and B for the estimation of the
First Townsend Ionization Coefficent
(exponential fit to the First Townsend Ionization coefficient)
as adapted from
E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics,
Wiley-Interscience, New York 1971
format: paschen_constants dir {"gas":[A,B]}
units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)]
Townsend_gamma is the Second Townsend Ionization coefficient as given by
A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application
IOP Publishing Ltd 2016
ISBN 978-0-7503-1236-3 (ebook)
ISBN 978-0-7503-1237-0 (print)
Examples
--------
c=def get_paschen_constants ("Ar","Ni):
c={'A': 11, 'B': 135, 'gam': 0.058}
c=def get_paschen_constants ("Ar","zz"):
c={'A': 11, 'B': 135, 'gam': 0.01}
If electrode material is not found a default value of 0.01 is taken
c=def get_paschen_constants ("Zz","Ni"):
c=None
If gas is not found, c is set to None
"""
# Supported gases
gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"]
paschen_constants={"Air":[11,274],
"N2":[9.0, 257],
"H2":[3.8,104],
"He":[2.3,26],
"Ne":[3.0, 75],
"Ar":[11,135],
"Kr":[13,180],
"Xe":[20,263]}
# Supported electrode materials
materials=["Al","Cu","Ni","Pt","C","W","Fe"]
townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02},
"N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059},
"H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061},
"He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015},
"Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022},
"Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058},
"Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None},
"Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}}
# Check if the asked gas and electrode material is supported
resg= gas in gases
rese=electrode in materials
# If the gas is supported get the constants A and B
print(resg,rese)
if resg==True :
print(gas)
A=paschen_constants[gas][0]
B=paschen_constants[gas][1]
print(A,B)
# Get the townsend_gamma coefficient for the the gas/electrode combination
if rese==True:
gam=townsend_gamma[gas]
print(gam)
gn=gam[electrode]
print (gn)
# Test if townsend_gamma exists for the demanded gas/electrode configuration
# If not a default townsend_gamma value of 0.01 is taken
if gn is None:
gn=0.01
print("default")
print(gn)
else:
# If the electrode material is not supportes set townsend_gamma to default = 0.01
gn=0.01
print("default")
# Create output dir {const}
const={"A":A,"B":B,"gam":gn}
print(const)
return const
# If gas is not supported set const=None
else :
const=None
print("No constants for this gas available",const)
return const
|
def get_paschen_constants (gas,electrode):
r"""
Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage
Parameters
----------
gas : 'str'
electrode : 'str'
String representing the gas and electrode material
Return
------
dictionary containing the constants A, B and townsend_gamma for calculation of the breakdwn voltage
References
---------
Paschen_constants contains the coefficents A and B for the estimation of the
First Townsend Ionization Coefficent
(exponential fit to the First Townsend Ionization coefficient)
as adapted from
E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics,
Wiley-Interscience, New York 1971
format: paschen_constants dir {"gas":[A,B]}
units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)]
`townsend_gamma` is the Second Townsend Ionization coefficient as given by
A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application
IOP Publishing Ltd 2016
ISBN 978-0-7503-1236-3 (ebook)
ISBN 978-0-7503-1237-0 (print)
Examples
--------
c=def get_paschen_constants ("Ar","Ni):
c={'A': 11, 'B': 135, 'gam': 0.058}
c=def get_paschen_constants ("Ar","zz"):
c={'A': 11, 'B': 135, 'gam': 0.01}
If electrode material is not found a default value of 0.01 is taken
c=def get_paschen_constants ("Zz","Ni"):
c=None
If gas is not found, c is set to None
"""
# Supported gases
gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"]
paschen_constants={"Air":[11,274],
"N2":[9.0, 257],
"H2":[3.8,104],
"He":[2.3,26],
"Ne":[3.0, 75],
"Ar":[11,135],
"Kr":[13,180],
"Xe":[20,263]}
# Supported electrode materials
materials=["Al","Cu","Ni","Pt","C","W","Fe"]
townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02},
"N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059},
"H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061},
"He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015},
"Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022},
"Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058},
"Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None},
"Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}}
# Check if the asked gas and electrode material is supported
resg= gas in gases
rese=electrode in materials
# If the gas is supported get the constants A and B
print(resg,rese)
if resg==True :
print(gas)
A=paschen_constants[gas][0]
B=paschen_constants[gas][1]
print(A,B)
# Get the townsend_gamma coefficient for the the gas/electrode combination
if rese==True:
gam=townsend_gamma[gas]
print(gam)
gn=gam[electrode]
print (gn)
# Test if townsend_gamma exists for the demanded gas/electrode configuration
# If not a default townsend_gamma value of 0.01 is taken
if gn is None:
gn=0.01
print("default")
print(gn)
else:
# If the electrode material is not supportes set townsend_gamma to default = 0.01
gn=0.01
print("default")
# Create output dir {const}
const={"A":A,"B":B,"gam":gn}
print(const)
return const
# If gas is not supported set const=None
else :
const=None
print("No constants for this gas available",const)
return const
|
52,860 |
def _has_todo(txt):
"""
check if text contains a TODO
the todo needs to be delimited by whitespaces or be at the begin/end
"""
return re.search(r"(^|\W)(todo|TODO)(\W|$)", txt) is not None
|
def _has_todo(txt):
"""
check if text contains a TODO
the todo needs to be delimited by a non-alphanumerical character (whitespaces, #, ...)
or be at the begin/end of txt
"""
return re.search(r"(^|\W)(todo|TODO)(\W|$)", txt) is not None
|
47,753 |
def test__templater_dbt_handle_database_connection_failure(
project_dir, dbt_templater # noqa: F811
):
"""Test the result of a failed database connection."""
from dbt.adapters.factory import get_adapter
src_fpath = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/exception_connect_database.sql"
target_fpath = os.path.abspath(
os.path.join(
project_dir, "models/my_new_project/exception_connect_database.sql"
)
)
# We move the file that throws an error in and out of the project directory
# as dbt throws an error if a node fails to parse while computing the DAG
os.rename(src_fpath, target_fpath)
try:
_, violations = dbt_templater.process(
in_str="",
fname=target_fpath,
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
except Exception as e:
if DBT_VERSION_TUPLE == (1, 0):
# In dbt 1.0.0, connection failures raise an exception
assert str(e).startswith(
"Runtime Error\n connection never acquired for thread"
)
else:
raise (e)
finally:
get_adapter(dbt_templater.dbt_config).connections.release()
os.rename(target_fpath, src_fpath)
if DBT_VERSION_TUPLE != (1, 0):
assert violations
# NB: Replace slashes to deal with different plaform paths being returned.
assert (
violations[0]
.desc()
.replace("\\", "/")
.startswith("dbt tried to connect to the database")
)
|
def test__templater_dbt_handle_database_connection_failure(
project_dir, dbt_templater # noqa: F811
):
"""Test the result of a failed database connection."""
from dbt.adapters.factory import get_adapter
src_fpath = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/exception_connect_database.sql"
target_fpath = os.path.abspath(
os.path.join(
project_dir, "models/my_new_project/exception_connect_database.sql"
)
)
# We move the file that throws an error in and out of the project directory
# as dbt throws an error if a node fails to parse while computing the DAG
os.rename(src_fpath, target_fpath)
try:
_, violations = dbt_templater.process(
in_str="",
fname=target_fpath,
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
except Exception as e:
if DBT_VERSION_TUPLE == (1, 0):
# In dbt 1.0.0, connection failures raise an exception
assert str(e).startswith(
"Runtime Error\n connection never acquired for thread"
)
else:
raise (e)
finally:
get_adapter(dbt_templater.dbt_config).connections.release()
os.rename(target_fpath, src_fpath)
if DBT_VERSION_TUPLE < (1, 0):
assert violations
# NB: Replace slashes to deal with different plaform paths being returned.
assert (
violations[0]
.desc()
.replace("\\", "/")
.startswith("dbt tried to connect to the database")
)
|
7,731 |
def bounding_box(geom_type, geom_id):
"""Get a bounding box for a geometric object
Parameters
----------
geom_type : str
Type of geometry object. One of ('surface', 'cell', 'universe')
geom_id : int
Id of the object. Can be positive or negative for surfaces.
"""
geomt = c_char_p(geom_type.encode())
llc = np.zeros((3,), dtype=float)
urc = np.zeros((3,), dtype=float)
_dll.openmc_bounding_box(geomt,
geom_id,
llc.ctypes.data_as(POINTER(c_double)),
urc.ctypes.data_as(POINTER(c_double)))
return llc, urc
|
def bounding_box(geom_type, geom_id):
"""Get a bounding box for a geometric object
Parameters
----------
geom_type : str
Type of geometry object. One of ('surface', 'cell', 'universe')
geom_id : int
ID of the object. Can be positive or negative for surfaces.
"""
geomt = c_char_p(geom_type.encode())
llc = np.zeros((3,), dtype=float)
urc = np.zeros((3,), dtype=float)
_dll.openmc_bounding_box(geomt,
geom_id,
llc.ctypes.data_as(POINTER(c_double)),
urc.ctypes.data_as(POINTER(c_double)))
return llc, urc
|
11,841 |
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but
the file remains open and the actual image data is not read from
the file until you try to process the data (or call the
:py:meth:`~PIL.Image.Image.load` method). See
:py:func:`~PIL.Image.new`. See :ref:`file-handling`.
:param fp: A filename (string), pathlib.Path object or a file object.
The file object must implement :py:meth:`~file.read`,
:py:meth:`~file.seek`, and :py:meth:`~file.tell` methods,
and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception FileNotFoundError: If the file cannot be found.
:exception PIL.UnidentifiedImageError: If the image cannot be opened and
identified.
:exception ValueError: If the ``mode`` is not "r", or if a StringIO
instance is used for ``fp``.
"""
if mode != "r":
raise ValueError("bad mode %r" % mode)
elif isinstance(fp, io.StringIO):
raise ValueError(
"StringIO cannot be used to open an image. Binary data must be used instead"
)
exclusive_fp = False
filename = ""
if isinstance(fp, Path):
filename = str(fp.resolve())
elif isPath(fp):
filename = fp
if filename:
fp = builtins.open(filename, "rb")
exclusive_fp = True
try:
fp.seek(0)
except (AttributeError, io.UnsupportedOperation):
fp = io.BytesIO(fp.read())
exclusive_fp = True
prefix = fp.read(16)
preinit()
accept_warnings = []
def _open_core(fp, filename, prefix):
for i in ID:
try:
factory, accept = OPEN[i]
result = not accept or accept(prefix)
if type(result) in [str, bytes]:
accept_warnings.append(result)
elif result:
fp.seek(0)
im = factory(fp, filename)
_decompression_bomb_check(im.size)
return im
except (SyntaxError, IndexError, TypeError, struct.error):
# Leave disabled by default, spams the logs with image
# opening failures that are entirely expected.
# logger.debug("", exc_info=True)
continue
except BaseException:
if exclusive_fp:
fp.close()
raise
return None
im = _open_core(fp, filename, prefix)
if im is None:
if init():
im = _open_core(fp, filename, prefix)
if im:
im._exclusive_fp = exclusive_fp
return im
if exclusive_fp:
fp.close()
for message in accept_warnings:
warnings.warn(message)
raise UnidentifiedImageError(
"cannot identify image file %r" % (filename if filename else fp)
)
|
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but
the file remains open and the actual image data is not read from
the file until you try to process the data (or call the
:py:meth:`~PIL.Image.Image.load` method). See
:py:func:`~PIL.Image.new`. See :ref:`file-handling`.
:param fp: A filename (string), pathlib.Path object or a file object.
The file object must implement :py:meth:`~file.read`,
:py:meth:`~file.seek`, and :py:meth:`~file.tell` methods,
and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception FileNotFoundError: If the file cannot be found.
:exception PIL.UnidentifiedImageError: If the image cannot be opened and
identified.
:exception ValueError: If the ``mode`` is not "r", or if a StringIO
instance is used for ``fp``.
"""
if mode != "r":
raise ValueError("bad mode %r" % mode)
elif isinstance(fp, io.StringIO):
raise ValueError(
"StringIO cannot be used to open an image. Binary data must be used instead."
)
exclusive_fp = False
filename = ""
if isinstance(fp, Path):
filename = str(fp.resolve())
elif isPath(fp):
filename = fp
if filename:
fp = builtins.open(filename, "rb")
exclusive_fp = True
try:
fp.seek(0)
except (AttributeError, io.UnsupportedOperation):
fp = io.BytesIO(fp.read())
exclusive_fp = True
prefix = fp.read(16)
preinit()
accept_warnings = []
def _open_core(fp, filename, prefix):
for i in ID:
try:
factory, accept = OPEN[i]
result = not accept or accept(prefix)
if type(result) in [str, bytes]:
accept_warnings.append(result)
elif result:
fp.seek(0)
im = factory(fp, filename)
_decompression_bomb_check(im.size)
return im
except (SyntaxError, IndexError, TypeError, struct.error):
# Leave disabled by default, spams the logs with image
# opening failures that are entirely expected.
# logger.debug("", exc_info=True)
continue
except BaseException:
if exclusive_fp:
fp.close()
raise
return None
im = _open_core(fp, filename, prefix)
if im is None:
if init():
im = _open_core(fp, filename, prefix)
if im:
im._exclusive_fp = exclusive_fp
return im
if exclusive_fp:
fp.close()
for message in accept_warnings:
warnings.warn(message)
raise UnidentifiedImageError(
"cannot identify image file %r" % (filename if filename else fp)
)
|
33,239 |
def stripped_to_photo(stripped):
"""
adds the jpg header and footer to a stripped image.
transcoded from https://github.com/telegramdesktop/tdesktop/blob/bec39d89e19670eb436dc794a8f20b657cb87c71/Telegram/SourceFiles/ui/image/image.cpp#L225
"""
if len(stripped) < 3 or stripped[0] != '\x01':
return stripped
header = bytearray(b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xdb\x00C\x00(\x1c\x1e#\x1e\x19(#!#-+(0<dA<77<{X]Id\x91\x80\x99\x96\x8f\x80\x8c\x8a\xa0\xb4\xe6\xc3\xa0\xaa\xda\xad\x8a\x8c\xc8\xff\xcb\xda\xee\xf5\xff\xff\xff\x9b\xc1\xff\xff\xff\xfa\xff\xe6\xfd\xff\xf8\xff\xdb\x00C\x01+--<5<vAAv\xf8\xa5\x8c\xa5\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xff\xc0\x00\x11\x08\x00\x00\x00\x00\x03\x01"\x00\x02\x11\x01\x03\x11\x01\xff\xc4\x00\x1f\x00\x00\x01\x05\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x10\x00\x02\x01\x03\x03\x02\x04\x03\x05\x05\x04\x04\x00\x00\x01}\x01\x02\x03\x00\x04\x11\x05\x12!1A\x06\x13Qa\x07"q\x142\x81\x91\xa1\x08#B\xb1\xc1\x15R\xd1\xf0$3br\x82\t\n\x16\x17\x18\x19\x1a%&\'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xc4\x00\x1f\x01\x00\x03\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x11\x00\x02\x01\x02\x04\x04\x03\x04\x07\x05\x04\x04\x00\x01\x02w\x00\x01\x02\x03\x11\x04\x05!1\x06\x12AQ\x07aq\x13"2\x81\x08\x14B\x91\xa1\xb1\xc1\t#3R\xf0\x15br\xd1\n\x16$4\xe1%\xf1\x17\x18\x19\x1a&\'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xda\x00\x0c\x03\x01\x00\x02\x11\x03\x11\x00?\x00')
footer = b"\xff\xd9"
header[164] = stripped[1]
header[166] = stripped[2]
return bytes(header) + stripped[3:] + footer
|
def stripped_to_photo(stripped):
"""
Adds the JPG header and footer to the :tl:`PhotoStrippedSize` image.
transcoded from https://github.com/telegramdesktop/tdesktop/blob/bec39d89e19670eb436dc794a8f20b657cb87c71/Telegram/SourceFiles/ui/image/image.cpp#L225
"""
if len(stripped) < 3 or stripped[0] != '\x01':
return stripped
header = bytearray(b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xdb\x00C\x00(\x1c\x1e#\x1e\x19(#!#-+(0<dA<77<{X]Id\x91\x80\x99\x96\x8f\x80\x8c\x8a\xa0\xb4\xe6\xc3\xa0\xaa\xda\xad\x8a\x8c\xc8\xff\xcb\xda\xee\xf5\xff\xff\xff\x9b\xc1\xff\xff\xff\xfa\xff\xe6\xfd\xff\xf8\xff\xdb\x00C\x01+--<5<vAAv\xf8\xa5\x8c\xa5\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xf8\xff\xc0\x00\x11\x08\x00\x00\x00\x00\x03\x01"\x00\x02\x11\x01\x03\x11\x01\xff\xc4\x00\x1f\x00\x00\x01\x05\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x10\x00\x02\x01\x03\x03\x02\x04\x03\x05\x05\x04\x04\x00\x00\x01}\x01\x02\x03\x00\x04\x11\x05\x12!1A\x06\x13Qa\x07"q\x142\x81\x91\xa1\x08#B\xb1\xc1\x15R\xd1\xf0$3br\x82\t\n\x16\x17\x18\x19\x1a%&\'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xc4\x00\x1f\x01\x00\x03\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x11\x00\x02\x01\x02\x04\x04\x03\x04\x07\x05\x04\x04\x00\x01\x02w\x00\x01\x02\x03\x11\x04\x05!1\x06\x12AQ\x07aq\x13"2\x81\x08\x14B\x91\xa1\xb1\xc1\t#3R\xf0\x15br\xd1\n\x16$4\xe1%\xf1\x17\x18\x19\x1a&\'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xda\x00\x0c\x03\x01\x00\x02\x11\x03\x11\x00?\x00')
footer = b"\xff\xd9"
header[164] = stripped[1]
header[166] = stripped[2]
return bytes(header) + stripped[3:] + footer
|
37,105 |
def circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
output=None,
interactive=False,
line_length=None,
plot_barriers=True,
reverse_bits=False,
justify=None,
vertical_compression='medium',
idle_wires=True,
with_layout=True,
fold=None):
"""Draw a quantum circuit to different formats (set by output parameter):
**text**: ASCII art TextDrawing that can be printed in the console.
**latex**: high-quality images compiled via latex.
**latex_source**: raw uncompiled latex output.
**matplotlib**: images with color rendered purely in Python.
Args:
circuit (QuantumCircuit): the quantum circuit to draw
scale (float): scale of image to draw (shrink if < 1)
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file.
This option is only used by the ``mpl`` output type. If a str is
passed in that is the path to a json file which contains that will
be open, parsed, and then used just as the input dict. See:
:ref:`Style Dict Doc <style-dict-doc>` for more information on the
contents.
output (str): Select the output method to use for drawing the circuit.
Valid choices are ``text``, ``latex``, ``latex_source``, ``mpl``.
By default the `'text`' drawer is used unless a user config file
has an alternative backend set as the default. If the output kwarg
is set, that backend will always be used over the default in a user
config file.
interactive (bool): when set true show the circuit in a new window
(for `mpl` this depends on the matplotlib backend being used
supporting this). Note when used with either the `text` or the
`latex_source` output type this has no effect and will be silently
ignored.
line_length (int): Deprecated, see `fold` which superseceds this option.
Sets the length of the lines generated by `text` output type.
This useful when the drawing does not fit in the console. If None
(default), it will try to guess the console width using
``shutil.get_terminal_size()``. However, if you're running in
jupyter the default line length is set to 80 characters. If you
don't want pagination at all, set ``line_length=-1``.
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (string): Options are ``left``, ``right`` or ``none``, if
anything else is supplied it defaults to left justified. It refers
to where gates should be placed in the output circuit if there is
an option. ``none`` results in each gate being placed in its own
column.
vertical_compression (string): ``high``, ``medium`` or ``low``. It
merges the lines generated by the ``text`` output so the drawing
will take less vertical room. Default is ``medium``. Only used by
the ``text`` output, will be silently ignored otherwise.
idle_wires (bool): Include idle wires (wires with no circuit elements)
in output visualization. Default is True.
with_layout (bool): Include layout information, with labels on the
physical layout.
fold (int): Sets pagination. It can be disabled using -1.
In `text`, sets the length of the lines. This useful when the
drawing does not fit in the console. If None (default), it will try
to guess the console width using ``shutil.get_terminal_size()``.
However, if running in jupyter, the default line length is set to
80 characters. In ``mpl`` is the amount of operations before
folding. Default is 25.
Returns:
:class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or
:class:`TextDrawing`:
* `PIL.Image` (output='latex')
an in-memory representation of the image of the circuit diagram.
* `matplotlib.figure.Figure` (output='mpl')
a matplotlib figure object for the circuit diagram.
* `str` (output='latex_source')
The LaTeX source code for visualizing the circuit diagram.
* `TextDrawing` (output='text')
A drawing that can be printed as ascii art
Raises:
VisualizationError: when an invalid output method is selected
ImportError: when the output methods requieres non-installed libraries.
.. _style-dict-doc:
**Style Dict Details**
The style dict kwarg contains numerous options that define the style of the
output circuit visualization. The style dict is only used by the ``mpl``
output. The options available in the style dict are defined below:
Args:
textcolor (str): The color code to use for text. Defaults to
`'#000000'`
subtextcolor (str): The color code to use for subtext. Defaults to
`'#000000'`
linecolor (str): The color code to use for lines. Defaults to
`'#000000'`
creglinecolor (str): The color code to use for classical register
lines. Defaults to `'#778899'`
gatetextcolor (str): The color code to use for gate text. Defaults to
`'#000000'`
gatefacecolor (str): The color code to use for gates. Defaults to
`'#ffffff'`
barrierfacecolor (str): The color code to use for barriers. Defaults to
`'#bdbdbd'`
backgroundcolor (str): The color code to use for the background.
Defaults to `'#ffffff'`
fontsize (int): The font size to use for text. Defaults to 13
subfontsize (int): The font size to use for subtext. Defaults to 8
displaytext (dict): A dictionary of the text to use for each element
type in the output visualization. The default values are::
{
'id': 'id',
'u0': 'U_0',
'u1': 'U_1',
'u2': 'U_2',
'u3': 'U_3',
'x': 'X',
'y': 'Y',
'z': 'Z',
'h': 'H',
's': 'S',
'sdg': 'S^\\dagger',
't': 'T',
'tdg': 'T^\\dagger',
'rx': 'R_x',
'ry': 'R_y',
'rz': 'R_z',
'reset': '\\left|0\\right\\rangle'
}
You must specify all the necessary values if using this. There is
no provision for passing an incomplete dict in.
displaycolor (dict): The color codes to use for each circuit element.
The default values are::
{
'id': '#F0E442',
'u0': '#E7AB3B',
'u1': '#E7AB3B',
'u2': '#E7AB3B',
'u3': '#E7AB3B',
'x': '#58C698',
'y': '#58C698',
'z': '#58C698',
'h': '#70B7EB',
's': '#E0722D',
'sdg': '#E0722D',
't': '#E0722D',
'tdg': '#E0722D',
'rx': '#ffffff',
'ry': '#ffffff',
'rz': '#ffffff',
'reset': '#D188B4',
'target': '#70B7EB',
'meas': '#D188B4'
}
Also, just like `displaytext` there is no provision for an
incomplete dict passed in.
latexdrawerstyle (bool): When set to True enable latex mode which will
draw gates like the `latex` output modes.
usepiformat (bool): When set to True use radians for output
fold (int): The number of circuit elements to fold the circuit at.
Defaults to 20
cregbundle (bool): If set True bundle classical registers
showindex (bool): If set True draw an index.
compress (bool): If set True draw a compressed circuit
figwidth (int): The maximum width (in inches) for the output figure.
dpi (int): The DPI to use for the output image. Defaults to 150
margin (list): A list of margin values to adjust spacing around output
image. Takes a list of 4 ints: [x left, x right, y bottom, y top].
creglinestyle (str): The style of line to use for classical registers.
Choices are `'solid'`, `'doublet'`, or any valid matplotlib
`linestyle` kwarg value. Defaults to `doublet`
"""
image = None
config = user_config.get_config()
# Get default from config file else use text
default_output = 'text'
if config:
default_output = config.get('circuit_drawer', 'text')
if default_output == 'auto':
if _matplotlib.HAS_MATPLOTLIB:
default_output = 'mpl'
else:
default_output = 'text'
if output is None:
output = default_output
if output == 'text':
return _text_circuit_drawer(circuit, filename=filename,
line_length=line_length,
reverse_bits=reverse_bits,
plot_barriers=plot_barriers,
justify=justify,
vertical_compression=vertical_compression,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
elif output == 'latex':
image = _latex_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'latex_source':
return _generate_latex_source(circuit,
filename=filename, scale=scale,
style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'mpl':
image = _matplotlib_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
else:
raise exceptions.VisualizationError(
'Invalid output type %s selected. The only valid choices '
'are latex, latex_source, text, and mpl' % output)
if image and interactive:
image.show()
return image
|
def circuit_drawer(circuit,
scale=0.7,
filename=None,
style=None,
output=None,
interactive=False,
line_length=None,
plot_barriers=True,
reverse_bits=False,
justify=None,
vertical_compression='medium',
idle_wires=True,
with_layout=True,
fold=None):
"""Draw a quantum circuit to different formats (set by output parameter):
**text**: ASCII art TextDrawing that can be printed in the console.
**latex**: high-quality images compiled via latex.
**latex_source**: raw uncompiled latex output.
**matplotlib**: images with color rendered purely in Python.
Args:
circuit (QuantumCircuit): the quantum circuit to draw
scale (float): scale of image to draw (shrink if < 1)
filename (str): file path to save image to
style (dict or str): dictionary of style or file name of style file.
This option is only used by the ``mpl`` output type. If a str is
passed in that is the path to a json file which contains that will
be open, parsed, and then used just as the input dict. See:
:ref:`Style Dict Doc <style-dict-doc>` for more information on the
contents.
output (str): Select the output method to use for drawing the circuit.
Valid choices are ``text``, ``latex``, ``latex_source``, ``mpl``.
By default the `'text`' drawer is used unless a user config file
has an alternative backend set as the default. If the output kwarg
is set, that backend will always be used over the default in a user
config file.
interactive (bool): when set true show the circuit in a new window
(for `mpl` this depends on the matplotlib backend being used
supporting this). Note when used with either the `text` or the
`latex_source` output type this has no effect and will be silently
ignored.
line_length (int): Deprecated, see `fold` which superseceds this option.
Sets the length of the lines generated by `text` output type.
This useful when the drawing does not fit in the console. If None
(default), it will try to guess the console width using
``shutil.get_terminal_size()``. However, if you're running in
jupyter the default line length is set to 80 characters. If you
don't want pagination at all, set ``line_length=-1``.
reverse_bits (bool): When set to True reverse the bit order inside
registers for the output visualization.
plot_barriers (bool): Enable/disable drawing barriers in the output
circuit. Defaults to True.
justify (string): Options are ``left``, ``right`` or ``none``, if
anything else is supplied it defaults to left justified. It refers
to where gates should be placed in the output circuit if there is
an option. ``none`` results in each gate being placed in its own
column.
vertical_compression (string): ``high``, ``medium`` or ``low``. It
merges the lines generated by the ``text`` output so the drawing
will take less vertical room. Default is ``medium``. Only used by
the ``text`` output, will be silently ignored otherwise.
idle_wires (bool): Include idle wires (wires with no circuit elements)
in output visualization. Default is True.
with_layout (bool): Include layout information, with labels on the
physical layout. Default is True.
fold (int): Sets pagination. It can be disabled using -1.
In `text`, sets the length of the lines. This useful when the
drawing does not fit in the console. If None (default), it will try
to guess the console width using ``shutil.get_terminal_size()``.
However, if running in jupyter, the default line length is set to
80 characters. In ``mpl`` is the amount of operations before
folding. Default is 25.
Returns:
:class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or
:class:`TextDrawing`:
* `PIL.Image` (output='latex')
an in-memory representation of the image of the circuit diagram.
* `matplotlib.figure.Figure` (output='mpl')
a matplotlib figure object for the circuit diagram.
* `str` (output='latex_source')
The LaTeX source code for visualizing the circuit diagram.
* `TextDrawing` (output='text')
A drawing that can be printed as ascii art
Raises:
VisualizationError: when an invalid output method is selected
ImportError: when the output methods requieres non-installed libraries.
.. _style-dict-doc:
**Style Dict Details**
The style dict kwarg contains numerous options that define the style of the
output circuit visualization. The style dict is only used by the ``mpl``
output. The options available in the style dict are defined below:
Args:
textcolor (str): The color code to use for text. Defaults to
`'#000000'`
subtextcolor (str): The color code to use for subtext. Defaults to
`'#000000'`
linecolor (str): The color code to use for lines. Defaults to
`'#000000'`
creglinecolor (str): The color code to use for classical register
lines. Defaults to `'#778899'`
gatetextcolor (str): The color code to use for gate text. Defaults to
`'#000000'`
gatefacecolor (str): The color code to use for gates. Defaults to
`'#ffffff'`
barrierfacecolor (str): The color code to use for barriers. Defaults to
`'#bdbdbd'`
backgroundcolor (str): The color code to use for the background.
Defaults to `'#ffffff'`
fontsize (int): The font size to use for text. Defaults to 13
subfontsize (int): The font size to use for subtext. Defaults to 8
displaytext (dict): A dictionary of the text to use for each element
type in the output visualization. The default values are::
{
'id': 'id',
'u0': 'U_0',
'u1': 'U_1',
'u2': 'U_2',
'u3': 'U_3',
'x': 'X',
'y': 'Y',
'z': 'Z',
'h': 'H',
's': 'S',
'sdg': 'S^\\dagger',
't': 'T',
'tdg': 'T^\\dagger',
'rx': 'R_x',
'ry': 'R_y',
'rz': 'R_z',
'reset': '\\left|0\\right\\rangle'
}
You must specify all the necessary values if using this. There is
no provision for passing an incomplete dict in.
displaycolor (dict): The color codes to use for each circuit element.
The default values are::
{
'id': '#F0E442',
'u0': '#E7AB3B',
'u1': '#E7AB3B',
'u2': '#E7AB3B',
'u3': '#E7AB3B',
'x': '#58C698',
'y': '#58C698',
'z': '#58C698',
'h': '#70B7EB',
's': '#E0722D',
'sdg': '#E0722D',
't': '#E0722D',
'tdg': '#E0722D',
'rx': '#ffffff',
'ry': '#ffffff',
'rz': '#ffffff',
'reset': '#D188B4',
'target': '#70B7EB',
'meas': '#D188B4'
}
Also, just like `displaytext` there is no provision for an
incomplete dict passed in.
latexdrawerstyle (bool): When set to True enable latex mode which will
draw gates like the `latex` output modes.
usepiformat (bool): When set to True use radians for output
fold (int): The number of circuit elements to fold the circuit at.
Defaults to 20
cregbundle (bool): If set True bundle classical registers
showindex (bool): If set True draw an index.
compress (bool): If set True draw a compressed circuit
figwidth (int): The maximum width (in inches) for the output figure.
dpi (int): The DPI to use for the output image. Defaults to 150
margin (list): A list of margin values to adjust spacing around output
image. Takes a list of 4 ints: [x left, x right, y bottom, y top].
creglinestyle (str): The style of line to use for classical registers.
Choices are `'solid'`, `'doublet'`, or any valid matplotlib
`linestyle` kwarg value. Defaults to `doublet`
"""
image = None
config = user_config.get_config()
# Get default from config file else use text
default_output = 'text'
if config:
default_output = config.get('circuit_drawer', 'text')
if default_output == 'auto':
if _matplotlib.HAS_MATPLOTLIB:
default_output = 'mpl'
else:
default_output = 'text'
if output is None:
output = default_output
if output == 'text':
return _text_circuit_drawer(circuit, filename=filename,
line_length=line_length,
reverse_bits=reverse_bits,
plot_barriers=plot_barriers,
justify=justify,
vertical_compression=vertical_compression,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
elif output == 'latex':
image = _latex_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'latex_source':
return _generate_latex_source(circuit,
filename=filename, scale=scale,
style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout)
elif output == 'mpl':
image = _matplotlib_circuit_drawer(circuit, scale=scale,
filename=filename, style=style,
plot_barriers=plot_barriers,
reverse_bits=reverse_bits,
justify=justify,
idle_wires=idle_wires,
with_layout=with_layout,
fold=fold)
else:
raise exceptions.VisualizationError(
'Invalid output type %s selected. The only valid choices '
'are latex, latex_source, text, and mpl' % output)
if image and interactive:
image.show()
return image
|
14,070 |
def check_equality(left, right, check_less_precise):
if check_less_precise:
if not geom_almost_equals(left, right):
unequal_geoms = left[left.geom_almost_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: {unequal_geoms}"
)
else:
if not geom_equals(left, right):
unequal_geoms = left[left.geom_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: {unequal_geoms}"
)
|
def check_equality(left, right, check_less_precise):
if check_less_precise:
if not geom_almost_equals(left, right):
unequal_geoms = left[left.geom_almost_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: \n{unequal_geoms}"
)
else:
if not geom_equals(left, right):
unequal_geoms = left[left.geom_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: {unequal_geoms}"
)
|
6,384 |
def _check_agent_availability(agent_email,scheduled_time):
appointemnts_at_scheduled_time = frappe.get_list('Appointment', filters={'scheduled_time':scheduled_time})
for appointment in appointemnts_at_scheduled_time:
if appointment._assign == agent_email:
return False
return True
|
def _check_agent_availability(agent_email, scheduled_time):
appointemnts_at_scheduled_time = frappe.get_list('Appointment', filters={'scheduled_time':scheduled_time})
for appointment in appointemnts_at_scheduled_time:
if appointment._assign == agent_email:
return False
return True
|
22,035 |
def open(path, convert=False, shuffle=False, fs_options={}, fs=None, *args, **kwargs):
"""Open a DataFrame from file given by path.
Example:
>>> df = vaex.open('sometable.hdf5')
>>> df = vaex.open('somedata*.csv', convert='bigdata.hdf5')
:param str or list path: local or absolute path to file, or glob string, or list of paths
:param convert: Uses `dataframe.export` when convert is a path. If True, ``convert=path+'.hdf5'``
The conversion is skipped if the input file or conversion argument did not change.
:param bool shuffle: shuffle converted DataFrame or not
:param dict fs_options: Extra arguments passed to an optional file system if needed:
* Amazon AWS S3
* `anonymous` - access file without authentication (public files)
* `access_key` - AWS access key, if not provided will use the standard env vars, or the `~/.aws/credentials` file
* `secret_key` - AWS secret key, similar to `access_key`
* `profile` - If multiple profiles are present in `~/.aws/credentials`, pick this one instead of 'default', see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
* `region` - AWS Region, e.g. 'us-east-1`, will be determined automatically if not provided.
* `endpoint_override` - URL/ip to connect to, instead of AWS, e.g. 'localhost:9000' for minio
* Google Cloud Storage
* :py:class:`gcsfs.core.GCSFileSystem`
In addition you can pass the boolean "cache" option.
:param fs: Apache Arrow FileSystem object, or FSSpec FileSystem object, if specified, fs_options should be empty.
:param args: extra arguments for file readers that need it
:param kwargs: extra keyword arguments
:return: return a DataFrame on success, otherwise None
:rtype: DataFrame
Cloud storage support:
Vaex supports streaming of HDF5 files from Amazon AWS S3 and Google Cloud Storage.
Files are by default cached in $HOME/.vaex/file-cache/(s3|gs) such that successive access
is as fast as native disk access.
The following common fs_options are used for S3 access:
* anon: Use anonymous access or not (false by default). (Allowed values are: true,True,1,false,False,0)
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0)
All fs_options can also be encoded in the file path as a query string.
Examples:
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', fs_options={'anonymous': True})
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5', fs_options={'access_key': my_key, 'secret_key': my_secret_key})
>>> df = vaex.open(f's3://mybucket/path/to/file.hdf5?access_key={{my_key}}&secret_key={{my_secret_key}}')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5?profile=myproject')
Google Cloud Storage support:
The following fs_options are used for GCP access:
* token: Authentication method for GCP. Use 'anon' for annonymous access. See https://gcsfs.readthedocs.io/en/latest/index.html#credentials for more details.
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0).
* project and other arguments are passed to :py:class:`gcsfs.core.GCSFileSystem`
Examples:
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5', fs_options={'token': None})
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5?token=anon')
>>> df = vaex.open('gs://vaex-data/testing/xys.hdf5?token=anon&cache=False')
"""
import vaex
import vaex.convert
try:
path = vaex.file.stringyfy(path)
if path in aliases:
path = aliases[path]
path = vaex.file.stringyfy(path)
if path.startswith("http://") or path.startswith("ws://") or \
path.startswith("vaex+http://") or path.startswith("vaex+ws://"): # TODO: think about https and wss
server, name = path.rsplit("/", 1)
url = urlparse(path)
if '?' in name:
name = name[:name.index('?')]
extra_args = {key: values[0] for key, values in parse_qs(url.query).items()}
if 'token' in extra_args:
kwargs['token'] = extra_args['token']
if 'token_trusted' in extra_args:
kwargs['token_trusted'] = extra_args['token_trusted']
client = vaex.connect(server, **kwargs)
return client[name]
if path.startswith("cluster"):
import vaex.enterprise.distributed
return vaex.enterprise.distributed.open(path, *args, **kwargs)
else:
import vaex.file
import glob
if isinstance(path, str):
paths = [path]
else:
paths = path
filenames = []
for path in paths:
naked_path, options = vaex.file.split_options(path)
if glob.has_magic(naked_path):
filenames.extend(list(sorted(vaex.file.glob(path, **kwargs))))
else:
filenames.append(path)
df = None
if len(filenames) == 0:
raise IOError(f'File pattern did not match anything {path}')
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = vaex.convert._convert_name(filenames, shuffle=False)
if len(filenames) == 1:
path = filenames[0]
# # naked_path, _ = vaex.file.split_options(path, fs_options)
_, ext, _ = vaex.file.split_ext(path)
if ext == '.csv': # special case for csv
return vaex.from_csv(path, fs_options=fs_options, fs=fs, convert=convert, **kwargs)
if convert:
path_output = convert if isinstance(convert, str) else filename_hdf5
vaex.convert.convert(
path_input=path, fs_options_input=fs_options, fs_input=fs,
path_output=path_output, fs_options_output=fs_options, fs_output=fs,
*args, **kwargs
)
ds = vaex.dataset.open(path_output, fs_options=fs_options, fs=fs, **kwargs)
else:
ds = vaex.dataset.open(path, fs_options=fs_options, fs=fs, **kwargs)
df = vaex.from_dataset(ds)
if df is None:
if os.path.exists(path):
raise IOError('Could not open file: {}, did you install vaex-hdf5? Is the format supported?'.format(path))
elif len(filenames) > 1:
if convert not in [True, False]:
filename_hdf5 = convert
else:
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
if os.path.exists(filename_hdf5) and convert: # also check mtime
df = vaex.open(filename_hdf5)
else:
dfs = []
for filename in filenames:
dfs.append(vaex.open(filename, convert=bool(convert), shuffle=shuffle, **kwargs))
df = vaex.concat(dfs)
if convert:
if shuffle:
df = df.shuffle()
df.export_hdf5(filename_hdf5)
df = vaex.open(filename_hdf5)
if df is None:
raise IOError('Unknown error opening: {}'.format(path))
return df
except:
logging.getLogger("vaex").error("error opening %r" % path)
raise
|
def open(path, convert=False, shuffle=False, fs_options={}, fs=None, *args, **kwargs):
"""Open a DataFrame from file given by path.
Example:
>>> df = vaex.open('sometable.hdf5')
>>> df = vaex.open('somedata*.csv', convert='bigdata.hdf5')
:param str or list path: local or absolute path to file, or glob string, or list of paths
:param convert: Uses `dataframe.export` when convert is a path. If True, ``convert=path+'.hdf5'``
The conversion is skipped if the input file or conversion argument did not change.
:param bool shuffle: shuffle converted DataFrame or not
:param dict fs_options: Extra arguments passed to an optional file system if needed:
* Amazon AWS S3
* `anonymous` - access file without authentication (public files)
* `access_key` - AWS access key, if not provided will use the standard env vars, or the `~/.aws/credentials` file
* `secret_key` - AWS secret key, similar to `access_key`
* `profile` - If multiple profiles are present in `~/.aws/credentials`, pick this one instead of 'default', see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
* `region` - AWS Region, e.g. 'us-east-1`, will be determined automatically if not provided.
* `endpoint_override` - URL/ip to connect to, instead of AWS, e.g. 'localhost:9000' for minio
* Google Cloud Storage
* :py:class:`gcsfs.core.GCSFileSystem`
In addition you can pass the boolean "cache" option.
:param fs: Apache Arrow FileSystem object, or FSSpec FileSystem object. If this is specified, `fs_options` should be empty.
:param args: extra arguments for file readers that need it
:param kwargs: extra keyword arguments
:return: return a DataFrame on success, otherwise None
:rtype: DataFrame
Cloud storage support:
Vaex supports streaming of HDF5 files from Amazon AWS S3 and Google Cloud Storage.
Files are by default cached in $HOME/.vaex/file-cache/(s3|gs) such that successive access
is as fast as native disk access.
The following common fs_options are used for S3 access:
* anon: Use anonymous access or not (false by default). (Allowed values are: true,True,1,false,False,0)
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0)
All fs_options can also be encoded in the file path as a query string.
Examples:
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', fs_options={'anonymous': True})
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5', fs_options={'access_key': my_key, 'secret_key': my_secret_key})
>>> df = vaex.open(f's3://mybucket/path/to/file.hdf5?access_key={{my_key}}&secret_key={{my_secret_key}}')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5?profile=myproject')
Google Cloud Storage support:
The following fs_options are used for GCP access:
* token: Authentication method for GCP. Use 'anon' for annonymous access. See https://gcsfs.readthedocs.io/en/latest/index.html#credentials for more details.
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0).
* project and other arguments are passed to :py:class:`gcsfs.core.GCSFileSystem`
Examples:
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5', fs_options={'token': None})
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5?token=anon')
>>> df = vaex.open('gs://vaex-data/testing/xys.hdf5?token=anon&cache=False')
"""
import vaex
import vaex.convert
try:
path = vaex.file.stringyfy(path)
if path in aliases:
path = aliases[path]
path = vaex.file.stringyfy(path)
if path.startswith("http://") or path.startswith("ws://") or \
path.startswith("vaex+http://") or path.startswith("vaex+ws://"): # TODO: think about https and wss
server, name = path.rsplit("/", 1)
url = urlparse(path)
if '?' in name:
name = name[:name.index('?')]
extra_args = {key: values[0] for key, values in parse_qs(url.query).items()}
if 'token' in extra_args:
kwargs['token'] = extra_args['token']
if 'token_trusted' in extra_args:
kwargs['token_trusted'] = extra_args['token_trusted']
client = vaex.connect(server, **kwargs)
return client[name]
if path.startswith("cluster"):
import vaex.enterprise.distributed
return vaex.enterprise.distributed.open(path, *args, **kwargs)
else:
import vaex.file
import glob
if isinstance(path, str):
paths = [path]
else:
paths = path
filenames = []
for path in paths:
naked_path, options = vaex.file.split_options(path)
if glob.has_magic(naked_path):
filenames.extend(list(sorted(vaex.file.glob(path, **kwargs))))
else:
filenames.append(path)
df = None
if len(filenames) == 0:
raise IOError(f'File pattern did not match anything {path}')
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = vaex.convert._convert_name(filenames, shuffle=False)
if len(filenames) == 1:
path = filenames[0]
# # naked_path, _ = vaex.file.split_options(path, fs_options)
_, ext, _ = vaex.file.split_ext(path)
if ext == '.csv': # special case for csv
return vaex.from_csv(path, fs_options=fs_options, fs=fs, convert=convert, **kwargs)
if convert:
path_output = convert if isinstance(convert, str) else filename_hdf5
vaex.convert.convert(
path_input=path, fs_options_input=fs_options, fs_input=fs,
path_output=path_output, fs_options_output=fs_options, fs_output=fs,
*args, **kwargs
)
ds = vaex.dataset.open(path_output, fs_options=fs_options, fs=fs, **kwargs)
else:
ds = vaex.dataset.open(path, fs_options=fs_options, fs=fs, **kwargs)
df = vaex.from_dataset(ds)
if df is None:
if os.path.exists(path):
raise IOError('Could not open file: {}, did you install vaex-hdf5? Is the format supported?'.format(path))
elif len(filenames) > 1:
if convert not in [True, False]:
filename_hdf5 = convert
else:
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
if os.path.exists(filename_hdf5) and convert: # also check mtime
df = vaex.open(filename_hdf5)
else:
dfs = []
for filename in filenames:
dfs.append(vaex.open(filename, convert=bool(convert), shuffle=shuffle, **kwargs))
df = vaex.concat(dfs)
if convert:
if shuffle:
df = df.shuffle()
df.export_hdf5(filename_hdf5)
df = vaex.open(filename_hdf5)
if df is None:
raise IOError('Unknown error opening: {}'.format(path))
return df
except:
logging.getLogger("vaex").error("error opening %r" % path)
raise
|
50,156 |
def parse_config_file(options: Options, set_strict_flags: Callable[[], None],
filename: Optional[str],
stdout: Optional[TextIO] = None,
stderr: Optional[TextIO] = None) -> None:
"""Parse a config file into an Options object.
Errors are written to stderr but are not fatal.
If filename is None, fall back to default config files.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
if filename is not None:
config_files: Tuple[str, ...] = (filename,)
else:
config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES))
config_parser = configparser.RawConfigParser()
for config_file in config_files:
if not os.path.exists(config_file):
continue
try:
if is_toml(config_file):
with open(config_file, "rb") as f:
toml_data = tomli.load(f)
# Filter down to just mypy relevant toml keys
toml_data = toml_data.get('tool', {})
if 'mypy' not in toml_data:
continue
toml_data = {'mypy': toml_data['mypy']}
parser: MutableMapping[str, Any] = destructure_overrides(toml_data)
config_types = toml_config_types
else:
config_parser.read(config_file)
parser = config_parser
config_types = ini_config_types
except (tomli.TOMLDecodeError, configparser.Error, ConfigTOMLValueError) as err:
print("%s: %s" % (config_file, err), file=stderr)
else:
if config_file in defaults.SHARED_CONFIG_FILES and 'mypy' not in parser:
continue
file_read = config_file
options.config_file = file_read
break
else:
return
os.environ['MYPY_CONFIG_FILE_DIR'] = os.path.dirname(
os.path.abspath(config_file))
if 'mypy' not in parser:
if filename or file_read not in defaults.SHARED_CONFIG_FILES:
print("%s: No [mypy] section in config file" % file_read, file=stderr)
else:
section = parser['mypy']
prefix = '%s: [%s]: ' % (file_read, 'mypy')
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr)
for k, v in updates.items():
setattr(options, k, v)
options.report_dirs.update(report_dirs)
for name, section in parser.items():
if name.startswith('mypy-'):
prefix = get_prefix(file_read, name)
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr)
if report_dirs:
print("%sPer-module sections should not specify reports (%s)" %
(prefix, ', '.join(s + '_report' for s in sorted(report_dirs))),
file=stderr)
if set(updates) - PER_MODULE_OPTIONS:
print("%sPer-module sections should only specify per-module flags (%s)" %
(prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))),
file=stderr)
updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
globs = name[5:]
for glob in globs.split(','):
# For backwards compatibility, replace (back)slashes with dots.
glob = glob.replace(os.sep, '.')
if os.altsep:
glob = glob.replace(os.altsep, '.')
if (any(c in glob for c in '?[]!') or
any('*' in x and x != '*' for x in glob.split('.'))):
print("%sPatterns must be fully-qualified module names, optionally "
"with '*' in some components (e.g spam.*.eggs.*)"
% prefix,
file=stderr)
else:
options.per_module_options[glob] = updates
|
def parse_config_file(options: Options, set_strict_flags: Callable[[], None],
filename: Optional[str],
stdout: Optional[TextIO] = None,
stderr: Optional[TextIO] = None) -> None:
"""Parse a config file into an Options object.
Errors are written to stderr but are not fatal.
If filename is None, fall back to default config files.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
if filename is not None:
config_files: Tuple[str, ...] = (filename,)
else:
config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES))
config_parser = configparser.RawConfigParser()
for config_file in config_files:
if not os.path.exists(config_file):
continue
try:
if is_toml(config_file):
with open(config_file, encoding="utf-8") as f:
toml_data = tomli.loads(f.read())
# Filter down to just mypy relevant toml keys
toml_data = toml_data.get('tool', {})
if 'mypy' not in toml_data:
continue
toml_data = {'mypy': toml_data['mypy']}
parser: MutableMapping[str, Any] = destructure_overrides(toml_data)
config_types = toml_config_types
else:
config_parser.read(config_file)
parser = config_parser
config_types = ini_config_types
except (tomli.TOMLDecodeError, configparser.Error, ConfigTOMLValueError) as err:
print("%s: %s" % (config_file, err), file=stderr)
else:
if config_file in defaults.SHARED_CONFIG_FILES and 'mypy' not in parser:
continue
file_read = config_file
options.config_file = file_read
break
else:
return
os.environ['MYPY_CONFIG_FILE_DIR'] = os.path.dirname(
os.path.abspath(config_file))
if 'mypy' not in parser:
if filename or file_read not in defaults.SHARED_CONFIG_FILES:
print("%s: No [mypy] section in config file" % file_read, file=stderr)
else:
section = parser['mypy']
prefix = '%s: [%s]: ' % (file_read, 'mypy')
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr)
for k, v in updates.items():
setattr(options, k, v)
options.report_dirs.update(report_dirs)
for name, section in parser.items():
if name.startswith('mypy-'):
prefix = get_prefix(file_read, name)
updates, report_dirs = parse_section(
prefix, options, set_strict_flags, section, config_types, stderr)
if report_dirs:
print("%sPer-module sections should not specify reports (%s)" %
(prefix, ', '.join(s + '_report' for s in sorted(report_dirs))),
file=stderr)
if set(updates) - PER_MODULE_OPTIONS:
print("%sPer-module sections should only specify per-module flags (%s)" %
(prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))),
file=stderr)
updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
globs = name[5:]
for glob in globs.split(','):
# For backwards compatibility, replace (back)slashes with dots.
glob = glob.replace(os.sep, '.')
if os.altsep:
glob = glob.replace(os.altsep, '.')
if (any(c in glob for c in '?[]!') or
any('*' in x and x != '*' for x in glob.split('.'))):
print("%sPatterns must be fully-qualified module names, optionally "
"with '*' in some components (e.g spam.*.eggs.*)"
% prefix,
file=stderr)
else:
options.per_module_options[glob] = updates
|
30,118 |
def var_n_mutated(L, k, r1, q=None):
# there are computational issues in the variance formula that we solve here
# by the use of higher-precision arithmetic; the problem occurs when r is
# very small; for example, with L=10,k=2,r1=1e-6 standard precision
# gives varN<0 which is nonsense; by using the mpf type, we get the correct
# answer which is about 0.000038.
if r1 == 0:
return 0.0
r1 = float(r1)
if q == None: # we assume that if q is provided, it is correct for r1
q = r1_to_q(k, r1)
varN = (
L * (1 - q) * (q * (2 * k + (2 / r1) - 1) - 2 * k)
+ k * (k - 1) * (1 - q) ** 2
+ (2 * (1 - q) / (r1**2)) * ((1 + (k - 1) * (1 - q)) * r1 - q)
)
if varN < 0.0: # this seems to happen only with super tiny test data
raise ValueError("Error: varN <0.0!")
return float(varN)
|
def var_n_mutated(L, k, r1, *, q=None):
# there are computational issues in the variance formula that we solve here
# by the use of higher-precision arithmetic; the problem occurs when r is
# very small; for example, with L=10,k=2,r1=1e-6 standard precision
# gives varN<0 which is nonsense; by using the mpf type, we get the correct
# answer which is about 0.000038.
if r1 == 0:
return 0.0
r1 = float(r1)
if q == None: # we assume that if q is provided, it is correct for r1
q = r1_to_q(k, r1)
varN = (
L * (1 - q) * (q * (2 * k + (2 / r1) - 1) - 2 * k)
+ k * (k - 1) * (1 - q) ** 2
+ (2 * (1 - q) / (r1**2)) * ((1 + (k - 1) * (1 - q)) * r1 - q)
)
if varN < 0.0: # this seems to happen only with super tiny test data
raise ValueError("Error: varN <0.0!")
return float(varN)
|
34,618 |
def ensure_conversation_exists(app: Sanic):
"""Wraps a request handler ensuring the conversation exists.
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
conversation_id = kwargs["conversation_id"]
tracker = app.agent.create_processor().get_tracker(conversation_id)
if tracker is not None:
return f(*args, **kwargs)
else:
raise ErrorResponse(404, "Not found", "Conversation ID not found.")
return decorated
return decorator
|
def ensure_conversation_exists(app: Sanic) -> Callable[..., HTTPResponse]:
"""Wraps a request handler ensuring the conversation exists.
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
conversation_id = kwargs["conversation_id"]
tracker = app.agent.create_processor().get_tracker(conversation_id)
if tracker is not None:
return f(*args, **kwargs)
else:
raise ErrorResponse(404, "Not found", "Conversation ID not found.")
return decorated
return decorator
|
32,983 |
def test_reclayer_optimize_out_dot_kv_in_rec():
# Same as test_reclayer_optimize_out_dot, but with the att key/value layers declared INSIDE the rec layer.
AttNumHeads = 4
EncKeyPerHeadDim = 5
EncValuePerHeadDim = 7
EncKeyTotalDim = AttNumHeads * EncKeyPerHeadDim
EncValueTotalDim = AttNumHeads * EncValuePerHeadDim
check_reclayer_optimize_out(
{"class": "linear", "activation": None, "from": ["att"]},
other_subnet_layers={
"s": {"class": "linear", "activation": None, "with_bias": False, "from": ["data:source"],
"n_out": EncKeyTotalDim}, # (B, D) -- Q (query). D should be same as enc_ctx
"att_query": {"class": "split_dims", "axis": "F", "dims": (AttNumHeads, EncKeyPerHeadDim),
"from": ["s"]}, # (B, H, D/H)
# this does not depend on the classes, but you should still be able to define it here.
"enc_ctx0": {"class": "linear", "activation": None, "with_bias": False, "from": ["base:encoder"],
"n_out": EncKeyTotalDim}, # (B, enc-T, D)
"enc_ctx": {"class": "split_dims", "axis": "F", "dims": (AttNumHeads, EncKeyPerHeadDim),
"from": ["enc_ctx0"], "is_output_layer": True}, # (B, enc-T, H, D/H)
"enc_value0": {"class": "linear", "activation": None, "with_bias": False, "from": ["base:encoder"],
"n_out": EncValueTotalDim}, # (B, enc-T, D)
"enc_value": {"class": "split_dims", "axis": "F", "dims": (AttNumHeads, EncValuePerHeadDim),
"from": ["enc_value0"], "is_output_layer": True}, # (B, enc-T, H, D/H)
"energy": {"class": "dot", "red1": -1, "red2": -1, "var1": "T", "var2": "T?", # Note the "T?".
"from": ["enc_ctx", "att_query"]},
"att_weights": {"class": "softmax_over_spatial", "from": ["energy"]}, # (B, enc-T, H, 1)
"att0": {"class": "generic_attention", "weights": "att_weights", "base": "enc_value"}, # (B, H, V)
"att": {"class": "merge_dims", "axes": "static", "from": ["att0"]}, # (B, H*V); Use "static" here.
},
shared_base_net={
"encoder": {"class": "copy", "from": ["data"]}
},
rtol=1e-3)
|
def test_reclayer_optimize_out_dot_kv_in_rec():
# Same as test_reclayer_optimize_out_dot, but with the att key/value layers declared INSIDE the rec layer.
AttNumHeads = 4
EncKeyPerHeadDim = 5
EncValuePerHeadDim = 7
EncKeyTotalDim = AttNumHeads * EncKeyPerHeadDim
EncValueTotalDim = AttNumHeads * EncValuePerHeadDim
check_reclayer_optimize_out(
{"class": "linear", "activation": None, "from": ["att"]},
other_subnet_layers={
"s": {"class": "linear", "activation": None, "with_bias": False, "from": ["data:source"],
"n_out": EncKeyTotalDim}, # (B, D) -- Q (query). D should be same as enc_ctx
"att_query": {"class": "split_dims", "axis": "F", "dims": (AttNumHeads, EncKeyPerHeadDim),
"from": ["s"]}, # (B, H, D/H)
# this does not depend on the classes, but you should still be able to define it here.
"enc_ctx0": {"class": "linear", "activation": None, "with_bias": False, "from": ["base:encoder"],
"n_out": EncKeyTotalDim}, # (B, enc-T, D)
"enc_ctx": {"class": "split_dims", "axis": "F", "dims": (AttNumHeads, EncKeyPerHeadDim),
"from": ["enc_ctx0"], "is_output_layer": True}, # (B, enc-T, H, D/H)
"enc_value0": {"class": "linear", "activation": None, "with_bias": False, "from": ["base:encoder"],
"n_out": EncValueTotalDim}, # (B, enc-T, D)
"enc_value": {"class": "split_dims", "axis": "F", "dims": (AttNumHeads, EncValuePerHeadDim),
"from": ["enc_value0"], "is_output_layer": True}, # (B, enc-T, H, D/H)
"energy": {"class": "dot", "red1": -1, "red2": -1, "var1": "T", "var2": "T?", # Note the "T?".
"from": ["enc_ctx", "att_query"]},
"att_weights": {"class": "softmax_over_spatial", "from": ["energy"]}, # (B, enc-T, H, 1)
"att0": {"class": "generic_attention", "weights": "att_weights", "base": "enc_value"}, # (B, H, V)
"att": {"class": "merge_dims", "axes": "static", "from": ["att0"]}, # (B, H*V); Use "static" here.
},
shared_base_net={
"encoder": {"class": "copy", "from": "data", "is_output_layer": True}
},
rtol=1e-3)
|
54,301 |
def generate_asimov_data(asimov_mu, data, pdf, init_pars, par_bounds, fixed_params):
"""
Compute Asimov Dataset (expected yields at best-fit values) for a given POI value.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = observations + model.config.auxdata
>>> mu_test = 1.0
>>> pyhf.infer.calculators.generate_asimov_data(mu_test, data, model, None, None, None)
array([ 60.61229858, 56.52802479, 270.06832542, 48.31545488])
Args:
asimov_mu (:obj:`float`): The value for the parameter of interest to be used.
data (:obj:`tensor`): The observed data.
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.
init_pars (:obj:`tensor` of :obj:`float`): The starting values of the model parameters for minimization.
par_bounds (:obj:`tensor` of shape Nx2): The extrema of values the model parameters
are allowed to reach in the fit.
fixed_params (:obj:`tensor` of :obj:`bool`): The flag to set a parameter constant to its starting
value during minimization.
Returns:
Tensor: The Asimov dataset.
"""
bestfit_nuisance_asimov = fixed_poi_fit(
asimov_mu, data, pdf, init_pars, par_bounds, fixed_params
)
return pdf.expected_data(bestfit_nuisance_asimov)
|
def generate_asimov_data(asimov_mu, data, pdf, init_pars, par_bounds, fixed_params):
"""
Compute Asimov Dataset (expected yields at best-fit values) for a given POI value.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = observations + model.config.auxdata
>>> mu_test = 1.0
>>> pyhf.infer.calculators.generate_asimov_data(mu_test, data, model, None, None, None)
array([ 60.61229858, 56.52802479, 270.06832542, 48.31545488])
Args:
asimov_mu (:obj:`float`): The value for the parameter of interest to be used.
data (:obj:`tensor`): The observed data.
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema ``model.json``.
init_pars (:obj:`tensor` of :obj:`float`): The starting values of the model parameters for minimization.
par_bounds (:obj:`tensor`): The extrema of values the model parameters
are allowed to reach in the fit.
The shape should be ``(n, 2)`` for ``n`` model parameters.
fixed_params (:obj:`tensor` of :obj:`bool`): The flag to set a parameter constant to its starting
value during minimization.
Returns:
Tensor: The Asimov dataset.
"""
bestfit_nuisance_asimov = fixed_poi_fit(
asimov_mu, data, pdf, init_pars, par_bounds, fixed_params
)
return pdf.expected_data(bestfit_nuisance_asimov)
|
41,743 |
def objective(trial: optuna.trial.Trial) -> float:
n_layers = trial.suggest_int('n_layers', 2, 5)
n_channels = [3]
for i in range(n_layers):
out_channels = trial.suggest_int('n_channels_{}'.format(i), 3, 32)
n_channels.append(out_channels)
n_channels.append(2)
model = simple_cnn(n_channels)
learn = Learner(
data, model, silent=True, metrics=[accuracy],
callback_fns=[partial(FastAIPruningCallback, trial=trial, monitor='valid_loss')])
learn.fit(EPOCHS)
return learn.validate()[-1].item()
|
def objective(trial):
n_layers = trial.suggest_int('n_layers', 2, 5)
n_channels = [3]
for i in range(n_layers):
out_channels = trial.suggest_int('n_channels_{}'.format(i), 3, 32)
n_channels.append(out_channels)
n_channels.append(2)
model = simple_cnn(n_channels)
learn = Learner(
data, model, silent=True, metrics=[accuracy],
callback_fns=[partial(FastAIPruningCallback, trial=trial, monitor='valid_loss')])
learn.fit(EPOCHS)
return learn.validate()[-1].item()
|
11,900 |
def contain(image, size, method=Image.BICUBIC):
"""
Returns a sized version of the image, expanded to fill the requested aspect ratio
and size.
:param image: The image to size and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.
:return: An image.
"""
im_ratio = image.width / image.height
dest_ratio = size[0] / size[1]
if im_ratio != dest_ratio:
if im_ratio > dest_ratio:
new_height = int(image.height / image.width * size[0])
if new_height != size[1]:
size = (size[0], new_height)
else:
new_width = int(image.width / image.height * size[1])
if new_width != size[0]:
size = (new_width, size[1])
return image.resize(size, resample=method)
|
def contain(image, size, method=Image.BICUBIC):
"""
Returns a sized version of the image, expanded to fill the requested aspect ratio
and size.
:param image: The image to size and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: Resampling method to use. Default is
:py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.
:return: An image.
"""
im_ratio = image.width / image.height
dest_ratio = size[0] / size[1]
if im_ratio != dest_ratio:
if im_ratio > dest_ratio:
new_height = int(image.height / image.width * size[0])
if new_height != size[1]:
size = (size[0], new_height)
else:
new_width = int(image.width / image.height * size[1])
if new_width != size[0]:
size = (new_width, size[1])
return image.resize(size, resample=method)
|
7,510 |
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
6 element array. First 4 elements are PC matrix, last 2 are CRPIX.
lon, lat: array
Sky coordinates.
x, y: array
Pixel coordinates
w_obj: `~astropy.wcs.WCS`
WCS object
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
resids = np.concatenate((lon-lon2, lat-lat2))
resids[resids > 180] = 360 - resids[resids > 180]
resids[resids < -180] = 360 + resids[resids < -180]
return resids
|
def _linear_wcs_fit(params, lon, lat, x, y, w_obj):
"""
Objective function for fitting linear terms.
Parameters
----------
params : array
6 element array. First 4 elements are PC matrix, last 2 are CRPIX.
lon, lat: array
Sky coordinates.
x, y: array
Pixel coordinates
w_obj: `~astropy.wcs.WCS`
WCS object
"""
cd = params[0:4]
crpix = params[4:6]
w_obj.wcs.cd = ((cd[0], cd[1]), (cd[2], cd[3]))
w_obj.wcs.crpix = crpix
lon2, lat2 = w_obj.wcs_pix2world(x, y, 0)
resids = np.concatenate((lon-lon2, lat-lat2))
resids[resids > 180] = 360 - resids[resids > 180]
resids[resids < -180] = 360 + resids[resids < -180]
return resids
|
20,513 |
def create_grid(file_like):
"""Create grid, initialize fields, and set boundary conditions.
**create_grid** expects a dictionary with three keys "grid", "fields", and
"boundary_conditions".
**Dictionary Section "grid"**
The value associated with the "grid" key should itself be a dictionary
containing the name of a Landlab model grid type as its only key. The
following grid types are valid:
- :py:class:`~landlab.grid.raster.RasterModelGrid`
- :py:class:`~landlab.grid.voronoi.VoronoiDelaunayGrid`
- :py:class:`~landlab.grid.hex.HexModelGrid`
- :py:class:`~landlab.grid.radial.RadialModelGrid`
- :py:class:`~landlab.grid.network.NetworkModelGrid`
The value associated with the grid name key is a list containing the
arguments. If any keyword arguments are passed, they should be passed as
the last element of the list. For example the following code block is a
yaml file indicating a RasterModelGrid with shape (4, 5) and xy-spacing of
(3, 4).
.. code-block:: yaml
grid:
RasterModelGrid:
- [4, 5]
- xy_spacing: [3, 4]
These arguments and keyword arguments will be passed to the ``__init__``
constructor of the specified model grid. Refer to the documentation for
each grid to determine its requirements.
**Dictionary Section "fields"**
Fields can be created by reading from files or by creating synthetic
values.
The value associated with the "fields" key is a nested set of dictionaries
indicating where the fields are created, what the field names are, and how
to create the fields. At the highest hierachical level, the value
associated with the "fields" key must be a dictionary with keys indicating
at which grid elements fields should be created (e.g. to create fields at
node, use "at_node").
The value associated with each "at_xxx" value is itself a dictionary
indicating the name of the field an how it should be created. A field can
either be created by reading from a file or creating synthetic values. The
:py:func:`~landlab.io.netcdf.read.read_netcdf` and
:py:func:`~landlab.io.esri_ascii.read_esri_ascii` functions, and the
:py:mod:`synthetic fields <landlab.values.synthetic>`
package are currently supported methods to create fields. These may be
chained together (as is shown in the Example section below). If these
functions do not meet your needs, we welcome contributions that extend the
capabilities of this function.
The following example would uses the
:py:func:`~landlab.values.synthetic.plane` function from the synthetic
values package to create an at_node value for the field
topographic__elevation.
.. code-block:: yaml
fields:
at_node:
topographic__elevation:
plane:
- point: [1, 1, 1]
normal: [-2, -1, 1]
**Dictionary Section "boundary_conditions"**
The final portion of the input dictionary calls bound functions of the
model grid to set boundary conditions. Any valid bound function can be
called. The specified functions are provided in a list, and called in
order. If required, multiple functions may be called.
Each entry to the list is a dictionary with a single key, the name of the
bound function. The value associated with that key is a list of arguments
and keyword arguments, similar in structure to those described above.
For example, the following sets closed boundaries at all sides of the grid.
.. code-block:: yaml
boundary_conditions:
- set_closed_boundaries_at_grid_edges:
- True
- True
- True
- True
Parameters
----------
file_like : file_like or str
Dictionary, contents of a dictionary as a string, a file-like object,
or the path to a file containing a YAML dictionary.
Examples
--------
>>> import numpy as np
>>> np.random.seed(42)
>>> from landlab import create_grid
>>> p = {'grid': {'RasterModelGrid': [(4,5),
... {'xy_spacing': (3, 4)}]
... },
... 'fields': {'at_node': {'spam': {'plane': [{'point': (1, 1, 1),
... 'normal': (-2, -1, 1)}],
... 'random': [{'distribution': 'uniform',
... 'low': 1,
... 'high': 4}]
... },
... },
... 'at_link': {'eggs': {'constant': [{'where': 'ACTIVE_LINK',
... 'constant': 12}],
... },
... },
... },
... 'boundary_conditions': [
... {'set_closed_boundaries_at_grid_edges':
... [True, True, True, True]
... }]
... }
>>> mg = create_grid(p)
>>> mg.number_of_nodes
20
>>> "spam" in mg.at_node
True
>>> "eggs" in mg.at_link
True
>>> mg.x_of_node
array([ 0., 3., 6., 9., 12.,
0., 3., 6., 9., 12.,
0., 3., 6., 9., 12.,
0., 3., 6., 9., 12.])
>>> mg.status_at_node
array([4, 4, 4, 4, 4,
4, 0, 0, 0, 4,
4, 0, 0, 0, 4,
4, 4, 4, 4, 4], dtype=uint8)
>>> np.round(mg.at_node['spam'].reshape(mg.shape), decimals=2)
array([[ 0.12, 7.85, 13.2 , 18.8 , 23.47],
[ 3.47, 9.17, 17.6 , 22.8 , 29.12],
[ 7.06, 15.91, 21.5 , 25.64, 31.55],
[ 11.55, 17.91, 24.57, 30.3 , 35.87]])
"""
# part 0, parse input
if isinstance(file_like, dict):
dict_like = file_like
else:
dict_like = load_params(file_like)
# part 1 create grid
grid_dict = dict_like.pop("grid", None)
if grid_dict is None:
msg = "create_grid: no grid dictionary provided. This is required."
raise ValueError(msg)
for grid_type in grid_dict:
if grid_type in _MODEL_GRIDS:
grid_class = _MODEL_GRIDS[grid_type]
else:
msg = "create_grid: provided grid type not supported."
raise ValueError
if len(grid_dict) != 1:
msg = ("create_grid: two entries to grid dictionary provided. "
"This is not supported.")
raise ValueError
args, kwargs = _parse_args_kwargs(grid_dict.pop(grid_type))
grid = grid_class(*args, **kwargs)
# part two, create fields
fields_dict = dict_like.pop("fields", {})
# for each grid element:
for at_group in fields_dict:
at = at_group[3:]
if at not in grid.groups:
msg = (
"create_grid: No field location ",
"{at} ".format(at=at),
"exists for grid types",
"{grid}. ".format(grid=grid_type),
)
raise ValueError(msg)
at_dict = fields_dict[at_group]
# for field at grid element
for name in at_dict:
name_dict = at_dict[name]
# for each function, add values.
for func in name_dict:
args, kwargs = _parse_args_kwargs(name_dict[func])
if func in _SYNTHETIC_FIELD_CONSTRUCTORS:
# if any args, raise an error, there shouldn't be any.
synth_function = _SYNTHETIC_FIELD_CONSTRUCTORS[func]
synth_function(grid, name, at=at, **kwargs)
elif func == "read_esri_ascii":
read_esri_ascii(*args, grid=grid, name=name, **kwargs)
elif func == "read_netcdf":
read_netcdf(*args, grid=grid, name=name, **kwargs)
else:
msg = (
"create_grid: Bad function ",
"{func} ".format(func=func),
"for creating a field.",
)
raise ValueError(msg)
# part three, set boundary conditions
bc_list = dict_like.pop("boundary_conditions", [])
for bc_function_dict in bc_list:
if len(bc_function_dict) != 1:
msg = ("create_grid: two entries to a boundary condition function "
"dictionary were provided. This is not supported.")
raise ValueError(msg)
for bc_function in bc_function_dict:
args, kwargs = _parse_args_kwargs(bc_function_dict[bc_function])
methods = dict(inspect.getmembers(grid, inspect.ismethod))
if bc_function in methods:
methods[bc_function](*args, **kwargs)
else:
msg = (
"create_grid: No function ",
"{func} ".format(func=bc_function),
"exists for grid types ",
"{grid}. ".format(grid=grid_type),
"If you think this type of grid should have such a ",
"function. Please create a GitHub Issue to discuss ",
"contributing it to the Landlab codebase.",
)
raise ValueError(msg)
return grid
|
def create_grid(file_like):
"""Create grid, initialize fields, and set boundary conditions.
**create_grid** expects a dictionary with three keys: "grid", "fields", and
"boundary_conditions".
**Dictionary Section "grid"**
The value associated with the "grid" key should itself be a dictionary
containing the name of a Landlab model grid type as its only key. The
following grid types are valid:
- :py:class:`~landlab.grid.raster.RasterModelGrid`
- :py:class:`~landlab.grid.voronoi.VoronoiDelaunayGrid`
- :py:class:`~landlab.grid.hex.HexModelGrid`
- :py:class:`~landlab.grid.radial.RadialModelGrid`
- :py:class:`~landlab.grid.network.NetworkModelGrid`
The value associated with the grid name key is a list containing the
arguments. If any keyword arguments are passed, they should be passed as
the last element of the list. For example the following code block is a
yaml file indicating a RasterModelGrid with shape (4, 5) and xy-spacing of
(3, 4).
.. code-block:: yaml
grid:
RasterModelGrid:
- [4, 5]
- xy_spacing: [3, 4]
These arguments and keyword arguments will be passed to the ``__init__``
constructor of the specified model grid. Refer to the documentation for
each grid to determine its requirements.
**Dictionary Section "fields"**
Fields can be created by reading from files or by creating synthetic
values.
The value associated with the "fields" key is a nested set of dictionaries
indicating where the fields are created, what the field names are, and how
to create the fields. At the highest hierachical level, the value
associated with the "fields" key must be a dictionary with keys indicating
at which grid elements fields should be created (e.g. to create fields at
node, use "at_node").
The value associated with each "at_xxx" value is itself a dictionary
indicating the name of the field an how it should be created. A field can
either be created by reading from a file or creating synthetic values. The
:py:func:`~landlab.io.netcdf.read.read_netcdf` and
:py:func:`~landlab.io.esri_ascii.read_esri_ascii` functions, and the
:py:mod:`synthetic fields <landlab.values.synthetic>`
package are currently supported methods to create fields. These may be
chained together (as is shown in the Example section below). If these
functions do not meet your needs, we welcome contributions that extend the
capabilities of this function.
The following example would uses the
:py:func:`~landlab.values.synthetic.plane` function from the synthetic
values package to create an at_node value for the field
topographic__elevation.
.. code-block:: yaml
fields:
at_node:
topographic__elevation:
plane:
- point: [1, 1, 1]
normal: [-2, -1, 1]
**Dictionary Section "boundary_conditions"**
The final portion of the input dictionary calls bound functions of the
model grid to set boundary conditions. Any valid bound function can be
called. The specified functions are provided in a list, and called in
order. If required, multiple functions may be called.
Each entry to the list is a dictionary with a single key, the name of the
bound function. The value associated with that key is a list of arguments
and keyword arguments, similar in structure to those described above.
For example, the following sets closed boundaries at all sides of the grid.
.. code-block:: yaml
boundary_conditions:
- set_closed_boundaries_at_grid_edges:
- True
- True
- True
- True
Parameters
----------
file_like : file_like or str
Dictionary, contents of a dictionary as a string, a file-like object,
or the path to a file containing a YAML dictionary.
Examples
--------
>>> import numpy as np
>>> np.random.seed(42)
>>> from landlab import create_grid
>>> p = {'grid': {'RasterModelGrid': [(4,5),
... {'xy_spacing': (3, 4)}]
... },
... 'fields': {'at_node': {'spam': {'plane': [{'point': (1, 1, 1),
... 'normal': (-2, -1, 1)}],
... 'random': [{'distribution': 'uniform',
... 'low': 1,
... 'high': 4}]
... },
... },
... 'at_link': {'eggs': {'constant': [{'where': 'ACTIVE_LINK',
... 'constant': 12}],
... },
... },
... },
... 'boundary_conditions': [
... {'set_closed_boundaries_at_grid_edges':
... [True, True, True, True]
... }]
... }
>>> mg = create_grid(p)
>>> mg.number_of_nodes
20
>>> "spam" in mg.at_node
True
>>> "eggs" in mg.at_link
True
>>> mg.x_of_node
array([ 0., 3., 6., 9., 12.,
0., 3., 6., 9., 12.,
0., 3., 6., 9., 12.,
0., 3., 6., 9., 12.])
>>> mg.status_at_node
array([4, 4, 4, 4, 4,
4, 0, 0, 0, 4,
4, 0, 0, 0, 4,
4, 4, 4, 4, 4], dtype=uint8)
>>> np.round(mg.at_node['spam'].reshape(mg.shape), decimals=2)
array([[ 0.12, 7.85, 13.2 , 18.8 , 23.47],
[ 3.47, 9.17, 17.6 , 22.8 , 29.12],
[ 7.06, 15.91, 21.5 , 25.64, 31.55],
[ 11.55, 17.91, 24.57, 30.3 , 35.87]])
"""
# part 0, parse input
if isinstance(file_like, dict):
dict_like = file_like
else:
dict_like = load_params(file_like)
# part 1 create grid
grid_dict = dict_like.pop("grid", None)
if grid_dict is None:
msg = "create_grid: no grid dictionary provided. This is required."
raise ValueError(msg)
for grid_type in grid_dict:
if grid_type in _MODEL_GRIDS:
grid_class = _MODEL_GRIDS[grid_type]
else:
msg = "create_grid: provided grid type not supported."
raise ValueError
if len(grid_dict) != 1:
msg = ("create_grid: two entries to grid dictionary provided. "
"This is not supported.")
raise ValueError
args, kwargs = _parse_args_kwargs(grid_dict.pop(grid_type))
grid = grid_class(*args, **kwargs)
# part two, create fields
fields_dict = dict_like.pop("fields", {})
# for each grid element:
for at_group in fields_dict:
at = at_group[3:]
if at not in grid.groups:
msg = (
"create_grid: No field location ",
"{at} ".format(at=at),
"exists for grid types",
"{grid}. ".format(grid=grid_type),
)
raise ValueError(msg)
at_dict = fields_dict[at_group]
# for field at grid element
for name in at_dict:
name_dict = at_dict[name]
# for each function, add values.
for func in name_dict:
args, kwargs = _parse_args_kwargs(name_dict[func])
if func in _SYNTHETIC_FIELD_CONSTRUCTORS:
# if any args, raise an error, there shouldn't be any.
synth_function = _SYNTHETIC_FIELD_CONSTRUCTORS[func]
synth_function(grid, name, at=at, **kwargs)
elif func == "read_esri_ascii":
read_esri_ascii(*args, grid=grid, name=name, **kwargs)
elif func == "read_netcdf":
read_netcdf(*args, grid=grid, name=name, **kwargs)
else:
msg = (
"create_grid: Bad function ",
"{func} ".format(func=func),
"for creating a field.",
)
raise ValueError(msg)
# part three, set boundary conditions
bc_list = dict_like.pop("boundary_conditions", [])
for bc_function_dict in bc_list:
if len(bc_function_dict) != 1:
msg = ("create_grid: two entries to a boundary condition function "
"dictionary were provided. This is not supported.")
raise ValueError(msg)
for bc_function in bc_function_dict:
args, kwargs = _parse_args_kwargs(bc_function_dict[bc_function])
methods = dict(inspect.getmembers(grid, inspect.ismethod))
if bc_function in methods:
methods[bc_function](*args, **kwargs)
else:
msg = (
"create_grid: No function ",
"{func} ".format(func=bc_function),
"exists for grid types ",
"{grid}. ".format(grid=grid_type),
"If you think this type of grid should have such a ",
"function. Please create a GitHub Issue to discuss ",
"contributing it to the Landlab codebase.",
)
raise ValueError(msg)
return grid
|
43,650 |
def _spin2_matrix_elements(sz):
r"""Builds the table of matrix elements
:math:`\langle \bm{\alpha}, \bm{\beta} \vert \hat{s}_1 \cdot \hat{s}_2 \vert
\bm{\gamma}, \bm{\delta} \rangle` of the two-particle spin operator
:math:`\hat{s}_1 \cdot \hat{s}_2`.
The matrix elements are evaluated using the expression,
.. math::
\langle ~ (\alpha, s_{z_\alpha});~ (\beta, s_{z_\beta}) ~ \vert \hat{s}_1 &&
\cdot \hat{s}_2 \vert ~ (\gamma, s_{z_\gamma}); ~ (\delta, s_{z_\gamma}) ~ \rangle =
\delta_{\alpha,\delta} \delta_{\beta,\gamma} \\
&& \times \left( \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}+1}
\delta_{s_{z_\beta}, s_{z_\gamma}-1} + \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}-1}
\delta_{s_{z_\beta}, s_{z_\gamma}+1} + s_{z_\alpha} s_{z_\beta}
\delta_{s_{z_\alpha}, s_{z_\delta}} \delta_{s_{z_\beta}, s_{z_\gamma}} \right),
where :math:`\alpha` and :math:`s_{z_\alpha}` refer to the quantum numbers of the spatial
function and the spin projection, respectively, of the single-particle state
:math:`\vert \bm{\alpha} \rangle \equiv \vert \alpha, s_{z_\alpha} \rangle`.
Args:
sz (array[float]): spin-projection of the single-particle states
Returns:
array: NumPy array with the table of matrix elements. The first four columns
contain the indices :math:`\bm{\alpha}`, :math:`\bm{\beta}`, :math:`\bm{\gamma}`,
:math:`\bm{\delta}` and the fifth column stores the computed matrix element.
**Example**
>>> sz = np.array([0.5, -0.5])
>>> print(_spin2_matrix_elements(sz))
[[ 0. 0. 0. 0. 0.25]
[ 0. 1. 1. 0. -0.25]
[ 1. 0. 0. 1. -0.25]
[ 1. 1. 1. 1. 0.25]
[ 0. 1. 0. 1. 0.5 ]
[ 1. 0. 1. 0. 0.5 ]]
"""
n = np.arange(sz.size)
alpha = n.reshape(-1, 1, 1, 1)
beta = n.reshape(1, -1, 1, 1)
gamma = n.reshape(1, 1, -1, 1)
delta = n.reshape(1, 1, 1, -1)
# we only care about indices satisfying the following boolean mask
mask = np.logical_and(alpha // 2 == delta // 2, beta // 2 == gamma // 2)
# diagonal elements
diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])
diag_indices = np.argwhere(np.logical_and(mask, diag_mask))
diag_values = (sz[alpha] * sz[beta]).flatten()
diag = np.vstack([diag_indices.T, diag_values]).T
# off-diagonal elements
m1 = np.logical_and(sz[alpha] == sz[delta] + 1, sz[beta] == sz[gamma] - 1)
m2 = np.logical_and(sz[alpha] == sz[delta] - 1, sz[beta] == sz[gamma] + 1)
off_diag_mask = np.logical_and(mask, np.logical_or(m1, m2))
off_diag_indices = np.argwhere(off_diag_mask)
off_diag_values = np.full([len(off_diag_indices)], 0.5)
off_diag = np.vstack([off_diag_indices.T, off_diag_values]).T
# combine the off diagonal and diagonal tables into a single table
return np.vstack([diag, off_diag])
|
def _spin2_matrix_elements(sz):
r"""Builds the table of matrix elements
:math:`\langle \bm{\alpha}, \bm{\beta} \vert \hat{s}_1 \cdot \hat{s}_2 \vert
\bm{\gamma}, \bm{\delta} \rangle` of the two-particle spin operator
:math:`\hat{s}_1 \cdot \hat{s}_2`.
The matrix elements are evaluated using the expression
.. math::
\langle ~ (\alpha, s_{z_\alpha});~ (\beta, s_{z_\beta}) ~ \vert \hat{s}_1 &&
\cdot \hat{s}_2 \vert ~ (\gamma, s_{z_\gamma}); ~ (\delta, s_{z_\gamma}) ~ \rangle =
\delta_{\alpha,\delta} \delta_{\beta,\gamma} \\
&& \times \left( \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}+1}
\delta_{s_{z_\beta}, s_{z_\gamma}-1} + \frac{1}{2} \delta_{s_{z_\alpha}, s_{z_\delta}-1}
\delta_{s_{z_\beta}, s_{z_\gamma}+1} + s_{z_\alpha} s_{z_\beta}
\delta_{s_{z_\alpha}, s_{z_\delta}} \delta_{s_{z_\beta}, s_{z_\gamma}} \right),
where :math:`\alpha` and :math:`s_{z_\alpha}` refer to the quantum numbers of the spatial
function and the spin projection, respectively, of the single-particle state
:math:`\vert \bm{\alpha} \rangle \equiv \vert \alpha, s_{z_\alpha} \rangle`.
Args:
sz (array[float]): spin-projection of the single-particle states
Returns:
array: NumPy array with the table of matrix elements. The first four columns
contain the indices :math:`\bm{\alpha}`, :math:`\bm{\beta}`, :math:`\bm{\gamma}`,
:math:`\bm{\delta}` and the fifth column stores the computed matrix element.
**Example**
>>> sz = np.array([0.5, -0.5])
>>> print(_spin2_matrix_elements(sz))
[[ 0. 0. 0. 0. 0.25]
[ 0. 1. 1. 0. -0.25]
[ 1. 0. 0. 1. -0.25]
[ 1. 1. 1. 1. 0.25]
[ 0. 1. 0. 1. 0.5 ]
[ 1. 0. 1. 0. 0.5 ]]
"""
n = np.arange(sz.size)
alpha = n.reshape(-1, 1, 1, 1)
beta = n.reshape(1, -1, 1, 1)
gamma = n.reshape(1, 1, -1, 1)
delta = n.reshape(1, 1, 1, -1)
# we only care about indices satisfying the following boolean mask
mask = np.logical_and(alpha // 2 == delta // 2, beta // 2 == gamma // 2)
# diagonal elements
diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])
diag_indices = np.argwhere(np.logical_and(mask, diag_mask))
diag_values = (sz[alpha] * sz[beta]).flatten()
diag = np.vstack([diag_indices.T, diag_values]).T
# off-diagonal elements
m1 = np.logical_and(sz[alpha] == sz[delta] + 1, sz[beta] == sz[gamma] - 1)
m2 = np.logical_and(sz[alpha] == sz[delta] - 1, sz[beta] == sz[gamma] + 1)
off_diag_mask = np.logical_and(mask, np.logical_or(m1, m2))
off_diag_indices = np.argwhere(off_diag_mask)
off_diag_values = np.full([len(off_diag_indices)], 0.5)
off_diag = np.vstack([off_diag_indices.T, off_diag_values]).T
# combine the off diagonal and diagonal tables into a single table
return np.vstack([diag, off_diag])
|
27,687 |
def fixture(
callable_or_scope=None,
*args,
scope="function",
params=None,
autouse=False,
ids=None,
name=None
):
"""Decorator to mark a fixture factory function.
This decorator can be used, with or without parameters, to define a
fixture function.
The name of the fixture function can later be referenced to cause its
invocation ahead of running tests: test
modules or classes can use the ``pytest.mark.usefixtures(fixturename)``
marker.
Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
Fixtures can provide their values to test functions using ``return`` or ``yield``
statements. When using ``yield`` the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome, and must yield exactly once.
:arg scope: the scope for which this fixture is shared, one of
``"function"`` (default), ``"class"``, ``"module"``,
``"package"`` or ``"session"``.
``"package"`` is considered **experimental** at this time.
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
The current parameter is available in ``request.param``.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
"""
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
fixture_function, arguments = _parse_fixture_args(
callable_or_scope,
*args,
scope=scope,
params=params,
autouse=autouse,
ids=ids,
name=name
)
scope = arguments.get("scope")
params = arguments.get("params")
autouse = arguments.get("autouse")
ids = arguments.get("ids")
name = arguments.get("name")
if fixture_function and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker(scope, params, autouse, name=name)(
fixture_function
)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
|
def fixture(
callable_or_scope=None,
*args,
scope="function",
params=None,
autouse=False,
ids=None,
name=None
):
"""Decorator to mark a fixture factory function.
This decorator can be used, with or without parameters, to define a
fixture function.
The name of the fixture function can later be referenced to cause its
invocation ahead of running tests: test
modules or classes can use the ``pytest.mark.usefixtures(fixturename)``
marker.
Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
Fixtures can provide their values to test functions using ``return`` or ``yield``
statements. When using ``yield`` the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome, and must yield exactly once.
:arg scope: the scope for which this fixture is shared, one of
``"function"`` (default), ``"class"``, ``"module"``,
``"package"`` or ``"session"``.
``"package"`` is considered **experimental** at this time.
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
The current parameter is available in ``request.param``.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
"""
if params is not None:
params = list(params)
fixture_function, arguments = _parse_fixture_args(
callable_or_scope,
*args,
scope=scope,
params=params,
autouse=autouse,
ids=ids,
name=name
)
scope = arguments.get("scope")
params = arguments.get("params")
autouse = arguments.get("autouse")
ids = arguments.get("ids")
name = arguments.get("name")
if fixture_function and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker(scope, params, autouse, name=name)(
fixture_function
)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
|
58,171 |
def get_indicators_command(client: Client,
params: Dict[str, str],
args: Dict[str, str]
) -> CommandResults:
"""Wrapper for retrieving indicators from the feed to the war-room.
Args:
client: Client object with request
params: demisto.params()
args: demisto.args()
Returns:
Outputs.
"""
limit = int(args.get('limit', '10'))
tlp_color = params.get('tlp_color')
feed_tags = argToList(params.get('feedTags', ''))
indicators = fetch_indicators(client, tlp_color, feed_tags, limit)
human_readable = tableToMarkdown('Indicators from Snort IP Blocklist Feed:', indicators,
headers=['value', 'type'], headerTransform=string_to_table_header, removeNull=True)
return CommandResults(
readable_output=human_readable,
outputs_prefix='',
outputs_key_field='',
raw_response=indicators,
outputs={},
)
|
def get_indicators_command(client: Client,
params: Dict[str, str],
args: Dict[str, str]
) -> CommandResults:
"""Wrapper for retrieving indicators from the feed to the war-room.
Args:
client: Client object with request
params: demisto.params()
args: demisto.args()
Returns:
Outputs.
"""
limit = int(args.get('limit', '10'))
tlp_color = params.get('tlp_color')
feed_tags = argToList(params.get('feedTags', ''))
indicators = fetch_indicators(client, tlp_color, feed_tags, limit)
human_readable = tableToMarkdown(f'Indicators from Snort IP Blocklist Feed: (first {limit} indicators)', indicators,
headers=['value', 'type'], headerTransform=string_to_table_header, removeNull=True)
return CommandResults(
readable_output=human_readable,
outputs_prefix='',
outputs_key_field='',
raw_response=indicators,
outputs={},
)
|
23,084 |
def test_add_prefix():
df = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.add_prefix("abc"), df.add_prefix("abc"))
assert_eq(ddf.x.add_prefix("abc").compute(), df.x.add_prefix("abc"))
|
def test_add_prefix():
df = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.add_prefix("abc"), df.add_prefix("abc"))
assert_eq(ddf.x.add_prefix("abc"), df.x.add_prefix("abc"))
|
48,927 |
def substitute(fn, expr):
"""
Substitute expressions with other expressions.
"""
node = expr.op()
result = fn(node)
if result is lin.proceed:
pass
elif result is lin.halt:
return expr
else:
return result.to_expr()
new_args = []
for arg in node.args:
if isinstance(arg, tuple):
arg = tuple(substitute(fn, expr) for expr in arg)
elif isinstance(arg, ir.Expr):
arg = substitute(fn, arg)
new_args.append(arg)
new_node = type(node)(*new_args)
return new_node.to_expr()
|
def substitute(fn, expr):
"""
Substitute expressions with other expressions.
"""
node = expr.op()
result = fn(node)
if result is lin.halt:
return expr
elif result is not lin.proceed:
return result.to_expr()
new_args = []
for arg in node.args:
if isinstance(arg, tuple):
arg = tuple(substitute(fn, expr) for expr in arg)
elif isinstance(arg, ir.Expr):
arg = substitute(fn, arg)
new_args.append(arg)
new_node = type(node)(*new_args)
return new_node.to_expr()
|
5,798 |
def _rename_parameter(new_name, old_name, dep_version=None):
"""
Generate decorator for function with recently-renamed parameter.
Apply the decorator generated by `_rename_parameter` to functions with a
recently renamed parameter.
After decoration, the function behaves as follows:
If only the new parameter is passed into the function, behave as usual.
If only the old parameter is passed into the function (as a keyword), raise
a DeprecationWarning if `dep_version` is provided, and behave as usual
otherwise.
If both old and new parameters are passed into the function, raise a
DeprecationWarning if `dep_version` is provided, and raise the appropriate
TypeError (function got multiple values for argument).
Parameters
----------
new_name : str
New name of parameter
old_name : str
Old name of parameter
dep_version : str, optional
Version of SciPy in which old parameter was deprecated
Notes
-----
Untested with functions that accept *args. Probably won't work as written.
"""
def decorator(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
# Check for intersection between positional and keyword args
params = list(inspect.signature(fun).parameters)
d_args = dict(zip(params, args))
intersection = set(d_args) & set(kwds)
if intersection:
message = (f"{fun.__name__}() got multiple values "
f"for argument '{list(intersection)[0]}'")
raise TypeError(message)
# Consolidate other positional and keyword args into `kwds`
kwds.update(d_args)
new_param = kwds.get(new_name, None)
got_new = new_param is not None
got_keyword_old = kwds.get(old_name, None) is not None
if got_keyword_old and dep_version:
message = (f"Use of keyword argument `{old_name}` is "
f"deprecated and replaced by `{new_name}`. "
f"Support for `{old_name}` will be removed two "
f"feature releases after SciPy {dep_version}.")
warnings.warn(message, DeprecationWarning, stacklevel=2)
if got_keyword_old and got_new:
message = (f"{fun.__name__}() got multiple values for "
f"argument now known as `{new_name}`")
raise TypeError(message)
kwds[new_name] = kwds.pop(old_name, new_param)
return fun(**kwds)
return wrapper
return decorator
|
def _rename_parameter(new_name, old_name, dep_version=None):
"""
Generate decorator for function with recently-renamed parameter.
Apply the decorator generated by `_rename_parameter` to functions with a
recently renamed parameter to maintain backward-compatibility.
After decoration, the function behaves as follows:
If only the new parameter is passed into the function, behave as usual.
If only the old parameter is passed into the function (as a keyword), raise
a DeprecationWarning if `dep_version` is provided, and behave as usual
otherwise.
If both old and new parameters are passed into the function, raise a
DeprecationWarning if `dep_version` is provided, and raise the appropriate
TypeError (function got multiple values for argument).
Parameters
----------
new_name : str
New name of parameter
old_name : str
Old name of parameter
dep_version : str, optional
Version of SciPy in which old parameter was deprecated
Notes
-----
Untested with functions that accept *args. Probably won't work as written.
"""
def decorator(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
# Check for intersection between positional and keyword args
params = list(inspect.signature(fun).parameters)
d_args = dict(zip(params, args))
intersection = set(d_args) & set(kwds)
if intersection:
message = (f"{fun.__name__}() got multiple values "
f"for argument '{list(intersection)[0]}'")
raise TypeError(message)
# Consolidate other positional and keyword args into `kwds`
kwds.update(d_args)
new_param = kwds.get(new_name, None)
got_new = new_param is not None
got_keyword_old = kwds.get(old_name, None) is not None
if got_keyword_old and dep_version:
message = (f"Use of keyword argument `{old_name}` is "
f"deprecated and replaced by `{new_name}`. "
f"Support for `{old_name}` will be removed two "
f"feature releases after SciPy {dep_version}.")
warnings.warn(message, DeprecationWarning, stacklevel=2)
if got_keyword_old and got_new:
message = (f"{fun.__name__}() got multiple values for "
f"argument now known as `{new_name}`")
raise TypeError(message)
kwds[new_name] = kwds.pop(old_name, new_param)
return fun(**kwds)
return wrapper
return decorator
|
39,131 |
def mask_along_axis_iid(specgrams, mask_param, mask_value, axis):
# type: (Tensor, int, float, int) -> Tensor
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (-2 -> frequency, -1 -> time)
Returns:
torch.Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
"""
if axis != -2 and axis != -1:
raise ValueError('Only Frequency and Time masking are supported')
value = torch.rand(specgrams.shape[:2]) * mask_param
min_value = torch.rand(specgrams.shape[:2]) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = (min_value.long())[..., None, None].float()
mask_end = (min_value.long() + value.long())[..., None, None].float()
mask = torch.arange(0, specgrams.size(axis)).float()
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
|
def mask_along_axis_iid(specgrams, mask_param, mask_value, axis):
# type: (Tensor, int, float, int) -> Tensor
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (-2 -> frequency, -1 -> time)
Returns:
torch.Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
"""
if axis not in [-1, -2]:
raise ValueError('Only Frequency and Time masking are supported')
value = torch.rand(specgrams.shape[:2]) * mask_param
min_value = torch.rand(specgrams.shape[:2]) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = (min_value.long())[..., None, None].float()
mask_end = (min_value.long() + value.long())[..., None, None].float()
mask = torch.arange(0, specgrams.size(axis)).float()
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
|
50,525 |
def discretise_spiketimes(spiketrains, sampling_rate):
"""
Rounds down all spike times in the input spike train(s)
to multiples of the sampling_rate
Parameters
----------
spiketrains : neo.SpikeTrain or list of neo.SpikeTrain
The spiketrain(s) to discretise
sampling_rate : pq.Quantity
The desired sampling rate
Returns
-------
neo.SpikeTrain or list of neo.SpikeTrain
The discretised spiketrain(s)
"""
# spiketrains type check
was_single_spiketrain = False
if isinstance(spiketrains, (neo.SpikeTrain)):
spiketrains = [spiketrains]
was_single_spiketrain = True
elif isinstance(spiketrains, list):
for st in spiketrains:
if not isinstance(st, (np.ndarray, neo.SpikeTrain)):
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a "
"list of one of those, not %s." % type(spiketrains))
else:
raise TypeError(
"spiketrains must be a SpikeTrain or a list of SpikeTrain objects,"
" not %s." % type(spiketrains))
units = spiketrains[0].times.units
mag_sampling_rate = sampling_rate.rescale(1/units).magnitude.flatten()
new_spiketrains = []
for spiketrain in spiketrains:
mag_t_start = spiketrain.t_start.rescale(units).magnitude.flatten()
mag_times = spiketrain.times.magnitude.flatten()
discrete_times = (mag_times // (1 / mag_sampling_rate)
/ mag_sampling_rate)
mask = discrete_times < mag_t_start
if np.any(mask):
warnings.warn(f'{mask.sum()} spike(s) would be before t_start '
'and are set to t_start instead.')
discrete_times[mask] = mag_t_start
discrete_times *= units
new_spiketrain = spiketrain.duplicate_with_new_data(discrete_times)
new_spiketrain.annotations = spiketrain.annotations
new_spiketrain.sampling_rate = sampling_rate
new_spiketrains.append(new_spiketrain)
if was_single_spiketrain:
new_spiketrains = new_spiketrains[0]
return new_spiketrains
|
def discretise_spiketimes(spiketrains, sampling_rate):
"""
Rounds down all spike times in the input spike train(s)
to multiples of the sampling_rate
Parameters
----------
spiketrains : neo.SpikeTrain or list of neo.SpikeTrain
The spiketrain(s) to discretise
sampling_rate : pq.Quantity
The desired sampling rate
Returns
-------
neo.SpikeTrain or list of neo.SpikeTrain
The discretised spiketrain(s)
"""
# spiketrains type check
was_single_spiketrain = False
if isinstance(spiketrains, (neo.SpikeTrain)):
spiketrains = [spiketrains]
was_single_spiketrain = True
elif isinstance(spiketrains, list):
for st in spiketrains:
if not isinstance(st, (np.ndarray, neo.SpikeTrain)):
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a "
"list of one of those, not %s." % type(spiketrains))
else:
raise TypeError(
"spiketrains must be a SpikeTrain or a list of SpikeTrain objects,"
" not %s." % type(spiketrains))
if not isinstance(sampling_rate, pq.Quantity):
raise TypeError(
"The 'sampling_rate' must be pq.Quantity.\n"
"Found: %s." % type(sampling_rate))
units = spiketrains[0].times.units
mag_sampling_rate = sampling_rate.rescale(1/units).magnitude.flatten()
new_spiketrains = []
for spiketrain in spiketrains:
mag_t_start = spiketrain.t_start.rescale(units).magnitude.flatten()
mag_times = spiketrain.times.magnitude.flatten()
discrete_times = (mag_times // (1 / mag_sampling_rate)
/ mag_sampling_rate)
mask = discrete_times < mag_t_start
if np.any(mask):
warnings.warn(f'{mask.sum()} spike(s) would be before t_start '
'and are set to t_start instead.')
discrete_times[mask] = mag_t_start
discrete_times *= units
new_spiketrain = spiketrain.duplicate_with_new_data(discrete_times)
new_spiketrain.annotations = spiketrain.annotations
new_spiketrain.sampling_rate = sampling_rate
new_spiketrains.append(new_spiketrain)
if was_single_spiketrain:
new_spiketrains = new_spiketrains[0]
return new_spiketrains
|
28,549 |
def generate_dims_coords(shape, var_name, dims=None, coords=None, default_dims=None):
"""Generate default dimensions and coordinates for a variable.
Parameters
----------
shape : tuple[int]
Shape of the variable
var_name : str
Name of the variable. Used in the default name, if necessary
dims : list
List of dimensions for the variable
coords : dict[str] -> list[str]
Map of dimensions to coordinates
default_dims : list[str]
Dimensions that do not apply to the variable's shape
Returns
-------
list[str]
Default dims
dict[str] -> list[str]
Default coords
"""
if default_dims is None:
default_dims = []
if dims is None:
dims = []
if len([dim for dim in dims if dim not in default_dims]) > len(shape):
warnings.warn(
("In variable {{var_name}}, there are " +
"more dims ({dims_len}) given than exist ({shape_len}). " +
"Passed array should have shape (chains, draws, *shape)").format(
var_name=var_name,
dims_len=len(dims), shape_len=len(shape)
),
SyntaxWarning,
)
if coords is None:
coords = {}
coords = deepcopy(coords)
dims = deepcopy(dims)
for idx, dim_len in enumerate(shape):
if (len(dims) < idx + 1) or (dims[idx] is None):
dim_name = "{var_name}_dim_{idx}".format(var_name=var_name, idx=idx)
if len(dims) < idx + 1:
dims.append(dim_name)
else:
dims[idx] = dim_name
dim_name = dims[idx]
if dim_name not in coords:
coords[dim_name] = np.arange(dim_len)
coords = {key: coord for key, coord in coords.items() if any(key == dim for dim in dims)}
return dims, coords
|
def generate_dims_coords(shape, var_name, dims=None, coords=None, default_dims=None):
"""Generate default dimensions and coordinates for a variable.
Parameters
----------
shape : tuple[int]
Shape of the variable
var_name : str
Name of the variable. Used in the default name, if necessary
dims : list
List of dimensions for the variable
coords : dict[str] -> list[str]
Map of dimensions to coordinates
default_dims : list[str]
Dimensions that do not apply to the variable's shape
Returns
-------
list[str]
Default dims
dict[str] -> list[str]
Default coords
"""
if default_dims is None:
default_dims = []
if dims is None:
dims = []
if len([dim for dim in dims if dim not in default_dims]) > len(shape):
warnings.warn(
("In variable {var_name}, there are " +
"more dims ({dims_len}) given than exist ({shape_len}). " +
"Passed array should have shape (chains, draws, *shape)").format(
var_name=var_name,
dims_len=len(dims), shape_len=len(shape)
),
SyntaxWarning,
)
if coords is None:
coords = {}
coords = deepcopy(coords)
dims = deepcopy(dims)
for idx, dim_len in enumerate(shape):
if (len(dims) < idx + 1) or (dims[idx] is None):
dim_name = "{var_name}_dim_{idx}".format(var_name=var_name, idx=idx)
if len(dims) < idx + 1:
dims.append(dim_name)
else:
dims[idx] = dim_name
dim_name = dims[idx]
if dim_name not in coords:
coords[dim_name] = np.arange(dim_len)
coords = {key: coord for key, coord in coords.items() if any(key == dim for dim in dims)}
return dims, coords
|
3,186 |
def generate_culprit(data):
platform = data.get('platform')
exceptions = get_path(data, 'exception', 'values')
if exceptions:
# Synthetic events no longer get a culprit
last_exception = get_path(exceptions, -1)
if last_exception and (last_exception.get('mechanism') or {}).get('synthetic'):
return ''
stacktraces = [e['stacktrace'] for e in exceptions if get_path(e, 'stacktrace', 'frames')]
else:
stacktrace = data.get('stacktrace')
if stacktrace and stacktrace.get('frames'):
stacktraces = [stacktrace]
else:
stacktraces = None
culprit = None
if not culprit and stacktraces:
culprit = get_stacktrace_culprit(get_path(stacktraces, -1), platform=platform)
if not culprit and data.get('request'):
culprit = get_path(data, 'request', 'url')
return truncatechars(culprit or '', MAX_CULPRIT_LENGTH)
|
def generate_culprit(data):
platform = data.get('platform')
exceptions = get_path(data, 'exception', 'values')
if exceptions:
# Synthetic events no longer get a culprit
last_exception = get_path(exceptions, -1)
if get_path(last_exception, 'mechanism', 'synthetic'):
return ''
stacktraces = [e['stacktrace'] for e in exceptions if get_path(e, 'stacktrace', 'frames')]
else:
stacktrace = data.get('stacktrace')
if stacktrace and stacktrace.get('frames'):
stacktraces = [stacktrace]
else:
stacktraces = None
culprit = None
if not culprit and stacktraces:
culprit = get_stacktrace_culprit(get_path(stacktraces, -1), platform=platform)
if not culprit and data.get('request'):
culprit = get_path(data, 'request', 'url')
return truncatechars(culprit or '', MAX_CULPRIT_LENGTH)
|
58,789 |
def batch_matmul(tensor_a, tensor_b, out_dtype="", transpose_a=False, transpose_b=True):
r"""
Computes batch matrix multiplication of `A` and `B` when `A` and `B` are data
in batch.
The A & B can be transposed. For legacy reason, we use NT format(tensor_a non-transposed
and tensor_b transposed) by default.
.. math::
\mbox{batch_matmul}(A, B)[i, :, :] = \mbox{matmul}(A[i, :, :], B[i, :, :])
Parameters
----------
tensor_a : tvm.relay.Expr
The first input.
tensor_b : tvm.relay.Expr
The second input.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul
transpose_a : Optional[bool] = False
Whether the data tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the weight tensor is in transposed format.
Returns
-------
result: tvm.relay.Expr
The computed result.
"""
return _make.batch_matmul(tensor_a, tensor_b, out_dtype, transpose_a, transpose_b)
|
def batch_matmul(tensor_a, tensor_b, out_dtype="", transpose_a=False, transpose_b=True):
r"""
Compute batch matrix multiplication of `tensor_a` and `tensor_b`.
Both `tensor_a` and `tensor_b` can be transposed. For legacy reason, we use NT format
(transpose_a=False, transpose_b=True) by default.
.. math::
\mbox{batch_matmul}(A, B)[i, :, :] = \mbox{matmul}(A[i, :, :], B[i, :, :])
Parameters
----------
tensor_a : tvm.relay.Expr
The first input.
tensor_b : tvm.relay.Expr
The second input.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul
transpose_a : Optional[bool] = False
Whether the data tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the weight tensor is in transposed format.
Returns
-------
result: tvm.relay.Expr
The computed result.
"""
return _make.batch_matmul(tensor_a, tensor_b, out_dtype, transpose_a, transpose_b)
|
7,166 |
def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
tol=1e-4, prefilter=False):
"""Coarse to fine TV-L1 optical flow estimator.
TV-L1 ia popular algorithm for optical flow estimation intrudced
by Zack et al. [1]_, improved in [2]_ and detailed in [3]_.
Parameters
----------
I0 : ~numpy.ndarray
The first gray scale image of the sequence.
I1 : ~numpy.ndarray
The second gray scale image of the sequence.
dt : float
Time step of the numerical scheme. Convergence is proved for
values dt < 0.125, but it can be larger for faster
convergence.
lambda_ : float
Attachement parameter. The smaller this parameter is,
the smoother is the solutions.
tau : float
Tightness parameter. It should have a small value in order to
maintain attachement and regularization parts in
correspondence.
nwarp : int
Number of times I1 is warped.
niter : int
Number of fixed point iteration.
tol : float
Tolerance used as stopping criterion based on the L² distance
between two consecutive values of (u, v).
prefilter : bool
whether to prefilter the estimated optical flow before each
image warp.
Returns
-------
flow : tuple[~numpy.ndarray]
The estimated optical flow.
References
----------
.. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
duality based approach for realtime TV-L 1 optical flow. In Joint
pattern recognition symposium (pp. 214-223). Springer, Berlin,
Heidelberg.
.. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
D. (2009). An improved algorithm for TV-L 1 optical flow. In
Statistical and geometrical approaches to visual motion analysis
(pp. 23-45). Springer, Berlin, Heidelberg.
.. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
G. (2013). TV-L1 optical flow estimation. Image Processing On
Line, 2013, 137-150.
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage.data import stereo_motorcycle
>>> from skimage.registration import tvl1
>>> I0, I1, disp = stereo_motorcycle()
>>> # --- Convert the images to gray level: color is not supported.
>>> I0 = rgb2gray(I0)
>>> I1 = rgb2gray(I1)
>>> flow = tvl1(I1, I0)
"""
solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau,
nwarp=nwarp, niter=niter, tol=tol,
prefilter=prefilter)
return coarse_to_fine(I0, I1, solver)
|
def tvl1(I0, I1, dt=0.2, lambda_=15, tau=0.3, nwarp=5, niter=10,
tol=1e-4, prefilter=False):
"""Coarse to fine TV-L1 optical flow estimator.
TV-L1 ia popular algorithm for optical flow estimation intrudced
by Zack et al. [1]_, improved in [2]_ and detailed in [3]_.
Parameters
----------
I0 : ~numpy.ndarray
The first gray scale image of the sequence.
I1 : ~numpy.ndarray
The second gray scale image of the sequence.
dt : float
Time step of the numerical scheme. Convergence is proved for
values dt < 0.125, but it can be larger for faster
convergence.
lambda_ : float
Attachement parameter. The smaller this parameter is,
the smoother is the solutions.
tau : float
Tightness parameter. It should have a small value in order to
maintain attachement and regularization parts in
correspondence.
nwarp : int
Number of times I1 is warped.
niter : int
Number of fixed point iteration.
tol : float
Tolerance used as stopping criterion based on the L² distance
between two consecutive values of (u, v).
prefilter : bool
Whether to prefilter the estimated optical flow before each
image warp.
Returns
-------
flow : tuple[~numpy.ndarray]
The estimated optical flow.
References
----------
.. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
duality based approach for realtime TV-L 1 optical flow. In Joint
pattern recognition symposium (pp. 214-223). Springer, Berlin,
Heidelberg.
.. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
D. (2009). An improved algorithm for TV-L 1 optical flow. In
Statistical and geometrical approaches to visual motion analysis
(pp. 23-45). Springer, Berlin, Heidelberg.
.. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
G. (2013). TV-L1 optical flow estimation. Image Processing On
Line, 2013, 137-150.
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage.data import stereo_motorcycle
>>> from skimage.registration import tvl1
>>> I0, I1, disp = stereo_motorcycle()
>>> # --- Convert the images to gray level: color is not supported.
>>> I0 = rgb2gray(I0)
>>> I1 = rgb2gray(I1)
>>> flow = tvl1(I1, I0)
"""
solver = partial(_tvl1, dt=dt, lambda_=lambda_, tau=tau,
nwarp=nwarp, niter=niter, tol=tol,
prefilter=prefilter)
return coarse_to_fine(I0, I1, solver)
|
39,086 |
def run(
app: typing.Union["ASGIApplication", str],
*,
host: str = "127.0.0.1",
port: int = 8000,
uds: typing.Optional[str] = None,
fd: typing.Optional[int] = None,
loop: LoopSetupType = "auto",
http: HTTPProtocolType = "auto",
ws: WSProtocolType = "auto",
ws_max_size: int = 16777216,
ws_ping_interval: float = 20.0,
ws_ping_timeout: float = 20.0,
ws_per_message_deflate: bool = True,
lifespan: LifespanType = "auto",
interface: InterfaceType = "auto",
debug: bool = False,
reload: bool = False,
reload_dirs: typing.Optional[typing.List[str]] = None,
reload_includes: typing.Optional[typing.List[str]] = None,
reload_excludes: typing.Optional[typing.List[str]] = None,
reload_delay: float = 0.25,
workers: typing.Optional[int] = None,
env_file: typing.Optional[str] = None,
log_config: typing.Optional[typing.Union[dict, str]] = LOGGING_CONFIG,
log_level: typing.Optional[str] = None,
access_log: bool = True,
proxy_headers: bool = True,
server_header: bool = True,
date_header: bool = True,
forwarded_allow_ips: typing.Optional[str] = None,
root_path: str = "",
limit_concurrency: typing.Optional[int] = None,
backlog: int = 2048,
limit_max_requests: typing.Optional[int] = None,
timeout_keep_alive: int = 5,
ssl_keyfile: typing.Optional[str] = None,
ssl_certfile: typing.Optional[str] = None,
ssl_keyfile_password: typing.Optional[str] = None,
ssl_version: int = int(SSL_PROTOCOL_VERSION),
ssl_cert_reqs: int = int(ssl.CERT_NONE),
ssl_ca_certs: typing.Optional[str] = None,
ssl_ciphers: str = "TLSv1",
headers: typing.Optional[typing.List[typing.Tuple[str, str]]] = None,
use_colors: typing.Optional[bool] = None,
app_dir: typing.Optional[str] = None,
factory: bool = False,
h11_max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
) -> None:
if app_dir is not None:
sys.path.insert(0, app_dir)
config = Config(
app,
host=host,
port=port,
uds=uds,
fd=fd,
loop=loop,
http=http,
ws=ws,
ws_max_size=ws_max_size,
ws_ping_interval=ws_ping_interval,
ws_ping_timeout=ws_ping_timeout,
ws_per_message_deflate=ws_per_message_deflate,
lifespan=lifespan,
interface=interface,
debug=debug,
reload=reload,
reload_dirs=reload_dirs,
reload_includes=reload_includes,
reload_excludes=reload_excludes,
reload_delay=reload_delay,
workers=workers,
env_file=env_file,
log_config=log_config,
log_level=log_level,
access_log=access_log,
proxy_headers=proxy_headers,
server_header=server_header,
date_header=date_header,
forwarded_allow_ips=forwarded_allow_ips,
root_path=root_path,
limit_concurrency=limit_concurrency,
backlog=backlog,
limit_max_requests=limit_max_requests,
timeout_keep_alive=timeout_keep_alive,
ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile,
ssl_keyfile_password=ssl_keyfile_password,
ssl_version=ssl_version,
ssl_cert_reqs=ssl_cert_reqs,
ssl_ca_certs=ssl_ca_certs,
ssl_ciphers=ssl_ciphers,
headers=headers,
use_colors=use_colors,
factory=factory,
h11_max_incomplete_event_size=h11_max_incomplete_event_size,
)
server = Server(config=config)
if (config.reload or config.workers > 1) and not isinstance(app, str):
logger = logging.getLogger("uvicorn.error")
logger.warning(
"You must pass the application as an import string to enable 'reload' or "
"'workers'."
)
sys.exit(1)
if config.should_reload:
sock = config.bind_socket()
ChangeReload(config, target=server.run, sockets=[sock]).run()
elif config.workers > 1:
sock = config.bind_socket()
Multiprocess(config, target=server.run, sockets=[sock]).run()
else:
server.run()
if config.uds:
os.remove(config.uds) # pragma: py-win32
if not server.started and not config.should_reload and config.workers == 1:
sys.exit(STARTUP_FAILURE)
|
def run(
app: typing.Union["ASGIApplication", str],
*,
host: str = "127.0.0.1",
port: int = 8000,
uds: typing.Optional[str] = None,
fd: typing.Optional[int] = None,
loop: LoopSetupType = "auto",
http: HTTPProtocolType = "auto",
ws: WSProtocolType = "auto",
ws_max_size: int = 16777216,
ws_ping_interval: float = 20.0,
ws_ping_timeout: float = 20.0,
ws_per_message_deflate: bool = True,
lifespan: LifespanType = "auto",
interface: InterfaceType = "auto",
debug: bool = False,
reload: bool = False,
reload_dirs: typing.Optional[typing.List[str]] = None,
reload_includes: typing.Optional[typing.List[str]] = None,
reload_excludes: typing.Optional[typing.List[str]] = None,
reload_delay: float = 0.25,
workers: typing.Optional[int] = None,
env_file: typing.Optional[str] = None,
log_config: typing.Optional[typing.Union[typing.Mapping[str, Any], str]] = LOGGING_CONFIG,
log_level: typing.Optional[str] = None,
access_log: bool = True,
proxy_headers: bool = True,
server_header: bool = True,
date_header: bool = True,
forwarded_allow_ips: typing.Optional[str] = None,
root_path: str = "",
limit_concurrency: typing.Optional[int] = None,
backlog: int = 2048,
limit_max_requests: typing.Optional[int] = None,
timeout_keep_alive: int = 5,
ssl_keyfile: typing.Optional[str] = None,
ssl_certfile: typing.Optional[str] = None,
ssl_keyfile_password: typing.Optional[str] = None,
ssl_version: int = int(SSL_PROTOCOL_VERSION),
ssl_cert_reqs: int = int(ssl.CERT_NONE),
ssl_ca_certs: typing.Optional[str] = None,
ssl_ciphers: str = "TLSv1",
headers: typing.Optional[typing.List[typing.Tuple[str, str]]] = None,
use_colors: typing.Optional[bool] = None,
app_dir: typing.Optional[str] = None,
factory: bool = False,
h11_max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
) -> None:
if app_dir is not None:
sys.path.insert(0, app_dir)
config = Config(
app,
host=host,
port=port,
uds=uds,
fd=fd,
loop=loop,
http=http,
ws=ws,
ws_max_size=ws_max_size,
ws_ping_interval=ws_ping_interval,
ws_ping_timeout=ws_ping_timeout,
ws_per_message_deflate=ws_per_message_deflate,
lifespan=lifespan,
interface=interface,
debug=debug,
reload=reload,
reload_dirs=reload_dirs,
reload_includes=reload_includes,
reload_excludes=reload_excludes,
reload_delay=reload_delay,
workers=workers,
env_file=env_file,
log_config=log_config,
log_level=log_level,
access_log=access_log,
proxy_headers=proxy_headers,
server_header=server_header,
date_header=date_header,
forwarded_allow_ips=forwarded_allow_ips,
root_path=root_path,
limit_concurrency=limit_concurrency,
backlog=backlog,
limit_max_requests=limit_max_requests,
timeout_keep_alive=timeout_keep_alive,
ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile,
ssl_keyfile_password=ssl_keyfile_password,
ssl_version=ssl_version,
ssl_cert_reqs=ssl_cert_reqs,
ssl_ca_certs=ssl_ca_certs,
ssl_ciphers=ssl_ciphers,
headers=headers,
use_colors=use_colors,
factory=factory,
h11_max_incomplete_event_size=h11_max_incomplete_event_size,
)
server = Server(config=config)
if (config.reload or config.workers > 1) and not isinstance(app, str):
logger = logging.getLogger("uvicorn.error")
logger.warning(
"You must pass the application as an import string to enable 'reload' or "
"'workers'."
)
sys.exit(1)
if config.should_reload:
sock = config.bind_socket()
ChangeReload(config, target=server.run, sockets=[sock]).run()
elif config.workers > 1:
sock = config.bind_socket()
Multiprocess(config, target=server.run, sockets=[sock]).run()
else:
server.run()
if config.uds:
os.remove(config.uds) # pragma: py-win32
if not server.started and not config.should_reload and config.workers == 1:
sys.exit(STARTUP_FAILURE)
|
59,260 |
def main(opcode_py, outfile='Include/opcode.h'):
opcode = {}
if hasattr(tokenize, 'open'):
fp = tokenize.open(opcode_py) # Python 3.2+
else:
fp = open(opcode_py) # Python 2.7
with fp:
code = fp.read()
exec(code, opcode)
opmap = opcode['opmap']
opname = opcode['opname']
hasconst = opcode['hasconst']
hasjrel = opcode['hasjrel']
hasjabs = opcode['hasjabs']
used = [ False ] * 256
next_op = 1
for name, op in opmap.items():
used[op] = True
with open(outfile, 'w') as fobj:
fobj.write(header)
for name in opname:
if name in opmap:
fobj.write(DEFINE.format(name, opmap[name]))
if name == 'POP_EXCEPT': # Special entry for HAVE_ARGUMENT
fobj.write(DEFINE.format("HAVE_ARGUMENT", opcode["HAVE_ARGUMENT"]))
for name in opcode['_specialized_instructions']:
while used[next_op]:
next_op += 1
fobj.write(DEFINE.format(name, next_op))
used[next_op] = True
fobj.write(DEFINE.format('DO_TRACING', 255))
fobj.write("\nextern const uint8_t _PyOpcode_Caches[256];\n")
fobj.write("\nextern const uint8_t _PyOpcode_Deopt[256];\n")
fobj.write("\n#ifdef NEED_OPCODE_TABLES\n")
write_int_array_from_ops("_PyOpcode_RelativeJump", opcode['hasjrel'], fobj)
write_int_array_from_ops("_PyOpcode_Jump", opcode['hasjrel'] + opcode['hasjabs'], fobj)
fobj.write("\nconst uint8_t _PyOpcode_Caches[256] = {\n")
for i, entries in enumerate(opcode["_inline_cache_entries"]):
if entries:
fobj.write(f" [{opname[i]}] = {entries},\n")
fobj.write("};\n")
deoptcodes = {}
for basic in opmap:
deoptcodes[basic] = basic
for basic, family in opcode["_specializations"].items():
for specialized in family:
deoptcodes[specialized] = basic
fobj.write("\nconst uint8_t _PyOpcode_Deopt[256] = {\n")
for opt, deopt in sorted(deoptcodes.items()):
fobj.write(f" [{opt}] = {deopt},\n")
fobj.write("};\n")
fobj.write("#endif /* OPCODE_TABLES */\n")
fobj.write("\n")
fobj.write("#define HAS_CONST(op) (false\\")
for op in hasconst:
fobj.write(f"\n || ((op) == {op}) \\")
fobj.write("\n )\n")
fobj.write("\n")
for i, (op, _) in enumerate(opcode["_nb_ops"]):
fobj.write(DEFINE.format(op, i))
fobj.write("\n")
fobj.write("#ifdef Py_DEBUG\n")
fobj.write("static const char *const _PyOpcode_OpName[256] = {\n")
for name, op in opmap.items():
fobj.write(f''' [{op}] = "{name}",\n''')
fobj.write("};\n")
fobj.write("#endif\n")
fobj.write(footer)
print(f"{outfile} regenerated from {opcode_py}")
|
def main(opcode_py, outfile='Include/opcode.h'):
opcode = {}
if hasattr(tokenize, 'open'):
fp = tokenize.open(opcode_py) # Python 3.2+
else:
fp = open(opcode_py) # Python 2.7
with fp:
code = fp.read()
exec(code, opcode)
opmap = opcode['opmap']
opname = opcode['opname']
hasconst = opcode['hasconst']
hasjrel = opcode['hasjrel']
hasjabs = opcode['hasjabs']
used = [ False ] * 256
next_op = 1
for name, op in opmap.items():
used[op] = True
with open(outfile, 'w') as fobj:
fobj.write(header)
for name in opname:
if name in opmap:
fobj.write(DEFINE.format(name, opmap[name]))
if name == 'POP_EXCEPT': # Special entry for HAVE_ARGUMENT
fobj.write(DEFINE.format("HAVE_ARGUMENT", opcode["HAVE_ARGUMENT"]))
for name in opcode['_specialized_instructions']:
while used[next_op]:
next_op += 1
fobj.write(DEFINE.format(name, next_op))
used[next_op] = True
fobj.write(DEFINE.format('DO_TRACING', 255))
fobj.write("\nextern const uint8_t _PyOpcode_Caches[256];\n")
fobj.write("\nextern const uint8_t _PyOpcode_Deopt[256];\n")
fobj.write("\n#ifdef NEED_OPCODE_TABLES\n")
write_int_array_from_ops("_PyOpcode_RelativeJump", opcode['hasjrel'], fobj)
write_int_array_from_ops("_PyOpcode_Jump", opcode['hasjrel'] + opcode['hasjabs'], fobj)
fobj.write("\nconst uint8_t _PyOpcode_Caches[256] = {\n")
for i, entries in enumerate(opcode["_inline_cache_entries"]):
if entries:
fobj.write(f" [{opname[i]}] = {entries},\n")
fobj.write("};\n")
deoptcodes = {}
for basic in opmap:
deoptcodes[basic] = basic
for basic, family in opcode["_specializations"].items():
for specialized in family:
deoptcodes[specialized] = basic
fobj.write("\nconst uint8_t _PyOpcode_Deopt[256] = {\n")
for opt, deopt in sorted(deoptcodes.items()):
fobj.write(f" [{opt}] = {deopt},\n")
fobj.write("};\n")
fobj.write("#endif /* OPCODE_TABLES */\n")
fobj.write("\n")
fobj.write("#define HAS_CONST(op) (false\\")
for op in hasconst:
fobj.write(f"\n || ((op) == {op}) \\")
fobj.write("\n )\n")
fobj.write("\n")
for i, (op, _) in enumerate(opcode["_nb_ops"]):
fobj.write(DEFINE.format(op, i))
fobj.write("\n")
fobj.write("#ifdef Py_DEBUG\n")
fobj.write("static const char *const _PyOpcode_OpName[256] = {\n")
for name in opmap:
fobj.write(f''' [{name}] = "{name}",\n''')
fobj.write("};\n")
fobj.write("#endif\n")
fobj.write(footer)
print(f"{outfile} regenerated from {opcode_py}")
|
14,393 |
def porkchop(body_dpt, body_arr, dpt_start, dpt_end, arr_start, arr_end, N=50):
"""Plots porkchop between two bodies.
Parameters
----------
body_dpt: poliastro.bodies.Body
Body for launch
body_arr: poliastro.bodies.Body
Body for arrival
dpt_start: str
Porkchop launch date starts in this value
dpt_end: str
Porkchop launch date ends in this value
arr_start: str
Porkchop arrival date starts in this value
arr_end: str
Porkchop arrival date ends in this value
Returns
-------
dpt: np.array
Departure time span
arr: np.array
Arrival time span
deltav_dpt: np.ndarray
Departure velocity needed for each time of flight
deltav_arr: np.ndarray
Arrival velocity needed for each time of flight
c3_dpt: np.ndarray
Characteristic launch energy
c3_arr: np.ndarray
Characteristic arrival energy
Example
-------
# Time requirements YYYY-MM-DD
# Data is from porkchop pag. 180
>>> from poliastro.plotting.porkchop import porkchop
>>> from poliastro.bodies import Earth, Mars
>>> import matplotlib.pyplot as plt
>>> departure_start = "2005-04-30"
>>> departure_end = "2005-10-07"
>>> arrival_start = "2005-11-16"
>>> arrival_end = "2006-12-21"
>>> dpt, arr, dv_dpt, dv_arr, c3dpt, c3arr = porkchop(Earth, Mars, departure_start, departure_end, arrival_start, arrival_end)
>>> plt.show()
"""
# Computing time spans fot departure and arrival
dpt = [
Time(d, format="jd")
for d in np.linspace(Time(dpt_start).jd, Time(dpt_end).jd, N + 1)
]
arr = [
Time(d, format="jd")
for d in np.linspace(Time(arr_start).jd, Time(arr_end).jd, N + 1)
]
# Prellocate in memory the arrays
deltav_dpt = np.zeros((len(dpt), len(arr)))
deltav_arr = np.zeros((len(dpt), len(arr)))
c3_dpt = np.zeros((len(dpt), len(arr)))
c3_arr = np.zeros((len(dpt), len(arr)))
iso_tof = np.zeros((len(dpt), len(arr)))
idx = 0
for d in dpt:
dv_dpt, dv_arr, c3_d, c3_a, t_flight = lambert_porkchop_vectorized(
body_dpt, body_arr, d, arr
)
deltav_dpt[idx] = dv_dpt
deltav_arr[idx] = dv_arr
c3_dpt[idx] = c3_d
c3_arr[idx] = c3_a
iso_tof[idx] = t_flight
idx += 1
"""
Algorithm works: 'for each launch get all arrivals'.
Contourf works: 'for each Y -> all X'.
We need to transpose the arrays.
"""
fig, ax = plt.subplots(figsize=(15, 15))
c3_levels = np.linspace(0, 45, 30)
t_levels = np.linspace(100, 500, 5)
c = plt.contourf(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(c3_dpt),
c3_levels,
)
l = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(c3_dpt),
c3_levels,
colors="black",
linestyles="solid",
)
t = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(iso_tof),
t_levels,
colors="red",
linestyles="dashed",
linewidths=3.5,
)
cbar = plt.colorbar(c)
cbar.set_label("$km^2/s^2$")
plt.clabel(l, inline=1, fmt="%1.1f", colors="k", fontsize=10)
plt.clabel(t, inline=1, fmt="%1.1f", colors="r", fontsize=14)
plt.grid()
fig.autofmt_xdate()
plt.title(
"{} - {} for year {}, C3 Launch, TFL".format(
body_dpt.name, body_arr.name, dpt[0].datetime.year
),
fontsize=14,
fontweight="bold",
)
plt.xlabel("Launch date", fontsize=10, fontweight="bold")
plt.ylabel("Arrival date", fontsize=10, fontweight="bold")
plt.show()
return dpt, arr, deltav_dpt, deltav_arr, c3_dpt, c3_arr
|
def porkchop(body_dpt, body_arr, dpt_start, dpt_end, arr_start, arr_end, N=50):
"""Plots porkchop between two bodies.
Parameters
----------
body_dpt: poliastro.bodies.Body
Body for launch
body_arr: poliastro.bodies.Body
Body for arrival
dpt_start: str
Porkchop launch date starts in this value
dpt_end: str
Porkchop launch date ends in this value
arr_start: str
Porkchop arrival date starts in this value
arr_end: str
Porkchop arrival date ends in this value
Returns
-------
dpt: np.array
Departure time span
arr: np.array
Arrival time span
deltav_dpt: np.ndarray
Departure velocity needed for each time of flight
deltav_arr: np.ndarray
Arrival velocity needed for each time of flight
c3_dpt: np.ndarray
Characteristic launch energy
c3_arr: np.ndarray
Characteristic arrival energy
Example
-------
# Time requirements YYYY-MM-DD
# Data is from porkchop pag. 180
>>> from poliastro.plotting.porkchop import porkchop
>>> from poliastro.bodies import Earth, Mars
>>> import matplotlib.pyplot as plt
>>> departure_start = "2005-04-30"
>>> departure_end = "2005-10-07"
>>> arrival_start = "2005-11-16"
>>> arrival_end = "2006-12-21"
>>> dpt, arr, dv_dpt, dv_arr, c3dpt, c3arr = porkchop(Earth, Mars, departure_start, departure_end, arrival_start, arrival_end)
>>> plt.show()
"""
# Computing time spans fot departure and arrival
dpt = [
Time(d, format="jd")
for d in np.linspace(Time(dpt_start).jd, Time(dpt_end).jd, N + 1)
]
arr = [
Time(d, format="jd")
for d in np.linspace(Time(arr_start).jd, Time(arr_end).jd, N + 1)
]
# Prellocate in memory the arrays
deltav_dpt = np.zeros((len(dpt), len(arr)))
deltav_arr = np.zeros((len(dpt), len(arr)))
c3_dpt = np.zeros((len(dpt), len(arr)))
c3_arr = np.zeros((len(dpt), len(arr)))
iso_tof = np.zeros((len(dpt), len(arr)))
idx = 0
for d in dpt:
dv_dpt, dv_arr, c3_d, c3_a, t_flight = lambert_porkchop_vectorized(
body_dpt, body_arr, d, arr
)
deltav_dpt[idx] = dv_dpt
deltav_arr[idx] = dv_arr
c3_dpt[idx] = c3_d
c3_arr[idx] = c3_a
iso_tof[idx] = t_flight
idx += 1
"""
Algorithm works: 'for each launch get all arrivals'.
Contourf works: 'for each Y -> all X'.
We need to transpose the arrays.
"""
fig, ax = plt.subplots(figsize=(15, 15))
c3_levels = np.linspace(0, 45, 30)
t_levels = np.linspace(100, 500, 5)
c = plt.contourf(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(c3_dpt),
c3_levels,
)
l = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
c3_dpt.T,
c3_levels,
colors="black",
linestyles="solid",
)
t = plt.contour(
[D.to_datetime() for D in dpt],
[A.to_datetime() for A in arr],
np.transpose(iso_tof),
t_levels,
colors="red",
linestyles="dashed",
linewidths=3.5,
)
cbar = plt.colorbar(c)
cbar.set_label("$km^2/s^2$")
plt.clabel(l, inline=1, fmt="%1.1f", colors="k", fontsize=10)
plt.clabel(t, inline=1, fmt="%1.1f", colors="r", fontsize=14)
plt.grid()
fig.autofmt_xdate()
plt.title(
"{} - {} for year {}, C3 Launch, TFL".format(
body_dpt.name, body_arr.name, dpt[0].datetime.year
),
fontsize=14,
fontweight="bold",
)
plt.xlabel("Launch date", fontsize=10, fontweight="bold")
plt.ylabel("Arrival date", fontsize=10, fontweight="bold")
plt.show()
return dpt, arr, deltav_dpt, deltav_arr, c3_dpt, c3_arr
|
10,809 |
def _remove_unneeded_phis(phimap):
"""Remove unneeded PHis from the phimap
"""
all_phis = []
for philist in phimap.values():
all_phis.extend(philist)
unneeded_phis = set()
# Find unneeded PHIs.
for phi in all_phis:
ivs = phi.value.incoming_values
# It's unneeded if the incomings are either undefined or
# the PHI node it self
if all(iv is ir.UNDEFINED or iv == phi.target for iv in ivs):
unneeded_phis.add(phi)
# Fix up references to unneeded PHIs
for phi in all_phis:
for unneed in unneeded_phis:
if unneed is not phi:
# If the unneeded PHI is in the current phi's incoming values
if unneed.target in phi.value.incoming_values:
# Replace the unneeded PHI with a UNDEFINED
idx = phi.value.incoming_values.index(unneed.target)
phi.value.incoming_values[idx] = ir.UNDEFINED
# Remove unneeded phis
for philist in phimap.values():
for unneeded in unneeded_phis:
if unneeded in philist:
philist.remove(unneeded)
|
def _remove_unneeded_phis(phimap):
"""Remove unneeded PHIs from the phimap
"""
all_phis = []
for philist in phimap.values():
all_phis.extend(philist)
unneeded_phis = set()
# Find unneeded PHIs.
for phi in all_phis:
ivs = phi.value.incoming_values
# It's unneeded if the incomings are either undefined or
# the PHI node it self
if all(iv is ir.UNDEFINED or iv == phi.target for iv in ivs):
unneeded_phis.add(phi)
# Fix up references to unneeded PHIs
for phi in all_phis:
for unneed in unneeded_phis:
if unneed is not phi:
# If the unneeded PHI is in the current phi's incoming values
if unneed.target in phi.value.incoming_values:
# Replace the unneeded PHI with a UNDEFINED
idx = phi.value.incoming_values.index(unneed.target)
phi.value.incoming_values[idx] = ir.UNDEFINED
# Remove unneeded phis
for philist in phimap.values():
for unneeded in unneeded_phis:
if unneeded in philist:
philist.remove(unneeded)
|
40,525 |
def parse_dashboard_json(input_path):
try:
with open(input_path) as json_file:
try:
dashboard = json.load(json_file)
except json.decoder.JSONDecodeError as ex:
raise CLIError('JSON decode error for {}: {}'.format(json_file, str(ex)))
if 'location' not in dashboard:
raise CLIError(str(json_file) + " does not contain the property 'location'")
if 'properties' not in dashboard:
raise CLIError(str(json_file) + " does not contain the property 'properties'")
if 'lenses' not in dashboard['properties']:
raise CLIError(str(json_file) + " does not contain the property 'lenses' in 'properties'")
if 'metadata' not in dashboard['properties']:
raise CLIError(str(json_file) + " does not contain the property 'metadata' in 'properties'")
return dashboard
except FileNotFoundError as ex:
raise CLIError('File not Found: {}'.format(str(ex)))
|
def parse_dashboard_json(input_path):
try:
with open(input_path) as json_file:
try:
dashboard = json.load(json_file)
except json.decoder.JSONDecodeError as ex:
raise CLIError('There was an error decoding the JSON file {}: {}'.format(json_file, str(ex)))
if 'location' not in dashboard:
raise CLIError(str(json_file) + " does not contain the property 'location'")
if 'properties' not in dashboard:
raise CLIError(str(json_file) + " does not contain the property 'properties'")
if 'lenses' not in dashboard['properties']:
raise CLIError(str(json_file) + " does not contain the property 'lenses' in 'properties'")
if 'metadata' not in dashboard['properties']:
raise CLIError(str(json_file) + " does not contain the property 'metadata' in 'properties'")
return dashboard
except FileNotFoundError as ex:
raise CLIError('File not Found: {}'.format(str(ex)))
|
3,788 |
def _test_reveal(
path: str,
expression: str,
reveal: str,
expected_reveal: str,
lineno: int,
) -> None:
"""Error-reporting helper function for `test_reveal`."""
strip_pattern = re.pattern(r"(\w+\.)+(\w+)")
stripped_reveal = strip_pattern.sub(strip_func, reveal)
stripped_expected_reveal = strip_pattern.sub(strip_func, expected_reveal)
if stripped_reveal not in stripped_expected_reveal:
raise AssertionError(
_REVEAL_MSG.format(lineno,
expression,
stripped_expected_reveal,
stripped_reveal)
)
|
def _test_reveal(
path: str,
expression: str,
reveal: str,
expected_reveal: str,
lineno: int,
) -> None:
"""Error-reporting helper function for `test_reveal`."""
strip_pattern = re.compile(r"(\w+\.)+(\w+)")
stripped_reveal = strip_pattern.sub(strip_func, reveal)
stripped_expected_reveal = strip_pattern.sub(strip_func, expected_reveal)
if stripped_reveal not in stripped_expected_reveal:
raise AssertionError(
_REVEAL_MSG.format(lineno,
expression,
stripped_expected_reveal,
stripped_reveal)
)
|
35,344 |
def launch_mapdl_on_cluster(
nproc=2,
memory=4,
loglevel="ERROR",
additional_switches="",
verbose=False,
start_timeout=600,
tag="latest",
**kwargs,
):
"""Start MAPDL on the ANSYS jupyter cluster in gRPC mode.
Parameters
----------
nproc : int, optional
Number of processors. Defaults to 2.
memory : float, optional
Fixed amount of memory to request for MAPDL in Gigabytes. If
the mapdl instance requires more ram than your provide MAPDL
may segfault.
loglevel : str, optional
Sets which messages are printed to the console. Default
``'INFO'`` logs out all MAPDL messages, ``'WARNING``` prints only
messages containing MAPDL warnings, and ``'ERROR'`` prints only
error messages.
additional_switches : str, optional
Additional switches for MAPDL, for example ``"-p aa_r"``, the
academic research license, would be added with:
- ``additional_switches="-p aa_r"``
Avoid adding switches like ``"-i"`` ``"-o"`` or ``"-b"`` as
these are already included to start up the MAPDL server. See
the notes section for additional details.
start_timeout : float, optional
Maximum allowable time to connect to the MAPDL server.
tag : str, optional
Docker image tag from `PyAnsys MAPDL Image
<https://github.com/orgs/pyansys/packages/container/package/pymapdl%2Fmapdl>`. Defaults
to ``"latest"``. For example "v22.1.0".
Returns
-------
MapdlGrpc
MAPDL instance.
Examples
--------
Launch MAPDL using the default configuration.
>>> from ansys.mapdl import launch_mapdl
>>> mapdl = launch_mapdl()
Launch MAPDL and guarantee 16 GB minimum RAM and 8 CPUs.
>>> mapdl = launch_mapdl(memory=16, nproc=8)
"""
# attempt to connect to the remote scheduler
check_manager()
# check additional_switches args
if "-m " in additional_switches:
raise ValueError(
'Memory option "-m" not permitted when launching from the '
"kubernetes cluster and is set with the ``memory`` parameter"
)
if "-np " in additional_switches:
raise ValueError(
'CPU option "-np" not permitted when launching from the '
"kubernetes cluster and is set with the ``nproc`` parameter"
)
# check resources
nproc = int(nproc)
if nproc < 0:
raise ValueError("Requested CPUs ``nproc`` must be greater than 0")
if nproc > MAX_CPU:
raise ValueError(f"Requested CPUs ``nproc`` must be less than {MAX_CPU}")
if memory < 0.25:
raise ValueError("Requested memory ``mem`` must be greater than 0.25")
if memory > MAX_MEM:
raise ValueError(f"Requested memory ``mem`` must be less than than {MAX_MEM}")
# # convert memory from GB to Mi
memory *= 1024
if "-smp" in additional_switches:
raise ValueError(
'Ignoring additional switch "-smp". Incompatible with docker container.'
)
additional_switches += f"-m -{memory} -np {nproc}"
args = additional_switches.split()
ip, name = manager.spawn_mapdl(version=tag, args=args, verbose=verbose)
# connect to the pod instance
from ansys.mapdl.core import Mapdl
return Mapdl(ip, loglevel=loglevel)
|
def launch_mapdl_on_cluster(
nproc=2,
memory=4,
loglevel="ERROR",
additional_switches="",
verbose=False,
start_timeout=600,
tag="latest",
**kwargs,
):
"""Start MAPDL on the ANSYS jupyter cluster in gRPC mode.
Parameters
----------
nproc : int, optional
Number of processors. Defaults to 2.
memory : float, optional
Fixed amount of memory to request for MAPDL in Gigabytes. If
the mapdl instance requires more ram than your provide MAPDL
may segfault.
loglevel : str, optional
Sets which messages are printed to the console. Default
``'INFO'`` logs out all MAPDL messages, ``'WARNING``` prints only
messages containing MAPDL warnings, and ``'ERROR'`` prints only
error messages.
additional_switches : str, optional
Additional switches for MAPDL, for example ``"-p aa_r"``, the
academic research license, would be added with:
- ``additional_switches="-p aa_r"``
Avoid adding switches like ``"-i"`` ``"-o"`` or ``"-b"`` as
these are already included to start up the MAPDL server. See
the notes section for additional details.
start_timeout : float, optional
Maximum allowable time to connect to the MAPDL server.
tag : str, optional
Docker image tag from `PyAnsys MAPDL Image
<https://github.com/orgs/pyansys/packages/container/package/pymapdl%2Fmapdl>`. Defaults
to ``"latest"``. For example "v22.1.0".
Returns
-------
MapdlGrpc
MAPDL instance.
Examples
--------
Launch MAPDL using the default configuration.
>>> from ansys.mapdl import launch_mapdl
>>> mapdl = launch_mapdl()
Launch MAPDL and guarantee 16 GB minimum RAM and 8 CPUs.
>>> mapdl = launch_mapdl(memory=16, nproc=8)
"""
# attempt to connect to the remote scheduler
check_manager()
# check additional_switches args
if "-m " in additional_switches:
raise ValueError(
'Memory option "-m" not permitted when launching from the '
"kubernetes cluster and is set with the ``memory`` parameter"
)
if "-np " in additional_switches:
raise ValueError(
'CPU option "-np" not permitted when launching from the '
"kubernetes cluster and is set with the ``nproc`` parameter"
)
# check resources
nproc = int(nproc)
if nproc < 0:
raise ValueError("Requested CPUs ``nproc`` must be greater than 0")
if nproc > MAX_CPU:
raise ValueError(f"Requested CPUs ``nproc`` must be less than {MAX_CPU}")
if memory < 0.25:
raise ValueError("Requested memory ``mem`` must be greater than 0.25")
if memory > MAX_MEM:
raise ValueError(f"Requested memory ``mem`` must be less than than {MAX_MEM}")
# # convert memory from GB to Mi
memory *= 1024
if "-smp" in additional_switches:
raise ValueError(
'The additional switch "-smp" is incompatible with docker containers.'
)
additional_switches += f"-m -{memory} -np {nproc}"
args = additional_switches.split()
ip, name = manager.spawn_mapdl(version=tag, args=args, verbose=verbose)
# connect to the pod instance
from ansys.mapdl.core import Mapdl
return Mapdl(ip, loglevel=loglevel)
|
7,140 |
def forward_energy(image, mode, mask=None):
"""Find the edge magnitude using forward energy for seam carving.
Depending on the direction in `mode`, determines the magnitude of
each pixel based on the new edges created after removing a seam
containing that pixel.
Parameters
----------
image : 2-D array
Image to process.
mode : str {'horizontal', 'vertical'}
Indicates whether seams are to be removed vertically or horizontally.
Removing seams horizontally will decrease the height whereas removing
vertically will decrease the width.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The forward energy edge map.
References
----------
.. [1] Michael Rubinstein, Ariel Shamir, and Shai Avidan
"Improved Seam Carving for Video Retargeting"
http://www.faculty.idc.ac.il/arik/SCWeb/vidret/index.html
"""
assert_nD(image, 2)
image = img_as_float(image)
if mode == 'horizontal':
image = np.swapaxes(image, 0, 1)
height = image.shape[0]
width = image.shape[1]
energy = np.zeros((height, width))
m = np.zeros((height, width))
U = np.roll(image, 1, axis=0)
L = np.roll(image, 1, axis=1)
R = np.roll(image, -1, axis=1)
cU = np.abs(R - L)
cL = np.abs(U - L) + cU
cR = np.abs(U - R) + cU
for i in range(1, height):
mU = m[i - 1]
mL = np.roll(mU, 1)
mR = np.roll(mU, -1)
mULR = np.array([mU, mL, mR])
cULR = np.array([cU[i], cL[i], cR[i]])
mULR += cULR
argmins = np.argmin(mULR, axis=0)
m[i] = np.choose(argmins, mULR)
energy[i] = np.choose(argmins, cULR)
if mode == 'horizontal':
energy = np.swapaxes(energy, 0, 1)
return _mask_filter_result(energy, mask)
|
def forward_energy(image, mode, mask=None):
"""Find the edge magnitude using forward energy for seam carving.
Depending on the direction in `mode`, determines the magnitude of
each pixel based on the new edges created after removing a seam
containing that pixel.
Parameters
----------
image : 2-D array
Image to process.
mode : str {'horizontal', 'vertical'}
Indicates whether seams are to be removed vertically or horizontally.
Removing seams horizontally will decrease the height whereas removing
vertically will decrease the width.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The forward energy edge map.
References
----------
.. [1] Michael Rubinstein, Ariel Shamir, and Shai Avidan
"Improved Seam Carving for Video Retargeting"
http://www.faculty.idc.ac.il/arik/SCWeb/vidret/index.html
"""
assert_nD(image, 2)
image = img_as_float(image)
if mode == 'horizontal':
image = np.swapaxes(image, 0, 1)
height, width = image.shape
width = image.shape[1]
energy = np.zeros((height, width))
m = np.zeros((height, width))
U = np.roll(image, 1, axis=0)
L = np.roll(image, 1, axis=1)
R = np.roll(image, -1, axis=1)
cU = np.abs(R - L)
cL = np.abs(U - L) + cU
cR = np.abs(U - R) + cU
for i in range(1, height):
mU = m[i - 1]
mL = np.roll(mU, 1)
mR = np.roll(mU, -1)
mULR = np.array([mU, mL, mR])
cULR = np.array([cU[i], cL[i], cR[i]])
mULR += cULR
argmins = np.argmin(mULR, axis=0)
m[i] = np.choose(argmins, mULR)
energy[i] = np.choose(argmins, cULR)
if mode == 'horizontal':
energy = np.swapaxes(energy, 0, 1)
return _mask_filter_result(energy, mask)
|
46,005 |
def integral(input: torch.Tensor) -> torch.Tensor:
r"""Calculates integral of the input tensor.
Args:
image: the input tensor with shape :math:`(B,C,H,W)` with shape :math:`(B,C,H,W)`.
Returns:
Integral tensor for the input tensor
Examples:
>>> input = torch.randn(2,2,5,5)
>>> output = integral(input)
>>> output.shape
torch.Size([2, 2, 5, 5])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
S = torch.cumsum(input, dim=-1)
S = torch.cumsum(S, dim=-2)
return S
|
def integral(input: torch.Tensor) -> torch.Tensor:
r"""Calculates the integral image tensor summed area table .
Args:
image: the input tensor with shape :math:`(B,C,H,W)` with shape :math:`(B,C,H,W)`.
Returns:
Integral tensor for the input tensor
Examples:
>>> input = torch.randn(2,2,5,5)
>>> output = integral(input)
>>> output.shape
torch.Size([2, 2, 5, 5])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
S = torch.cumsum(input, dim=-1)
S = torch.cumsum(S, dim=-2)
return S
|
6,196 |
def githubSetup(GITHUBTOKEN):
"""Import the GITHUB Token and add proper header."""
LOGGER.info('Setting up GITHUB')
if not GITHUBTOKEN:
try:
from GitTokens import GITHUBTOKEN
except ImportError:
raise ImportError(G_ERROR)
if GITHUBTOKEN:
SESSION.headers.update({'Accept': 'application/vnd.github.v3+json',
'Authorization': 'token %s ' % GITHUBTOKEN})
|
def githubSetup(GITHUBTOKEN=''):
"""Import the GITHUB Token and add proper header."""
LOGGER.info('Setting up GITHUB')
if not GITHUBTOKEN:
try:
from GitTokens import GITHUBTOKEN
except ImportError:
raise ImportError(G_ERROR)
if GITHUBTOKEN:
SESSION.headers.update({'Accept': 'application/vnd.github.v3+json',
'Authorization': 'token %s ' % GITHUBTOKEN})
|
36,353 |
def _flat_divmod(na, da, nb, db):
"""(a // b, a % b)"""
# div = a // b
n_div, d_div = na * db, da * nb
div = n_div // d_div
# mod = a - b * div == (na*db - da*nb * div) / (da*db)
n_mod, d_mod = n_div - d_div * div, da*db
return div, n_mod, d_mod
|
def _flat_divmod(na, da, nb, db):
"""(a // b, a % b)"""
# div = a // b
n_div, d_div = na * db, da * nb
div = n_div // d_div
# mod = a - b * div == (na*db - da*nb * div) / (da*db)
n_mod, d_mod = n_div - d_div * div, da * db
return div, n_mod, d_mod
|
11,071 |
def filter_tests_by_tags(suite, tags, exclude_tags):
suite_class = type(suite)
return suite_class(
test for test in iter_test_cases(suite)
if does_test_match_tags(test, tags, exclude_tags)
)
|
def filter_tests_by_tags(suite, tags, exclude_tags):
suite_class = type(suite)
return suite_class(
test for test in iter_test_cases(suite)
if test_match_tags(test, tags, exclude_tags)
)
|
31,082 |
def extract_list_of_events_from_indicator(indicator_data: Dict[str, Any]) -> Dict[str, Any]:
list_of_events = indicator_data.get("CustomFields", {}).get('chronicleassetsummary', [])
number_of_events = {'GENERIC_EVENT': 0, 'NETWORK_HTTP': 0, 'NETWORK_CONNECTION': 0, 'USER_LOGIN': 0, 'OTHERS': 0}
if list_of_events:
for event in list_of_events:
if event.get('eventtype') in number_of_events.keys():
number_of_events[event.get('eventtype', 0)] += 1
else:
number_of_events['OTHERS'] += 1
return create_pie(number_of_events)
|
def extract_list_of_events_from_indicator(indicator_data: Dict[str, Any]) -> Dict[str, Any]:
list_of_events = indicator_data.get("CustomFields", {}).get('chronicleassetsummary', [])
number_of_events = {'GENERIC_EVENT': 0, 'NETWORK_HTTP': 0, 'NETWORK_CONNECTION': 0, 'USER_LOGIN': 0, 'OTHERS': 0}
if list_of_events:
for event in list_of_events:
if event.get('eventtype') in number_of_events:
number_of_events[event.get('eventtype', 0)] += 1
else:
number_of_events['OTHERS'] += 1
return create_pie(number_of_events)
|
59,049 |
def test_msg_data_bounds_check(get_contract):
code = """
@external
def foo(bar: uint256) -> Bytes[4]:
data: Bytes[4] = msg.data[4] # msg.data.size > 4 (max_len)
return data
"""
contract = get_contract(code)
with pytest.raises(TransactionFailed):
contract.foo(42)
|
def test_msg_data_bounds_check(get_contract):
code = """
@external
def foo(bar: uint256) -> Bytes[36]:
data: Bytes[36] = slice(msg.data, 0, 36)
return data
"""
contract = get_contract(code)
contract.foo(42).hex() == "c2985578000000000000000000000000000000000000000000000000000000000000002a" # fn sig + abi.encode(42)
|
41,373 |
def test_costml(signal_bkps_5D_noisy, signal_bkps_1D_noisy):
"""Test if `CostMl.fit` actually (re-)fits the metric matrix.
Refitting the metric matrix should only happen if the user did not
provide a metric matrix.
"""
# no user-defined metric matrix
c = CostMl()
for (signal, bkps) in (signal_bkps_5D_noisy, signal_bkps_1D_noisy):
c.fit(signal=signal)
c.error(0, 100)
c.sum_of_costs(bkps)
# with a user-defined metric matric
signal, bkps = signal_bkps_5D_noisy
_, n_dims = signal.shape
c = CostMl(metric=np.eye(n_dims))
c.fit(signal)
c.error(10, 50)
assert np.allclose(c.metric, np.eye(n_dims))
|
def test_costml(signal_bkps_5D_noisy, signal_bkps_1D_noisy):
"""Test if `CostMl.fit` actually (re-)fits the metric matrix.
Refitting the metric matrix should only happen if the user did not
provide a metric matrix.
"""
# no user-defined metric matrix
c = CostMl()
for (signal, bkps) in (signal_bkps_5D_noisy, signal_bkps_1D_noisy):
c.fit(signal=signal)
c.error(0, 100)
c.sum_of_costs(bkps)
# with a user-defined metric matrix
signal, bkps = signal_bkps_5D_noisy
_, n_dims = signal.shape
c = CostMl(metric=np.eye(n_dims))
c.fit(signal)
c.error(10, 50)
assert np.allclose(c.metric, np.eye(n_dims))
|
37,817 |
def execute_cmd(
docker: DockerContainer,
cmd_str: str,
before_build: bool,
target_arch: str,
env: Optional[Dict[str, str]] = None,
) -> None:
invalid_cmd = False
pip_install_env_create = True
tmpdirpath = ""
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
cmds = [cmd.strip().replace("\t", " ") for cmd in cmd_str.split("&&")]
# Copy install_deps.sh script from container's tmp to host machine tmp and use it
if not os.path.isfile(target_arch_env.tmp + "/install_deps.sh"):
docker.call(
[
"cp",
target_arch_env.tmp + "/install_deps.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
for cmd in cmds:
if cmd.startswith("yum "):
# Install the dependencies into the emulated docker container and
# Copy back the installed files into host machine
print(
"\nRunning cmd: '"
+ cmd
+ "' in target's native container '"
+ native_docker_images[target_arch]
+ "' and copy the artifacts into the toolchain\n"
)
subprocess.run(
[
"docker",
"run",
"--rm",
"--volume=/:/host", # ignored on CircleCI
native_docker_images[target_arch],
"bash",
"-c",
target_arch_env.host_machine_tmp_in_container
+ '/install_deps.sh "'
+ cmd
+ '"',
],
check=True,
)
# The instaleld dependencies are in /tmp/install_deps on host machine.
# Copy them into the toolchain
dir_list = os.listdir(target_arch_env.host_machine_deps_usr_out_container)
for dir in dir_list:
docker.call(
[
"cp",
"-rf",
target_arch_env.host_machine_deps_usr_in_container + "/" + dir,
target_arch_env.toolchain_deps,
]
)
elif cmd.startswith("pip ") or cmd.startswith("python ") or cmd.startswith("python3 "):
if pip_install_env_create is True and before_build is True:
tmpdirpath = docker.call(["mktemp", "-d"], capture_output=True).strip()
# Adding temp directory in PATH
env_path_var = env.get("PATH")
env_path_var = f"{tmpdirpath}:{env_path_var}"
temp_dict = {"PATH": env_path_var}
env.update(temp_dict)
build_pip = docker.call(
["which", "build-pip"], env=env, capture_output=True
).strip()
build_pybin = build_pip[: build_pip.rindex("/")]
docker.call(["ln", "-s", build_pip, tmpdirpath + "/pip"], env=env)
docker.call(
["ln", "-s", build_pybin + "/build-pip3", tmpdirpath + "/pip3"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python", tmpdirpath + "/python"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python3", tmpdirpath + "/python3"], env=env
)
pip_install_env_create = False
docker.call(["sh", "-c", cmd], env=env)
else:
print(
"During cross compilation, in wheel build phase, only pip/python/yum related commands are allowed"
)
invalid_cmd = True
break
docker.call(["rm", "-rf", tmpdirpath])
if invalid_cmd is True:
sys.exit(1)
|
def execute_cmd(
docker: DockerContainer,
cmd_str: str,
before_build: bool,
target_arch: str,
env: Optional[Dict[str, str]] = None,
) -> None:
invalid_cmd = False
pip_install_env_create = True
tmpdirpath = ""
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
cmds = [cmd.strip().replace("\t", " ") for cmd in cmd_str.split("&&")]
# Copy install_deps.sh script from container's tmp to host machine tmp and use it
if not os.path.isfile(target_arch_env.tmp + "/install_deps.sh"):
docker.call(
[
"cp",
f"{target_arch_env.tmp}/install_deps.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
for cmd in cmds:
if cmd.startswith("yum "):
# Install the dependencies into the emulated docker container and
# Copy back the installed files into host machine
print(
"\nRunning cmd: '"
+ cmd
+ "' in target's native container '"
+ native_docker_images[target_arch]
+ "' and copy the artifacts into the toolchain\n"
)
subprocess.run(
[
"docker",
"run",
"--rm",
"--volume=/:/host", # ignored on CircleCI
native_docker_images[target_arch],
"bash",
"-c",
target_arch_env.host_machine_tmp_in_container
+ '/install_deps.sh "'
+ cmd
+ '"',
],
check=True,
)
# The instaleld dependencies are in /tmp/install_deps on host machine.
# Copy them into the toolchain
dir_list = os.listdir(target_arch_env.host_machine_deps_usr_out_container)
for dir in dir_list:
docker.call(
[
"cp",
"-rf",
target_arch_env.host_machine_deps_usr_in_container + "/" + dir,
target_arch_env.toolchain_deps,
]
)
elif cmd.startswith("pip ") or cmd.startswith("python ") or cmd.startswith("python3 "):
if pip_install_env_create is True and before_build is True:
tmpdirpath = docker.call(["mktemp", "-d"], capture_output=True).strip()
# Adding temp directory in PATH
env_path_var = env.get("PATH")
env_path_var = f"{tmpdirpath}:{env_path_var}"
temp_dict = {"PATH": env_path_var}
env.update(temp_dict)
build_pip = docker.call(
["which", "build-pip"], env=env, capture_output=True
).strip()
build_pybin = build_pip[: build_pip.rindex("/")]
docker.call(["ln", "-s", build_pip, tmpdirpath + "/pip"], env=env)
docker.call(
["ln", "-s", build_pybin + "/build-pip3", tmpdirpath + "/pip3"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python", tmpdirpath + "/python"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python3", tmpdirpath + "/python3"], env=env
)
pip_install_env_create = False
docker.call(["sh", "-c", cmd], env=env)
else:
print(
"During cross compilation, in wheel build phase, only pip/python/yum related commands are allowed"
)
invalid_cmd = True
break
docker.call(["rm", "-rf", tmpdirpath])
if invalid_cmd is True:
sys.exit(1)
|
35,390 |
def string_to_uidgid(userandgroup):
""" Translate the userandgroup string to uid and gid.
The userandgroup parameter must be a string of the format '<user>[:<group>]'.
User and group can be strings or integers. If no group is given, -1 will be
returned for gid.
"""
params = userandgroup.split(':')
if len(params) > 2:
return None, None, "User and group '%s' are in wrong format. Expected <user>[:group]" % userandgroup
gid = -1
if len(params) == 2:
if params[1].isnumeric():
gid = int(params[1])
else:
try:
gr = grp.getgrnam(params[1])
gid = gr.gr_gid
except Exception as e:
return None, None, 'Could not resolve group %s: %s' % (params[1], str(e))
if params[0].isnumeric():
uid = int(params[0])
else:
try:
passwd = pwd.getpwnam(params[0])
uid = passwd.pw_uid
except Exception as e:
return None, None, 'Could not resolve user %s: %s' % (params[0], str(e))
return uid, gid, None
|
def string_to_uidgid(userandgroup):
""" Translate the userandgroup string to uid and gid.
The userandgroup parameter must be a string of the format '<user>[:<group>]'.
User and group can be strings or integers. If no group is given, -1 will be
returned for gid.
"""
params = userandgroup.split(':')
if len(params) > 2:
return None, None, "User and group '%s' are in wrong format. Expected <user>[:group]" % userandgroup
gid = -1
if len(params) == 2:
if params[1].isnumeric():
gid = int(params[1])
else:
try:
gr = grp.getgrnam(params[1])
gid = gr.gr_gid
except KeyError as e:
return None, None, 'Could not resolve group %s: %s' % (params[1], str(e))
if params[0].isnumeric():
uid = int(params[0])
else:
try:
passwd = pwd.getpwnam(params[0])
uid = passwd.pw_uid
except Exception as e:
return None, None, 'Could not resolve user %s: %s' % (params[0], str(e))
return uid, gid, None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.