Dataset schema (column, dtype, value range; ⌀ in the original header marks nullable columns):

| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 1–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–239 |
| max_stars_repo_name | string | lengths 5–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 (nullable) |
| max_issues_repo_path | string | lengths 3–239 |
| max_issues_repo_name | string | lengths 5–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 (nullable) |
| max_forks_repo_path | string | lengths 3–239 |
| max_forks_repo_name | string | lengths 5–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 (nullable) |
| content | string | lengths 1–1.03M |
| avg_line_length | float64 | 1–958k |
| max_line_length | int64 | 1–1.03M |
| alphanum_fraction | float64 | 0–1 |
hexsha: 7949686514a6610ed1a7c04a8e2dcf9fe9e28b80 | size: 1,646 | ext: py | lang: Python
max_stars: samples/generated_samples/cloudprivatecatalog_generated_privatecatalog_v1beta1_private_catalog_search_versions_sync.py @ renovate-bot/python-private-catalog (head 2f48fd9aa925f29d382265115aac8faa77786e54) | licenses: ["Apache-2.0"] | count: 7 | events: 2021-02-21T10:39:41.000Z to 2021-12-07T07:31:28.000Z
max_issues: samples/generated_samples/cloudprivatecatalog_generated_privatecatalog_v1beta1_private_catalog_search_versions_sync.py @ renovate-bot/python-private-catalog (head 2f48fd9aa925f29d382265115aac8faa77786e54) | licenses: ["Apache-2.0"] | count: 43 | events: 2021-06-03T01:32:48.000Z to 2022-03-07T17:02:04.000Z
max_forks: google/cloud/privatecatalog/v1beta1/privatecatalog-v1beta1-py/samples/generated_samples/cloudprivatecatalog_generated_privatecatalog_v1beta1_private_catalog_search_versions_sync.py @ googleapis/googleapis-gen (head d84824c78563d59b0e58d5664bfaa430e9ad7e7a) | licenses: ["Apache-2.0"] | count: 4 | events: 2021-01-28T23:25:45.000Z to 2021-08-30T01:55:16.000Z
content:
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SearchVersions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-private-catalog
# [START cloudprivatecatalog_generated_privatecatalog_v1beta1_PrivateCatalog_SearchVersions_sync]
from google.cloud import privatecatalog_v1beta1
def sample_search_versions():
"""Snippet for search_versions"""
# Create a client
client = privatecatalog_v1beta1.PrivateCatalogClient()
# Initialize request argument(s)
request = privatecatalog_v1beta1.SearchVersionsRequest(
resource="resource_value",
query="query_value",
)
# Make the request
page_result = client.search_versions(request=request)
for response in page_result:
print(response)
# [END cloudprivatecatalog_generated_privatecatalog_v1beta1_PrivateCatalog_SearchVersions_sync]
avg_line_length: 34.291667 | max_line_length: 97 | alphanum_fraction: 0.769137
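The generated sample above only prints each paged result. As a hedged usage sketch (not part of the dataset row), here is one common way the returned pager is consumed; the `resource`/`query` placeholders and the `name` field on each result are assumptions, not taken from the snippet.

```python
# Sketch only: assumes google-cloud-private-catalog is installed and that
# credentials are available in the environment. The resource/query strings
# and the `name` field are illustrative assumptions.
from google.cloud import privatecatalog_v1beta1


def list_version_names(resource: str, query: str) -> list:
    client = privatecatalog_v1beta1.PrivateCatalogClient()
    request = privatecatalog_v1beta1.SearchVersionsRequest(
        resource=resource,
        query=query,
    )
    # search_versions returns a pager; iterating it fetches further pages
    # transparently, just like the for-loop in the generated sample.
    return [version.name for version in client.search_versions(request=request)]
```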
hexsha: 794968f1d36bfbab95f8a78c3d6487365c36a058 | size: 357 | ext: py | lang: Python
max_stars: pose_classification_kit/config.py @ huangshunliang/pose-classification-kit (head a51ae6aeeb4185c1b70f4a3fa83484b65b524623) | licenses: ["MIT"] | count: 12 | events: 2021-07-04T22:04:38.000Z to 2022-03-25T05:25:26.000Z
max_issues: pose_classification_kit/config.py @ huangshunliang/pose-classification-kit (head a51ae6aeeb4185c1b70f4a3fa83484b65b524623) | licenses: ["MIT"] | count: 1 | events: 2022-03-07T07:36:40.000Z to 2022-03-08T06:58:00.000Z
max_forks: pose_classification_kit/config.py @ huangshunliang/pose-classification-kit (head a51ae6aeeb4185c1b70f4a3fa83484b65b524623) | licenses: ["MIT"] | count: null | events: null
content:
import pathlib
# Path to OpenPose installation folder on your system.
OPENPOSE_PATH = pathlib.Path("C:/") / "Program files" / "OpenPose"
# Path to model folder.
MODELS_PATH = pathlib.Path(".").resolve() / "pose_classification_kit" / "models"
# Path to datasets folder.
DATASETS_PATH = pathlib.Path(".").resolve() / "pose_classification_kit" / "datasets"
avg_line_length: 32.454545 | max_line_length: 84 | alphanum_fraction: 0.72549
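config.py above only defines three pathlib constants. A small hypothetical consumer is sketched below; the glob patterns and the assumption that the package is importable are illustrative and not taken from pose-classification-kit.

```python
# Hypothetical consumer of the constants defined above; the globbed file
# extensions are assumptions for illustration.
from pose_classification_kit.config import OPENPOSE_PATH, MODELS_PATH, DATASETS_PATH

if not OPENPOSE_PATH.exists():
    print(f"OpenPose not found at {OPENPOSE_PATH}; update config.py for your system.")

# pathlib composes further paths with the / operator, just as config.py does.
model_files = sorted(MODELS_PATH.glob("**/*.h5"))
dataset_files = sorted(DATASETS_PATH.glob("**/*.csv"))
print(f"{len(model_files)} model files, {len(dataset_files)} dataset files")
```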
hexsha: 794968ff990d094ea758a101ab34fa6c5171c33b | size: 4,347 | ext: py | lang: Python
max_stars: cytopy/tests/test_geometry.py @ JANHMS/CytoPy (head 8537d707fa25645b55b4ec1e25fff9f19847fb1b) | licenses: ["MIT"] | count: 41 | events: 2020-04-08T11:01:28.000Z to 2022-03-11T17:17:18.000Z
max_issues: cytopy/tests/test_geometry.py @ JANHMS/CytoPy (head 8537d707fa25645b55b4ec1e25fff9f19847fb1b) | licenses: ["MIT"] | count: 27 | events: 2020-04-07T14:59:24.000Z to 2022-03-01T20:43:34.000Z
max_forks: cytopy/tests/test_geometry.py @ JANHMS/CytoPy (head 8537d707fa25645b55b4ec1e25fff9f19847fb1b) | licenses: ["MIT"] | count: 8 | events: 2020-04-28T15:16:24.000Z to 2022-03-02T19:02:14.000Z
content:
from cytopy.data.geometry import PopulationGeometry, ThresholdGeom, PolygonGeom, create_polygon, \
polygon_overlap, create_convex_hull, probablistic_ellipse, inside_ellipse
from shapely.geometry import Polygon
from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture
import numpy as np
import pytest
def test_create_geom():
kwargs = dict(x="X",
y="Y",
transform_x="logicle",
transform_y="logicle")
test = PopulationGeometry(**kwargs)
for k, v in kwargs.items():
assert test[k] == v
def test_create_threshold():
kwargs = dict(x="X",
y="Y",
transform_x="logicle",
transform_y="logicle",
x_threshold=4.344,
y_threshold=2.435)
test = ThresholdGeom(**kwargs)
for k, v in kwargs.items():
assert test[k] == v
def test_create_polygongeom():
kwargs = dict(x="X",
y="Y",
transform_x="logicle",
transform_y="logicle",
x_values=list(np.random.normal(0, 0.5, 1000)),
y_values=list(np.random.normal(0, 0.5, 1000)))
test = PolygonGeom(**kwargs)
for k, v in kwargs.items():
assert test[k] == v
def test_create_polygon():
x = [2, 6, 9, 10, 2]
y = [5, 19, 18, 10, 5]
poly = create_polygon(x, y)
assert isinstance(poly, Polygon)
assert np.array_equal(poly.exterior.xy[0], np.array(x))
assert np.array_equal(poly.exterior.xy[1], np.array(y))
@pytest.mark.parametrize("poly1,poly2,expected",
[(np.array([[0, 4.], [10, 4.], [10, 8.2], [10, 8.2], [0, 8.2], [0, 4.]]),
np.array([[0, 4.], [5, 4.], [5, 8.2], [5, 8.2], [0, 8.2], [0, 4.]]),
0.5),
(np.array([[0, 4.], [10, 4.], [10, 8.2], [10, 8.2], [0, 4.]]),
np.array([[12, 4.], [15, 4.], [15, 8.2], [15, 8.2], [12, 4.]]),
0.0)])
def test_polygon_overlap(poly1, poly2, expected):
poly1, poly2 = Polygon(poly1), Polygon(poly2)
assert polygon_overlap(poly1, poly2) == expected
assert polygon_overlap(poly1, poly2, threshold=0.6) == 0.
def test_create_convex_hull():
test_data = make_blobs(n_samples=1000,
n_features=2,
centers=1,
center_box=(0, 5),
random_state=42)[0]
x, y = test_data[:, 0], test_data[:, 1]
hull = create_convex_hull(x, y)
assert isinstance(hull[0], list)
assert isinstance(hull[1], list)
for idx, t in enumerate([x, y]):
lower = np.quantile(t, 0.05)
upper = np.quantile(t, 0.95)
t_ = [i for i in t if lower < i < upper]
for i in range(100):
s = np.random.choice(t_, 1)[0]
assert s >= np.min(hull[idx])
assert s <= np.max(hull[idx])
@pytest.mark.parametrize("conf", [0.95, 0.8, 0.5])
def test_probablistic_ellipse(conf):
test_data = make_blobs(n_samples=1000,
n_features=2,
centers=1,
center_box=(1, 5),
random_state=42)[0]
model = GaussianMixture(random_state=42, n_components=1)
model.fit(test_data)
center = model.means_[0]
width, height, angle = probablistic_ellipse(model.covariances_[0], conf)
mask = inside_ellipse(test_data, center=center, width=width, height=height, angle=angle)
assert test_data[mask].shape[0] / test_data.shape[0] == pytest.approx(conf, 0.1)
@pytest.mark.parametrize("test_data,expected_mask",
[(np.array([[3, 4.5], [7.5, 9]]), [True, False]),
(np.array([[3, 4.5], [0, 0]]), [True, False]),
(np.array([[11, 5], [6.2, 4.3]]), [False, True])])
def test_inside_ellipse(test_data, expected_mask):
center, width, height, angle = (5, 5), 10, 5, 15
mask = inside_ellipse(data=test_data,
center=center,
width=width,
height=height,
angle=angle)
assert isinstance(mask, list)
assert np.array_equal(mask, expected_mask)
avg_line_length: 37.8 | max_line_length: 98 | alphanum_fraction: 0.529791
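The parametrized `polygon_overlap` test above expects 0.5 for a half-covered rectangle and 0.0 for disjoint rectangles. The shapely sketch below reproduces that geometry directly; whether `polygon_overlap` normalizes by the first polygon's area or by the union is an assumption here, since both readings give 0.5 for this pair.

```python
# Standalone shapely sketch of the first parametrized test case above.
import numpy as np
from shapely.geometry import Polygon

poly1 = Polygon(np.array([[0, 4.0], [10, 4.0], [10, 8.2], [0, 8.2], [0, 4.0]]))
poly2 = Polygon(np.array([[0, 4.0], [5, 4.0], [5, 8.2], [0, 8.2], [0, 4.0]]))

intersection = poly1.intersection(poly2).area
print(intersection / poly1.area)               # 0.5 (half of poly1 is covered)
print(intersection / poly1.union(poly2).area)  # also 0.5 here, since poly2 lies inside poly1
```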
hexsha: 794969ae4aaa75d23653a7ccba86423bae0926a6 | size: 4,830 | ext: py | lang: Python
max_stars: hpOneView/resources/networking/fabrics.py @ doziya/hpeOneView (head ef9bee2a0e1529e93bd6e8d84eff07fb8533049d) | licenses: ["MIT"] | count: 107 | events: 2015-02-16T12:40:36.000Z to 2022-03-09T05:27:58.000Z
max_issues: hpOneView/resources/networking/fabrics.py @ doziya/hpeOneView (head ef9bee2a0e1529e93bd6e8d84eff07fb8533049d) | licenses: ["MIT"] | count: 148 | events: 2015-03-17T16:09:39.000Z to 2020-02-09T16:28:06.000Z
max_forks: hpOneView/resources/networking/fabrics.py @ doziya/hpeOneView (head ef9bee2a0e1529e93bd6e8d84eff07fb8533049d) | licenses: ["MIT"] | count: 80 | events: 2015-01-03T22:58:53.000Z to 2021-04-16T11:37:03.000Z
content:
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from hpOneView.resources.resource import ResourceClient
class Fabrics(object):
"""
Fabrics API client.
"""
URI = '/rest/fabrics'
DEFAULT_VALUES = {
'300': {'type': 'vlan-pool'},
'500': {'type': 'vlan-pool'}
}
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Gets a paginated collection of all fabrics based on the specified parameters.
Filters can be used in the URL to control the number of fabrics that are returned.
With no filters specified, the API returns all supported fabrics.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of fabrics.
"""
return self._client.get_all(start, count, filter=filter, sort=sort)
def get(self, id_or_uri):
"""
Gets the fabric with the specified ID.
Args:
id_or_uri: ID or URI of fabric.
Returns:
dict: The fabric.
"""
return self._client.get(id_or_uri)
def get_by(self, field, value):
"""
Gets all fabrics that match the filter.
The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: A list of fabrics.
"""
return self._client.get_by(field, value)
def get_reserved_vlan_range(self, id_or_uri):
"""
Gets the reserved vlan ID range for the fabric.
Note:
This method is only available on HPE Synergy.
Args:
id_or_uri: ID or URI of fabric.
Returns:
dict: vlan-pool
"""
uri = self._client.build_uri(id_or_uri) + "/reserved-vlan-range"
return self._client.get(uri)
def update_reserved_vlan_range(self, id_or_uri, vlan_pool, force=False):
"""
Updates the reserved vlan ID range for the fabric.
Note:
This method is only available on HPE Synergy.
Args:
id_or_uri: ID or URI of fabric.
vlan_pool (dict): vlan-pool data to update.
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
Returns:
dict: The fabric
"""
uri = self._client.build_uri(id_or_uri) + "/reserved-vlan-range"
return self._client.update(resource=vlan_pool, uri=uri, force=force, default_values=self.DEFAULT_VALUES)
avg_line_length: 34.255319 | max_line_length: 116 | alphanum_fraction: 0.646584
hexsha: 79496a4e0cc9d5c6aeb343e9f2c5d32c0368939a | size: 200 | ext: py | lang: Python
max_stars: Chapter10/07_leds.py @ PacktPublishing/MicroPython-Cookbook (head ffd6aa15c303459570a89ba31b5bc734f05cb387) | licenses: ["MIT"] | count: 16 | events: 2019-07-01T16:24:22.000Z to 2022-03-03T06:54:57.000Z
max_issues: Chapter10/07_leds.py @ ccwu0918/MicroPython-Cookbook (head ffd6aa15c303459570a89ba31b5bc734f05cb387) | licenses: ["MIT"] | count: null | events: null
max_forks: Chapter10/07_leds.py @ ccwu0918/MicroPython-Cookbook (head ffd6aa15c303459570a89ba31b5bc734f05cb387) | licenses: ["MIT"] | count: 19 | events: 2019-04-17T08:30:12.000Z to 2022-01-14T03:05:37.000Z
content:
from machine import Pin
import time
red = Pin(0, Pin.OUT)
blue = Pin(2, Pin.OUT)
while True:
blue.value(0)
red.value(1)
time.sleep(1)
blue.value(1)
red.value(0)
time.sleep(1)
avg_line_length: 15.384615 | max_line_length: 23 | alphanum_fraction: 0.62
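A minimal MicroPython variant of the blink loop above, generalized to a list of pins; it assumes the same board-specific GPIO numbers (0 and 2) used in the recipe.

```python
# MicroPython sketch: alternate any number of LEDs, two at a time here.
from machine import Pin
import time

leds = [Pin(0, Pin.OUT), Pin(2, Pin.OUT)]
state = 0
while True:
    for offset, led in enumerate(leds):
        led.value((state + offset) % 2)  # neighbouring LEDs get opposite states
    state ^= 1
    time.sleep(1)
```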
hexsha: 79496b3c98eb4a6cd7441368dd5306e5c4ab036c | size: 2,537 | ext: py | lang: Python
max_stars: sys/sys-programbar.py @ all3g/pieces (head bc378fd22ddc700891fe7f34ab0d5b341141e434) | licenses: ["CNRI-Python"] | count: 34 | events: 2016-10-31T02:05:24.000Z to 2018-11-08T14:33:13.000Z
max_issues: sys/sys-programbar.py @ join-us/python-programming (head bc378fd22ddc700891fe7f34ab0d5b341141e434) | licenses: ["CNRI-Python"] | count: 2 | events: 2017-05-11T03:00:31.000Z to 2017-11-01T23:37:37.000Z
max_forks: sys/sys-programbar.py @ join-us/python-programming (head bc378fd22ddc700891fe7f34ab0d5b341141e434) | licenses: ["CNRI-Python"] | count: 21 | events: 2016-08-19T09:05:45.000Z to 2018-11-08T14:33:16.000Z
content:
import sys
import time
class ProgressBar(object):
"""ProgressBae class holds the option of the progress bar.
The option are:
start State from which start the progress. For example,
if start is and the end is 10, the progress of this state
is 50%
end State in which the progress has terminated.
width --
fill String to use for "filled" used to represent the progress
blank String to use for "filled" used to represent remaining
format Format
"""
def __init__(self, start=0, end=10, width=12, fill='=', blank='.',
format='[%(fill)s>%(blank)s] %(progress)s%%',
incremental=True):
super(ProgressBar, self).__init__()
self.start = start
self.end = end
self.width = width
self.fill = fill
self.blank = blank
self.format = format
self.incremental = incremental
self.step = 100 / float(width)
self.reset()
def __add__(self, increment):
increment = self._get_progress(increment)
if 100 > self.progress + increment:
self.progress += increment
else:
self.progress = 100
return self
def __str__(self):
progressed = int(self.progress / self.step)
fill = progressed * self.fill
blank = (self.width-progressed) * self.blank
return self.format % {'fill': fill, 'blank': blank,
'progress': int(self.progress)}
__repr__ = __str__
def _get_progress(self, increment):
return float(increment * 100) / self.end
def reset(self):
"""Resets the current progress to the start point.
"""
self.progress = self._get_progress(self.start)
return self
class AnimatedProgressBar(ProgressBar):
def __init__(self, *args, **kwargs):
super(AnimatedProgressBar, self).__init__(*args, **kwargs)
self.stdout = kwargs.get('stdout', sys.stdout)
def show_progress(self):
if hasattr(self.stdout, 'isatty') and self.stdout.isatty():
self.stdout.write('\r')
else:
self.stdout.write('\n')
self.stdout.write(str(self))
self.stdout.flush()
if __name__ == "__main__":
p = AnimatedProgressBar(end=100, width=80)
while True:
p + 5
p.show_progress()
time.sleep(0.1)
if p.progress == 100:
break
avg_line_length: 30.939024 | max_line_length: 79 | alphanum_fraction: 0.568782
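The `__main__` block above only exercises `AnimatedProgressBar`. A short sketch of driving the plain `ProgressBar` directly is below; it assumes it runs in the same module or session as the classes above (the file name sys-programbar.py is not importable as-is because of the hyphen).

```python
# Usage sketch for the plain ProgressBar defined above: the bar is advanced
# with `+` (which converts the increment into a percentage of `end`) and is
# rendered by converting it to a string.
bar = ProgressBar(end=50, width=20)
for _ in range(10):
    bar + 5        # advance by 5 of 50 units, i.e. 10 percentage points
    print(bar)     # e.g. [========>............] 40% after four steps
```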
hexsha: 79496c5400746bb5a00f761a2ebf840c3e33225f | size: 619 | ext: py | lang: Python
max_stars: day-17/main.py @ jmolinski/advent-of-code-2017 (head cc199d38540b15a0f6b78e609f64b0045174ea7c) | licenses: ["Unlicense"] | count: null | events: null
max_issues: day-17/main.py @ jmolinski/advent-of-code-2017 (head cc199d38540b15a0f6b78e609f64b0045174ea7c) | licenses: ["Unlicense"] | count: null | events: null
max_forks: day-17/main.py @ jmolinski/advent-of-code-2017 (head cc199d38540b15a0f6b78e609f64b0045174ea7c) | licenses: ["Unlicense"] | count: null | events: null
content:
step = int(open('data.txt', 'r').read())
def track(n, rounds=1):
lst, pos = [0, 1], 1
for i in range(2, rounds):
pos = (pos + step + 1) % (i)
lst.insert(pos, i)
return lst[(lst.index(n) + 1) % len(lst)]
def track_after_zero(rounds=1):
pos, zero_pos, after_zero = 1, 0, 0
for i in range(2, rounds):
pos = (pos + step + 1) % (i)
if pos == zero_pos + 1:
after_zero = i
zero_pos += (zero_pos >= pos)
return after_zero
answer_part_1 = track(2017, 2018)
answer_part_2 = track_after_zero(50 * 1000 * 1000)
print(answer_part_1, answer_part_2)
avg_line_length: 22.925926 | max_line_length: 50 | alphanum_fraction: 0.568659
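`track` reads its `step` from data.txt, so the script above is not runnable in isolation. The sketch below is an equivalent brute-force formulation of the same spinlock with an explicit step of 3, the worked example from the Advent of Code 2017 day 17 puzzle statement, to show what `track` computes.

```python
# Self-contained spinlock illustration with an explicit step value; the real
# input still comes from data.txt in the script above.
def spinlock_after(n, rounds, step):
    buf, pos = [0], 0
    for i in range(1, rounds):
        pos = (pos + step) % len(buf) + 1  # walk forward, then insert after
        buf.insert(pos, i)
    return buf[(buf.index(n) + 1) % len(buf)]

print(spinlock_after(2017, 2018, 3))  # 638, the answer given for the example step
```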
hexsha: 79496cc111128fc8ed6b80f9dacbdfb615ce658a | size: 21,885 | ext: py | lang: Python
max_stars: allennlp/models/constituency_parser.py @ justindujardin/allennlp (head c4559f3751775aa8bc018db417edc119d29d8051) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-09-19T07:09:27.000Z to 2020-09-19T07:09:27.000Z
max_issues: allennlp/models/constituency_parser.py @ justindujardin/allennlp (head c4559f3751775aa8bc018db417edc119d29d8051) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: allennlp/models/constituency_parser.py @ justindujardin/allennlp (head c4559f3751775aa8bc018db417edc119d29d8051) | licenses: ["Apache-2.0"] | count: 1 | events: 2021-02-04T08:42:23.000Z to 2021-02-04T08:42:23.000Z
content:
from typing import Dict, Tuple, List, NamedTuple, Any
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
from nltk import Tree
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder, FeedForward
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import masked_softmax, get_lengths_from_binary_sequence_mask
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import EvalbBracketingScorer, DEFAULT_EVALB_DIR
from allennlp.common.checks import ConfigurationError
class SpanInformation(NamedTuple):
"""
A helper namedtuple for handling decoding information.
# Parameters
start : `int`
The start index of the span.
end : `int`
The exclusive end index of the span.
no_label_prob : `float`
The probability of this span being assigned the `NO-LABEL` label.
label_prob : `float`
The probability of the most likely label.
"""
start: int
end: int
label_prob: float
no_label_prob: float
label_index: int
@Model.register("constituency_parser")
class SpanConstituencyParser(Model):
"""
This `SpanConstituencyParser` simply encodes a sequence of text
with a stacked `Seq2SeqEncoder`, extracts span representations using a
`SpanExtractor`, and then predicts a label for each span in the sequence.
These labels are non-terminal nodes in a constituency parse tree, which we then
greedily reconstruct.
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
span_extractor : `SpanExtractor`, required.
The method used to extract the spans from the encoded sequence.
encoder : `Seq2SeqEncoder`, required.
The encoder that we will use in between embedding tokens and
generating span representations.
feedforward : `FeedForward`, required.
The FeedForward layer that we will use in between the encoder and the linear
projection to a distribution over span labels.
pos_tag_embedding : `Embedding`, optional.
Used to embed the `pos_tags` `SequenceLabelField` we get as input to the model.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
evalb_directory_path : `str`, optional (default=`DEFAULT_EVALB_DIR`)
The path to the directory containing the EVALB executable used to score
bracketed parses. By default, will use the EVALB included with allennlp,
which is located at allennlp/tools/EVALB . If `None`, EVALB scoring
is not used.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
span_extractor: SpanExtractor,
encoder: Seq2SeqEncoder,
feedforward: FeedForward = None,
pos_tag_embedding: Embedding = None,
initializer: InitializerApplicator = InitializerApplicator(),
evalb_directory_path: str = DEFAULT_EVALB_DIR,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.span_extractor = span_extractor
self.num_classes = self.vocab.get_vocab_size("labels")
self.encoder = encoder
self.feedforward_layer = TimeDistributed(feedforward) if feedforward else None
self.pos_tag_embedding = pos_tag_embedding or None
if feedforward is not None:
output_dim = feedforward.get_output_dim()
else:
output_dim = span_extractor.get_output_dim()
self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_classes))
representation_dim = text_field_embedder.get_output_dim()
if pos_tag_embedding is not None:
representation_dim += pos_tag_embedding.get_output_dim()
check_dimensions_match(
representation_dim,
encoder.get_input_dim(),
"representation dim (tokens + optional POS tags)",
"encoder input dim",
)
check_dimensions_match(
encoder.get_output_dim(),
span_extractor.get_input_dim(),
"encoder input dim",
"span extractor input dim",
)
if feedforward is not None:
check_dimensions_match(
span_extractor.get_output_dim(),
feedforward.get_input_dim(),
"span extractor output dim",
"feedforward input dim",
)
self.tag_accuracy = CategoricalAccuracy()
if evalb_directory_path is not None:
self._evalb_score = EvalbBracketingScorer(evalb_directory_path)
else:
self._evalb_score = None
initializer(self)
@overrides
def forward(
self, # type: ignore
tokens: TextFieldTensors,
spans: torch.LongTensor,
metadata: List[Dict[str, Any]],
pos_tags: TextFieldTensors = None,
span_labels: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : TextFieldTensors, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
spans : `torch.LongTensor`, required.
A tensor of shape `(batch_size, num_spans, 2)` representing the
inclusive start and end indices of all possible spans in the sentence.
metadata : List[Dict[str, Any]], required.
A dictionary of metadata for each batch element which has keys:
tokens : `List[str]`, required.
The original string tokens in the sentence.
gold_tree : `nltk.Tree`, optional (default = None)
Gold NLTK trees for use in evaluation.
pos_tags : `List[str]`, optional.
The POS tags for the sentence. These can be used in the
model as embedded features, but they are passed here
in addition for use in constructing the tree.
pos_tags : `torch.LongTensor`, optional (default = None)
The output of a `SequenceLabelField` containing POS tags.
span_labels : `torch.LongTensor`, optional (default = None)
A torch tensor representing the integer gold class labels for all possible
spans, of shape `(batch_size, num_spans)`.
# Returns
An output dictionary consisting of:
class_probabilities : `torch.FloatTensor`
A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`
representing a distribution over the label classes per span.
spans : `torch.LongTensor`
The original spans tensor.
tokens : `List[List[str]]`, required.
A list of tokens in the sentence for each element in the batch.
pos_tags : `List[List[str]]`, required.
A list of POS tags in the sentence for each element in the batch.
num_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size), representing the lengths of non-padded spans
in `enumerated_spans`.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
if pos_tags is not None and self.pos_tag_embedding is not None:
embedded_pos_tags = self.pos_tag_embedding(pos_tags)
embedded_text_input = torch.cat([embedded_text_input, embedded_pos_tags], -1)
elif self.pos_tag_embedding is not None:
raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")
mask = get_text_field_mask(tokens)
# Looking at the span start index is enough to know if
# this is padding or not. Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1).long()
if span_mask.dim() == 1:
# This happens if you use batch_size 1 and encounter
# a length 1 sentence in PTB, which do exist. -.-
span_mask = span_mask.unsqueeze(-1)
if span_labels is not None and span_labels.dim() == 1:
span_labels = span_labels.unsqueeze(-1)
num_spans = get_lengths_from_binary_sequence_mask(span_mask)
encoded_text = self.encoder(embedded_text_input, mask)
span_representations = self.span_extractor(encoded_text, spans, mask, span_mask)
if self.feedforward_layer is not None:
span_representations = self.feedforward_layer(span_representations)
logits = self.tag_projection_layer(span_representations)
class_probabilities = masked_softmax(logits, span_mask.unsqueeze(-1))
output_dict = {
"class_probabilities": class_probabilities,
"spans": spans,
"tokens": [meta["tokens"] for meta in metadata],
"pos_tags": [meta.get("pos_tags") for meta in metadata],
"num_spans": num_spans,
}
if span_labels is not None:
loss = sequence_cross_entropy_with_logits(logits, span_labels, span_mask)
self.tag_accuracy(class_probabilities, span_labels, span_mask)
output_dict["loss"] = loss
# The evalb score is expensive to compute, so we only compute
# it for the validation and test sets.
batch_gold_trees = [meta.get("gold_tree") for meta in metadata]
if all(batch_gold_trees) and self._evalb_score is not None and not self.training:
gold_pos_tags: List[List[str]] = [
list(zip(*tree.pos()))[1] for tree in batch_gold_trees
]
predicted_trees = self.construct_trees(
class_probabilities.cpu().data,
spans.cpu().data,
num_spans.data,
output_dict["tokens"],
gold_pos_tags,
)
self._evalb_score(predicted_trees, batch_gold_trees)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Constructs an NLTK `Tree` given the scored spans. We also switch to exclusive
span ends when constructing the tree representation, because it makes indexing
into lists cleaner for ranges of text, rather than individual indices.
Finally, for batch prediction, we will have padded spans and class probabilities.
In order to make this less confusing, we remove all the padded spans and
distributions from `spans` and `class_probabilities` respectively.
"""
all_predictions = output_dict["class_probabilities"].cpu().data
all_spans = output_dict["spans"].cpu().data
all_sentences = output_dict["tokens"]
all_pos_tags = output_dict["pos_tags"] if all(output_dict["pos_tags"]) else None
num_spans = output_dict["num_spans"].data
trees = self.construct_trees(
all_predictions, all_spans, num_spans, all_sentences, all_pos_tags
)
batch_size = all_predictions.size(0)
output_dict["spans"] = [all_spans[i, : num_spans[i]] for i in range(batch_size)]
output_dict["class_probabilities"] = [
all_predictions[i, : num_spans[i], :] for i in range(batch_size)
]
output_dict["trees"] = trees
return output_dict
def construct_trees(
self,
predictions: torch.FloatTensor,
all_spans: torch.LongTensor,
num_spans: torch.LongTensor,
sentences: List[List[str]],
pos_tags: List[List[str]] = None,
) -> List[Tree]:
"""
Construct `nltk.Tree`'s for each batch element by greedily nesting spans.
The trees use exclusive end indices, which contrasts with how spans are
represented in the rest of the model.
# Parameters
predictions : `torch.FloatTensor`, required.
A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`
representing a distribution over the label classes per span.
all_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size, num_spans, 2), representing the span
indices we scored.
num_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size), representing the lengths of non-padded spans
in `enumerated_spans`.
sentences : `List[List[str]]`, required.
A list of tokens in the sentence for each element in the batch.
pos_tags : `List[List[str]]`, optional (default = None).
A list of POS tags for each word in the sentence for each element
in the batch.
# Returns
A `List[Tree]` containing the decoded trees for each element in the batch.
"""
# Switch to using exclusive end spans.
exclusive_end_spans = all_spans.clone()
exclusive_end_spans[:, :, -1] += 1
no_label_id = self.vocab.get_token_index("NO-LABEL", "labels")
trees: List[Tree] = []
for batch_index, (scored_spans, spans, sentence) in enumerate(
zip(predictions, exclusive_end_spans, sentences)
):
selected_spans = []
for prediction, span in zip(
scored_spans[: num_spans[batch_index]], spans[: num_spans[batch_index]]
):
start, end = span
no_label_prob = prediction[no_label_id]
label_prob, label_index = torch.max(prediction, -1)
# Does the span have a label != NO-LABEL or is it the root node?
# If so, include it in the spans that we consider.
if int(label_index) != no_label_id or (start == 0 and end == len(sentence)):
selected_spans.append(
SpanInformation(
start=int(start),
end=int(end),
label_prob=float(label_prob),
no_label_prob=float(no_label_prob),
label_index=int(label_index),
)
)
# The spans we've selected might overlap, which causes problems when we try
# to construct the tree as they won't nest properly.
consistent_spans = self.resolve_overlap_conflicts_greedily(selected_spans)
spans_to_labels = {
(span.start, span.end): self.vocab.get_token_from_index(span.label_index, "labels")
for span in consistent_spans
}
sentence_pos = pos_tags[batch_index] if pos_tags is not None else None
trees.append(self.construct_tree_from_spans(spans_to_labels, sentence, sentence_pos))
return trees
@staticmethod
def resolve_overlap_conflicts_greedily(spans: List[SpanInformation]) -> List[SpanInformation]:
"""
Given a set of spans, removes spans which overlap by evaluating the difference
in probability between one being labeled and the other explicitly having no label
and vice-versa. The worst case time complexity of this method is `O(k * n^4)` where `n`
is the length of the sentence that the spans were enumerated from (and therefore
`k * m^2` complexity with respect to the number of spans `m`) and `k` is the
number of conflicts. However, in practice, there are very few conflicts. Hopefully.
This function modifies `spans` to remove overlapping spans.
# Parameters
spans : `List[SpanInformation]`, required.
A list of spans, where each span is a `namedtuple` containing the
following attributes:
start : `int`
The start index of the span.
end : `int`
The exclusive end index of the span.
no_label_prob : `float`
The probability of this span being assigned the `NO-LABEL` label.
label_prob : `float`
The probability of the most likely label.
# Returns
A modified list of `spans`, with the conflicts resolved by considering local
differences between pairs of spans and removing one of the two spans.
"""
conflicts_exist = True
while conflicts_exist:
conflicts_exist = False
for span1_index, span1 in enumerate(spans):
for span2_index, span2 in list(enumerate(spans))[span1_index + 1 :]:
if (
span1.start < span2.start < span1.end < span2.end
or span2.start < span1.start < span2.end < span1.end
):
# The spans overlap.
conflicts_exist = True
# What's the more likely situation: that span2 was labeled
# and span1 was unlabled, or that span1 was labeled and span2
# was unlabled? In the first case, we delete span2 from the
# set of spans to form the tree - in the second case, we delete
# span1.
if (
span1.no_label_prob + span2.label_prob
< span2.no_label_prob + span1.label_prob
):
spans.pop(span2_index)
else:
spans.pop(span1_index)
break
return spans
@staticmethod
def construct_tree_from_spans(
spans_to_labels: Dict[Tuple[int, int], str], sentence: List[str], pos_tags: List[str] = None
) -> Tree:
"""
# Parameters
spans_to_labels : `Dict[Tuple[int, int], str]`, required.
A mapping from spans to constituency labels.
sentence : `List[str]`, required.
A list of tokens forming the sentence to be parsed.
pos_tags : `List[str]`, optional (default = None)
A list of the pos tags for the words in the sentence, if they
were either predicted or taken as input to the model.
# Returns
An `nltk.Tree` constructed from the labelled spans.
"""
def assemble_subtree(start: int, end: int):
if (start, end) in spans_to_labels:
# Some labels contain nested spans, e.g S-VP.
# We actually want to create (S (VP ...)) nodes
# for these labels, so we split them up here.
labels: List[str] = spans_to_labels[(start, end)].split("-")
else:
labels = None
# This node is a leaf.
if end - start == 1:
word = sentence[start]
pos_tag = pos_tags[start] if pos_tags is not None else "XX"
tree = Tree(pos_tag, [word])
if labels is not None and pos_tags is not None:
# If POS tags were passed explicitly,
# they are added as pre-terminal nodes.
while labels:
tree = Tree(labels.pop(), [tree])
elif labels is not None:
# Otherwise, we didn't want POS tags
# at all.
tree = Tree(labels.pop(), [word])
while labels:
tree = Tree(labels.pop(), [tree])
return [tree]
argmax_split = start + 1
# Find the next largest subspan such that
# the left hand side is a constituent.
for split in range(end - 1, start, -1):
if (start, split) in spans_to_labels:
argmax_split = split
break
left_trees = assemble_subtree(start, argmax_split)
right_trees = assemble_subtree(argmax_split, end)
children = left_trees + right_trees
if labels is not None:
while labels:
children = [Tree(labels.pop(), children)]
return children
tree = assemble_subtree(0, len(sentence))
return tree[0]
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics = {}
all_metrics["tag_accuracy"] = self.tag_accuracy.get_metric(reset=reset)
if self._evalb_score is not None:
evalb_metrics = self._evalb_score.get_metric(reset=reset)
all_metrics.update(evalb_metrics)
return all_metrics
avg_line_length: 44.212121 | max_line_length: 100 | alphanum_fraction: 0.620105
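Because `construct_tree_from_spans` is a `@staticmethod`, it can be exercised without instantiating the model. A minimal sketch with an invented sentence and span labels (assumes allennlp and its nltk dependency are installed):

```python
# Minimal sketch: nest labelled spans (exclusive end indices) into an nltk.Tree.
from allennlp.models.constituency_parser import SpanConstituencyParser

sentence = ["the", "dog", "barks"]
spans_to_labels = {(0, 3): "S", (0, 2): "NP", (2, 3): "VP"}  # invented labels
tree = SpanConstituencyParser.construct_tree_from_spans(spans_to_labels, sentence)
print(tree)  # (S (NP (XX the) (XX dog)) (VP barks)); XX marks the missing POS tags
```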
hexsha: 79496d0aeeb2135dd72012a161b5568fb44c7777 | size: 14,581 | ext: py | lang: Python
max_stars: desktop/core/ext-py/South-1.0.2/south/db/oracle.py @ vinaymundada27/Hue (head 7bffb33bbe7cfa34d340241c4ba3b19476211b2a) | licenses: ["Apache-2.0"] | count: 11 | events: 2019-03-20T07:38:35.000Z to 2021-06-18T09:42:46.000Z
max_issues: filenv/lib/python2.7/site-packages/south/db/oracle.py @ betoesquivel/fil2014 (head 4c2e9188769096391bb206b76ed1ab8bd2ff66a1) | licenses: ["MIT"] | count: null | events: null
max_forks: filenv/lib/python2.7/site-packages/south/db/oracle.py @ betoesquivel/fil2014 (head 4c2e9188769096391bb206b76ed1ab8bd2ff66a1) | licenses: ["MIT"] | count: 5 | events: 2019-06-29T03:13:02.000Z to 2020-04-23T04:47:11.000Z
content:
from __future__ import print_function
import os.path
import sys
import re
import warnings
import cx_Oracle
from django.db import connection, models
from django.db.backends.util import truncate_name
from django.core.management.color import no_style
from django.db.models.fields import NOT_PROVIDED
from django.db.utils import DatabaseError
# In revision r16016 function get_sequence_name has been transformed into
# method of DatabaseOperations class. To make code backward-compatible we
# need to handle both situations.
try:
from django.db.backends.oracle.base import get_sequence_name\
as original_get_sequence_name
except ImportError:
original_get_sequence_name = None
from south.db import generic
class DatabaseOperations(generic.DatabaseOperations):
"""
Oracle implementation of database operations.
"""
backend_name = 'oracle'
alter_string_set_type = 'ALTER TABLE %(table_name)s MODIFY %(column)s %(type)s %(nullity)s;'
alter_string_set_default = 'ALTER TABLE %(table_name)s MODIFY %(column)s DEFAULT %(default)s;'
alter_string_update_nulls_to_default = \
'UPDATE %(table_name)s SET %(column)s = %(default)s WHERE %(column)s IS NULL;'
add_column_string = 'ALTER TABLE %s ADD %s;'
delete_column_string = 'ALTER TABLE %s DROP COLUMN %s;'
add_constraint_string = 'ALTER TABLE %(table_name)s ADD CONSTRAINT %(constraint)s %(clause)s'
allows_combined_alters = False
has_booleans = False
constraints_dict = {
'P': 'PRIMARY KEY',
'U': 'UNIQUE',
'C': 'CHECK',
'R': 'FOREIGN KEY'
}
def get_sequence_name(self, table_name):
if original_get_sequence_name is None:
return self._get_connection().ops._get_sequence_name(table_name)
else:
return original_get_sequence_name(table_name)
#TODO: This will cause very obscure bugs if anyone uses a column name or string value
# that looks like a column definition (with 'CHECK', 'DEFAULT' and/or 'NULL' in it)
# e.g. "CHECK MATE" varchar(10) DEFAULT 'NULL'
def adj_column_sql(self, col):
# Syntax fixes -- Oracle is picky about clause order
col = re.sub('(?P<constr>CHECK \(.*\))(?P<any>.*)(?P<default>DEFAULT \d+)',
lambda mo: '%s %s%s'%(mo.group('default'), mo.group('constr'), mo.group('any')), col) #syntax fix for boolean/integer field only
col = re.sub('(?P<not_null>(NOT )?NULL) (?P<misc>(.* )?)(?P<default>DEFAULT.+)',
lambda mo: '%s %s %s'%(mo.group('default'),mo.group('not_null'),mo.group('misc') or ''), col) #fix order of NULL/NOT NULL and DEFAULT
return col
def check_meta(self, table_name):
return table_name in [ m._meta.db_table for m in models.get_models() ] #caching provided by Django
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
@generic.invalidate_table_constraints
def create_table(self, table_name, fields):
qn = self.quote_name(table_name)
columns = []
autoinc_sql = ''
for field_name, field in fields:
field = self._field_sanity(field)
# avoid default values in CREATE TABLE statements (#925)
field._suppress_default = True
col = self.column_sql(table_name, field_name, field)
if not col:
continue
col = self.adj_column_sql(col)
columns.append(col)
if isinstance(field, models.AutoField):
autoinc_sql = connection.ops.autoinc_sql(table_name, field_name)
sql = 'CREATE TABLE %s (%s);' % (qn, ', '.join([col for col in columns]))
self.execute(sql)
if autoinc_sql:
self.execute(autoinc_sql[0])
self.execute(autoinc_sql[1])
@generic.invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
qn = self.quote_name(table_name)
# Note: PURGE is not valid syntax for Oracle 9i (it was added in 10)
if cascade:
self.execute('DROP TABLE %s CASCADE CONSTRAINTS;' % qn)
else:
self.execute('DROP TABLE %s;' % qn)
# If the table has an AutoField a sequence was created.
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.get_sequence_name(table_name)}
self.execute(sequence_sql)
@generic.invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
if self.dry_run:
if self.debug:
print(' - no dry run output for alter_column() due to dynamic DDL, sorry')
return
qn = self.quote_name(table_name)
# hook for the field to do any resolution prior to it's attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
field = self._field_sanity(field)
# Add _id or whatever if we need to
field.set_attributes_from_name(name)
if not explicit_name:
name = field.column
qn_col = self.quote_name(name)
# First, change the type
# This will actually also add any CHECK constraints needed,
# since e.g. 'type' for a BooleanField is 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))'
params = {
'table_name':qn,
'column': qn_col,
'type': self._db_type_for_alter_column(field),
'nullity': 'NOT NULL',
'default': 'NULL'
}
if field.null:
params['nullity'] = 'NULL'
sql_templates = [
(self.alter_string_set_type, params, []),
(self.alter_string_set_default, params, []),
]
if not field.null and field.has_default():
# Use default for rows that had nulls. To support the case where
# the new default does not fit the old type, we need to first change
# the column type to the new type, but null=True; then set the default;
# then complete the type change.
def change_params(**kw):
"A little helper for non-destructively changing the params"
p = params.copy()
p.update(kw)
return p
sql_templates[:0] = [
(self.alter_string_set_type, change_params(nullity='NULL'),[]),
(self.alter_string_update_nulls_to_default, change_params(default="%s"), [field.get_default()]),
]
if not ignore_constraints:
# drop CHECK constraints. Make sure this is executed before the ALTER TABLE statements
# generated above, since those statements recreate the constraints we delete here.
check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
for constraint in check_constraints:
self.execute(self.delete_check_sql % {
'table': self.quote_name(table_name),
'constraint': self.quote_name(constraint),
})
# Drop foreign constraints
try:
self.delete_foreign_key(qn, qn_col)
except ValueError:
# There weren't any
pass
for sql_template, params, args in sql_templates:
try:
self.execute(sql_template % params, args, print_all_errors=False)
except DatabaseError as exc:
description = str(exc)
# Oracle complains if a column is already NULL/NOT NULL
if 'ORA-01442' in description or 'ORA-01451' in description:
# so we just drop NULL/NOT NULL part from target sql and retry
params['nullity'] = ''
sql = sql_template % params
self.execute(sql)
# Oracle also has issues if we try to change a regular column
# to a LOB or vice versa (also REF, object, VARRAY or nested
# table, but these don't come up much in Django apps)
elif 'ORA-22858' in description or 'ORA-22859' in description:
self._alter_column_lob_workaround(table_name, name, field)
else:
self._print_sql_error(exc, sql_template % params)
raise
if not ignore_constraints:
# Add back FK constraints if needed
if field.rel: #and self.supports_foreign_keys:
self.add_deferred_sql(
self.foreign_key_sql(
qn[1:-1], # foreign_key_sql uses this as part of constraint name
qn_col[1:-1], # foreign_key_sql uses this as part of constraint name
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
def _alter_column_lob_workaround(self, table_name, name, field):
"""
Oracle refuses to change a column type from/to LOB to/from a regular
column. In Django, this shows up when the field is changed from/to
a TextField.
What we need to do instead is:
- Rename the original column
- Add the desired field as new
- Update the table to transfer values from old to new
- Drop old column
"""
renamed = self._generate_temp_name(name)
self.rename_column(table_name, name, renamed)
self.add_column(table_name, name, field, keep_default=False)
self.execute("UPDATE %s set %s=%s" % (
self.quote_name(table_name),
self.quote_name(name),
self.quote_name(renamed),
))
self.delete_column(table_name, renamed)
def _generate_temp_name(self, for_name):
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
@generic.copy_column_constraints #TODO: Appears to be nulled by the delete decorator below...
@generic.delete_column_constraints
def rename_column(self, table_name, old, new):
if old == new:
# Short-circuit out
return []
self.execute('ALTER TABLE %s RENAME COLUMN %s TO %s;' % (
self.quote_name(table_name),
self.quote_name(old),
self.quote_name(new),
))
@generic.invalidate_table_constraints
def add_column(self, table_name, name, field, keep_default=False):
field = self._field_sanity(field)
sql = self.column_sql(table_name, name, field)
sql = self.adj_column_sql(sql)
if sql:
params = (
self.quote_name(table_name),
sql
)
sql = self.add_column_string % params
self.execute(sql)
# Now, drop the default if we need to
if field.default is not None:
field.default = NOT_PROVIDED
self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
def delete_column(self, table_name, name):
return super(DatabaseOperations, self).delete_column(self.quote_name(table_name), name)
def lookup_constraint(self, db_name, table_name, column_name=None):
if column_name:
# Column names in the constraint cache come from the database,
# make sure we use the properly shortened/uppercased version
# for lookup.
column_name = self.normalize_name(column_name)
return super(DatabaseOperations, self).lookup_constraint(db_name, table_name, column_name)
def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"):
if columns:
columns = [self.normalize_name(c) for c in columns]
return super(DatabaseOperations, self)._constraints_affecting_columns(table_name, columns, type)
def _field_sanity(self, field):
"""
This particular override stops us sending DEFAULTs for BooleanField.
"""
if isinstance(field, models.BooleanField) and field.has_default():
field.default = int(field.to_python(field.get_default()))
# On Oracle, empty strings are null
if isinstance(field, (models.CharField, models.TextField)):
field.null = field.empty_strings_allowed
return field
def _default_value_workaround(self, value):
from datetime import date,time,datetime
if isinstance(value, (date,time,datetime)):
return "'%s'" % value
else:
return super(DatabaseOperations, self)._default_value_workaround(value)
def _fill_constraint_cache(self, db_name, table_name):
self._constraint_cache.setdefault(db_name, {})
self._constraint_cache[db_name][table_name] = {}
rows = self.execute("""
SELECT user_cons_columns.constraint_name,
user_cons_columns.column_name,
user_constraints.constraint_type
FROM user_constraints
JOIN user_cons_columns ON
user_constraints.table_name = user_cons_columns.table_name AND
user_constraints.constraint_name = user_cons_columns.constraint_name
WHERE user_constraints.table_name = '%s'
""" % self.normalize_name(table_name))
for constraint, column, kind in rows:
self._constraint_cache[db_name][table_name].setdefault(column, set())
self._constraint_cache[db_name][table_name][column].add((self.constraints_dict[kind], constraint))
return
avg_line_length: 42.141618 | max_line_length: 155 | alphanum_fraction: 0.59941
hexsha: 79496e081704d82c2c2efc0a25f1ba5ae96c499d | size: 9,826 | ext: py | lang: Python
max_stars: docs/source/conf.py @ smoe/cyvcf2 (head b8e25fce6289a384be424e1b158b29d2b91b65e1) | licenses: ["MIT"] | count: null | events: null
max_issues: docs/source/conf.py @ smoe/cyvcf2 (head b8e25fce6289a384be424e1b158b29d2b91b65e1) | licenses: ["MIT"] | count: null | events: null
max_forks: docs/source/conf.py @ smoe/cyvcf2 (head b8e25fce6289a384be424e1b158b29d2b91b65e1) | licenses: ["MIT"] | count: null | events: null
content:
# -*- coding: utf-8 -*-
#
# cyvcf2 documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 14 08:40:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cyvcf2'
copyright = u'2016, Brent Pedersen'
author = u'Brent Pedersen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
#
init_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../cyvcf2/__init__.py")
)
with open(init_file, 'r') as f:
version_file = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
pkg_version = version_match.group(1)
# The short X.Y version.
version = pkg_version
# The full version, including alpha/beta/rc tags.
release = pkg_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'cyvcf2 v0.6.5'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cyvcf2doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cyvcf2.tex', u'cyvcf2 Documentation',
u'Brent Pedersen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cyvcf2', u'cyvcf2 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cyvcf2', u'cyvcf2 Documentation',
author, 'cyvcf2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.216393 | 80 | 0.712294 |
79496f7acfca0195efd21f98d30e355bdd3bc467
| 8,142 |
py
|
Python
|
mmdet/core/bbox/assigners/atss_assigner.py
|
sanghun3819/LQM
|
f00a4ccc5a35cf430a70ff262148a8183cc99864
|
[
"Apache-2.0"
] | 4 |
2021-12-30T07:38:26.000Z
|
2022-02-08T11:30:55.000Z
|
mmdet/core/bbox/assigners/atss_assigner.py
|
POSTECH-IMLAB/LQM
|
a0f4d061c5f1486b43ddd884c37e651c6c8b16e7
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/bbox/assigners/atss_assigner.py
|
POSTECH-IMLAB/LQM
|
a0f4d061c5f1486b43ddd884c37e651c6c8b16e7
|
[
"Apache-2.0"
] | null | null | null |
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class ATSSAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
    Each proposal will be assigned `0` or a positive integer
indicating the ground truth index.
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
        topk (int): number of bboxes selected in each level
"""
def __init__(self,
topk,
iou_calculator=dict(type='BboxOverlaps2D'),
ignore_iof_thr=-1,
positive_iou_thr=-1):
self.topk = topk
self.iou_calculator = build_iou_calculator(iou_calculator)
self.ignore_iof_thr = ignore_iof_thr
self.positive_iou_thr = positive_iou_thr
# https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
def assign(self,
bboxes,
num_level_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""Assign gt to bboxes.
        The assignment is done in the following steps:
        1. compute the IoU between all bboxes (bboxes of all pyramid levels) and gts
        2. compute the center distance between all bboxes and gts
        3. on each pyramid level, for each gt, select the k bboxes whose centers
           are closest to the gt center, so that k*l bboxes in total are
           selected as candidates for each gt
        4. get the corresponding IoU for these candidates, compute the mean and
           std, and set mean + std as the IoU threshold
        5. select the candidates whose IoU is greater than or equal to the
           threshold as positive
        6. limit the positive samples' centers to lie inside the gt
        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape (n, 4).
num_level_bboxes (List): num of bboxes in each level
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
INF = 100000000
bboxes = bboxes[:, :4]
num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
iou_overlap_thr = bboxes.new_full((num_gt, ),
self.positive_iou_thr,
dtype=torch.float)
# compute iou between all bbox and gt
overlaps = self.iou_calculator(bboxes, gt_bboxes)
# assign 0 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
0,
dtype=torch.long)
if num_gt == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gt == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
# compute center distance between all bbox and gt
gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
gt_points = torch.stack((gt_cx, gt_cy), dim=1)
bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)
distances = (bboxes_points[:, None, :] -
gt_points[None, :, :]).pow(2).sum(-1).sqrt()
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
ignore_overlaps = self.iou_calculator(
bboxes, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
distances[ignore_idxs, :] = INF
assigned_gt_inds[ignore_idxs] = -1
# Selecting candidates based on the center distance
candidate_idxs = []
start_idx = 0
for level, bboxes_per_level in enumerate(num_level_bboxes):
# on each pyramid level, for each gt,
# select k bbox whose center are closest to the gt center
end_idx = start_idx + bboxes_per_level
distances_per_level = distances[start_idx:end_idx, :]
selectable_k = min(self.topk, bboxes_per_level)
_, topk_idxs_per_level = distances_per_level.topk(
selectable_k, dim=0, largest=False)
candidate_idxs.append(topk_idxs_per_level + start_idx)
start_idx = end_idx
candidate_idxs = torch.cat(candidate_idxs, dim=0)
        # get the corresponding iou for these candidates, and compute the
# mean and std, set mean + std as the iou threshold
candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
overlaps_mean_per_gt = candidate_overlaps.mean(0)
overlaps_std_per_gt = candidate_overlaps.std(0)
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
        if self.positive_iou_thr > 0:
is_pos = candidate_overlaps >= iou_overlap_thr[None, :]
else:
is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
# limit the positive sample's center in gt
for gt_idx in range(num_gt):
candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
candidate_idxs = candidate_idxs.view(-1)
        # calculate the left, top, right, bottom distance between positive
        # bbox center and gt side
l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
is_pos = is_pos & is_in_gts
# if an anchor box is assigned to multiple gts,
# the one with the highest IoU will be selected.
overlaps_inf = torch.full_like(overlaps,
-INF).t().contiguous().view(-1)
index = candidate_idxs.view(-1)[is_pos.view(-1)]
overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
overlaps_inf = overlaps_inf.view(num_gt, -1).t()
max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
assigned_gt_inds[
max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
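# Usage sketch (illustrative only, not part of the original file; assumes the
# mmdet registries imported above are initialised and that anchors/gts are
# (N, 4) / (K, 4) tensors in xyxy format):
#
#     assigner = ATSSAssigner(topk=9)
#     assign_result = assigner.assign(
#         anchors,                # (N, 4) anchors from all pyramid levels
#         num_level_bboxes,       # e.g. [16384, 4096, 1024, 256, 64]
#         gt_bboxes,              # (K, 4) ground-truth boxes
#         gt_labels=gt_labels)    # (K,) ground-truth labels
#     # assign_result.gt_inds is 0 for background, i + 1 for the i-th gt, -1 for ignored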
| 43.774194 | 87 | 0.598993 |
7949709af15d21bfbb0b2fe9924c0632a4ccc6e2
| 912 |
py
|
Python
|
selfdrive/controls/lib/latcontrol.py
|
fallen8angel/forNEXO-YONG
|
5661ae0fb2fefc41fda9e474e094d4b5440ecb8e
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/latcontrol.py
|
fallen8angel/forNEXO-YONG
|
5661ae0fb2fefc41fda9e474e094d4b5440ecb8e
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/latcontrol.py
|
fallen8angel/forNEXO-YONG
|
5661ae0fb2fefc41fda9e474e094d4b5440ecb8e
|
[
"MIT"
] | 2 |
2022-02-25T03:36:09.000Z
|
2022-03-13T12:24:37.000Z
|
from abc import abstractmethod, ABC
from common.realtime import DT_CTRL
from common.numpy_fast import clip
MIN_STEER_SPEED = 0.3
class LatControl(ABC):
def __init__(self, CP, CI):
self.sat_count_rate = 1.0 * DT_CTRL
self.sat_limit = CP.steerLimitTimer
self.sat_count = 0.
# we define the steer torque scale as [-1.0...1.0]
self.steer_max = 1.0
@abstractmethod
def update(self, active, CS, CP, VM, params, last_actuators, desired_curvature, desired_curvature_rate, llk):
pass
def reset(self):
self.sat_count = 0.
def _check_saturation(self, saturated, CS):
if saturated and CS.vEgo > 10. and not CS.steeringRateLimited and not CS.steeringPressed:
self.sat_count += self.sat_count_rate
else:
self.sat_count -= self.sat_count_rate
self.sat_count = clip(self.sat_count, 0.0, self.sat_limit)
return self.sat_count > (self.sat_limit - 1e-3)
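# Illustrative note (not part of the original file): with DT_CTRL = 0.01 s and
# a steerLimitTimer of 0.4 s, sat_count grows by 0.01 per saturated control
# frame, so roughly 40 consecutive saturated frames at vEgo > 10 m/s are needed
# before _check_saturation() starts returning True.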
| 29.419355 | 111 | 0.710526 |
794970ea38cf84e6d2f928600b557756d0f1b494
| 16,856 |
py
|
Python
|
qtools/exportclient.py
|
Awesomium40/qtools
|
9348ddb597cd0470cad61c720496797f9d6db677
|
[
"MIT"
] | null | null | null |
qtools/exportclient.py
|
Awesomium40/qtools
|
9348ddb597cd0470cad61c720496797f9d6db677
|
[
"MIT"
] | null | null | null |
qtools/exportclient.py
|
Awesomium40/qtools
|
9348ddb597cd0470cad61c720496797f9d6db677
|
[
"MIT"
] | null | null | null |
from . import constants
from . import utils
from . import exceptions
from . import qsf
import datetime
import getpass
import io
import logging
import os
import re
import requests
import time
import zipfile
_QDC = 'Q_DATA_CENTER'
_QAT = 'Q_API_TOKEN'
logging.getLogger('exportclient').addHandler(logging.NullHandler())
class ExportClient(object):
date_time_format = '%Y-%m-%dT%H:%M:%SZ'
date_time_re = re.compile(r'^(?P<year>[0-9]{4})-(?P<month>[0-1]((?<=1)[0-2]|(?<=0)[0-9]))-' +
r'(?P<day>[0-3]((?<=3)[0-1]|(?<=[0-2])[0-9]))' +
r'(?P<time>T[0-9]{2}:[0-9]{2}:[0-9]{2}Z)$')
def __init__(self, data_center=None, token=None, **kwargs):
"""
Creates a new instance of ExportClient class
:param data_center: string. Can specify either your qualtrics data center or the OS environment variable at
which this data is stored. Optional.
Omitting will cause a search for the OS environment variable 'Q_DATA_CENTER'
:param token: string. Can specify either your qualtrics API key or the OS environment variable at which
this data is stored. Optional
            Omitting will cause a search for the OS environment variable 'Q_API_TOKEN'
:param kwargs:
"""
ERR_BASE = ("parameter '{0}' was not specified and variable '{1}' was " +
"not found in environment variables. Please specify {0} or add {1} " +
"to your OS environment variables.")
if data_center is not None:
            dc = os.environ.get(data_center)
            if dc is None: dc = data_center
else:
dc = os.environ.get(_QDC)
if dc is None: dc = getpass.getpass("Please enter your Qualtrics data center: ")
if token is not None:
tkn = os.environ.get(token)
if tkn is None: tkn = token
else:
tkn = os.environ.get(_QAT)
if tkn is None: tkn = getpass.getpass("Please enter your Qualtrics API token: ")
if tkn is None:
raise ValueError(ERR_BASE.format('token', _QAT))
if dc is None:
raise ValueError(ERR_BASE.format('data_center', _QDC))
self._data_center = dc
self._token = tkn
self._headers = {
"content-type": "application/json",
}
        self._url_base = f'https://{self._data_center}.qualtrics.com/API/v3/'
@staticmethod
def _await_export_(url, headers, survey_name=None, report_progress=True, update_every=0.5):
"""
        ec._await_export_(url, headers, survey_name=None, report_progress=True, update_every=0.5) -> dict
:param url: the qualtrics request check URL for the survey responses export
:param headers: Headers for the request
:param report_progress: Whether to display the progress of the export process. Default True
:param update_every: How often (in seconds) to check status of the export. Default 0.5
:return: json object containing the request response
"""
status = None
prefix = f"Exporting {survey_name}: " if survey_name is not None else 'Export Progress: '
# Periodically check the update of the export
while status not in ('complete', 'failed'):
response = requests.get(url, headers=headers)
response_json = response.json()
progress = response_json['result']['percentComplete']
if report_progress:
utils._progress_bar_(progress, 100, prefix=prefix)
status = response_json['result']['status']
time.sleep(update_every)
if status == 'failed':
raise exceptions.ExportException('Export Failed', response.reason)
return response_json
@staticmethod
def _create_cre_body_(**kwargs):
"""
:param format:
:param startDate: DateTime or ISO-8601 string in UTC time. Only export responses recorded after this date
:param endDate: DateTime or ISO-8601 string. Only export responses recorded prior to this date
:param limit: integer. Maximum number of responses to export
:param useLabels: Boolean specifying whether to export recode value instead of text of answer choice
:param seenUnansweredRecode: Int with which to recode seen, but unanswered questions
:param multiselectSeenUnansweredRecode: int with which to recode seen but unanswered choices for MS questions
:param includeDisplayOrder: Bool specifying whether to include display order information
        :param formatDecimalAsComma: Bool specifying whether to use commas instead of periods as decimal separator
:param timeZone: constants.TimeZone specifying response date values. None for GMT
:param newlineReplacement: string specifying newline delimiter for CSV/TSV formats
:param questionIds: list[str]. Only export questions with IDs in the list
        :param embeddedDataIds: list[str] Export only the specified embedded data
:param surveyMetadataIds: Export only specified metadata columns
:param compress: Boolean whether to export results in compressed format
:param exportResponsesInProgress: Boolean whether to export the in-progress responses only
:param breakoutSets: Boolean split multi-value fields into columns
:param filterId: Export only responses that match a saved filter
:param allowContinuation: Boolean. Set True in order to request a continuation token when export finished
:param continuationToken: String continuation token used to get new responses recorded since last export
        :return: dict of request-body parameters
"""
def date_func(date_value: datetime.datetime):
try:
result = date_value.strftime(ExportClient.date_time_format)
except AttributeError:
match = ExportClient.date_time_re.search(date_value)
result = date_value if match is not None else None
return result
bool_func = lambda x: str(bool(x)).lower()
list_func = lambda items: list(items)
keywords = {'startDate': date_func, 'endDate': date_func, 'limit': int,
'useLabels': bool_func, 'seenUnansweredRecode': int,
'multiselectSeenUnansweredRecode': int, 'includeDisplayOrder': bool,
'formatDecimalAsComma': bool_func, 'timeZone': lambda x: x,
'newlineReplacement': lambda x: x,
'questionIds': list_func, 'embeddedDataIds': list_func, 'surveyMetadataIds': list_func,
'compress': bool_func,
'exportResponsesInProgress': bool_func, 'breakoutSets': bool_func,
                    'filterId': lambda x: str(x), 'allowContinuation': bool_func,
'continuationToken': lambda x: str(x)}
params = {key: keywords.get(key)(value) for key, value in kwargs.items()
if key in keywords and keywords.get(key)(value) is not None}
body = {key: value for key, value in params.items()} if len(params) > 0 else {}
return body
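        # Example (illustrative, values are not from the original source):
        #     ExportClient._create_cre_body_(
        #         startDate=datetime.datetime(2023, 1, 1), useLabels=True, limit=500)
        # returns {'startDate': '2023-01-01T00:00:00Z', 'useLabels': 'true', 'limit': 500}.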
def _locate_survey_id_(self, locator=None):
"""
ec._locate_survey_id(locator) -> str
returns either the survey ID located by the callable locator
or by the default of ExportClient._prompt_for_survey_ if locator is not specified
:param locator: callable which returns a valid qualtrics survey ID
:return:
"""
locator = self._prompt_for_survey_ if locator is None or not callable(locator) else locator
survey_id = locator()
if survey_id is None:
raise ValueError("Must specify valid value for either survey_id or locator")
return survey_id
def _prompt_for_survey_(self):
try_again = True
survey_id = None
surveys = self.get_survey_list()
survey_list = {i: survey_data for i, survey_data in enumerate(surveys.items(), 1)}
prompt = ("Surveys:\n" +
"\n".join(f'{key}: {value[1]}' for key, value in survey_list.items()) +
'\nc: cancel')
while try_again:
print(prompt)
selection = input("Please select a survey: ")
if selection.lower() == 'c':
try_again = False
continue
try:
selection = int(selection)
survey_id = survey_list.get(selection)[0]
except ValueError as err:
print("invalid selection")
try_again = input("Select again (y/n)? ").lower() == 'y'
survey_id = None
except TypeError as err:
print("invalid selection")
try_again = input("Select again (y/n)? ").lower() == 'y'
survey_id = None
else:
try_again = False
return survey_id
def get_survey_list(self):
"""
ec.list_surveys() -> dict[str: str]
        Queries the qualtrics List Surveys API for surveys owned by the current user and returns a dictionary
whose keys are survey ID and whose values are survey names
:return: dict
"""
url = f'{self._url_base}surveys'
headers = {'x-api-token': self._token,
"content-type": "multipart/form-data"}
response = requests.get(url, headers=headers)
if not response.ok:
raise exceptions.ExportException("Unable to retrieve list of surveys", response.reason)
data = response.json()['result']['elements']
return {itm.get('id'): itm.get('name') for itm in data}
def export_codebook(self, survey_id=None, locator=None):
"""
        ec.export_codebook(survey_id=None, locator=None) -> openpyxl.Workbook
        Exports a codebook built from the survey definition of the given survey.
:param survey_id:
:param locator: keyword argument providing a callable which returns the ID of the survey to be exported.
:return: openpyxl.Workbook
"""
survey_id = self._locate_survey_id_(locator=locator) if survey_id is None else survey_id
data = self.export_survey_definition(survey_id=survey_id, locator=locator, format=constants.Format.TXT)
survey = qsf.Survey(data)
return survey.codebook()
def export_responses(self, out_folder, survey_id=None, file_format=constants.Format.SPSS, report_progress=True,
update_every=0.5, **kwargs):
"""
ec.export_responses(self, out_path, survey_id, file_format=constants.Format.SPSS, report_progress=True,
update_every=0.5, **kwargs) -> None
:param out_folder: path to the folder in which response data is to be saved
:param survey_id: string specifying the qualtrics ID of the survey to be exported.
If no survey id is specified, user will be prompted with a list of survey ids
:param file_format: constants.Format specifying the file format for the result export
:param report_progress: Whether to display the progress of the export. Default True
:param update_every: How often to check progress of export (in seconds). Default 0.5
:param locator: kwarg Callable which returns the survey ID of the survey whose responses are to be exported
(or None if no survey ID can be located) if survey_id is not specified. Optional.
:param startDate: DateTime or ISO-8601 datetime string in UTC time.
Only export responses recorded after this date. Optional. Omit to export all responses
:param endDate: DateTime or ISO-8601 datetime string. Only export responses recorded prior to this date.
Optional. Omit to export all responses
:param limit: integer. Maximum number of responses to export. Optional. Omit to export all responses
:param useLabels: Boolean specifying whether to export recode value instead of text of answer choice. Optional
:param seenUnansweredRecode: Int with which to recode seen, but unanswered questions. Optional
:param multiselectSeenUnansweredRecode: int with which to recode seen but unanswered choices for MS questions.
Optional
:param includeDisplayOrder: Bool specifying whether to include display order information. Optional
        :param formatDecimalAsComma: Bool specifying whether to use commas instead of periods as decimal separator. Optional
:param timeZone: constants.TimeZone specifying response date values. None for GMT. Optional
:param newlineReplacement: string specifying newline delimiter for CSV/TSV formats. Optional
:param questionIds: list[str]. Only export questions with IDs in the list. Optional. Omit to export all.
        :param embeddedDataIds: list[str] Export only the specified embedded data. Optional. Omit to export all
        :param surveyMetadataIds: Export only the specified metadata columns. Optional. Omit to export all
:param compress: Boolean whether to export results in compressed format. Optional. Default True
:param exportResponsesInProgress: Boolean whether to export the in-progress responses only.
Optional. Default False
:param breakoutSets: Boolean split multi-value fields into columns. Optional. Default True
:param filterId: Export only responses that match a saved filter. Optional. Omit to export all
:param allowContinuation: Boolean. Set True in order to request a continuation token when export finished
:param continuationToken: String continuation token used to get new responses recorded since last export
:return: None
:raises exceptions.ExportException if the request for download does not return OK status
"""
# If no survey specified, either use the provided callable to retrieve survey ID
# or present user with a prompt that allows to choose from available surveys to export
locator = kwargs.get('locator', self._prompt_for_survey_)
survey_id = self._locate_survey_id_(locator=locator) if survey_id is None else survey_id
survey_name = self.get_survey_list().get(survey_id)
if survey_id is None:
logging.info("No survey ID specified. Aborting...")
return
body = {"format": f"{file_format}"}
base_url = f'{self._url_base}surveys/{survey_id}/export-responses/'
headers = self._headers
headers['x-api-token'] = self._token
body_args = self._create_cre_body_(**kwargs)
body.update(body_args)
# Create the export request
response = requests.post(base_url, json=body, headers=headers)
if not response.ok:
raise exceptions.ExportException("Export Error. Check err.Reason for details", response.reason)
dl_response = response.json()
# Build the URL for checking progress of the export
progress_id = dl_response['result']['progressId']
check_url = base_url + progress_id
check_response = self._await_export_(check_url, headers=headers, report_progress=report_progress,
update_every=update_every)
# Download and unzip the completed file
file_id = check_response['result']['fileId']
dl_url = base_url + file_id + r'/file'
download = requests.get(dl_url, headers=headers, stream=True)
zipfile.ZipFile(io.BytesIO(download.content)).extractall(out_folder)
def export_survey_definition(self, survey_id=None, locator=None, format=constants.Format.JSON):
"""
ec.export_survey_definition(survey_id=None, locator=None, format=constants.Format.JSON) -> object
Exports the survey definition (qsf) associated with the survey specified by survey_id or located by locator
:param survey_id: The ID of the survey whose definition is to be exported
:param locator: Callable which returns the ID of the survey to be exported when survey_id is None
:param format: constants.Format that specifies output type. Format.JSON or Format.TXT
:return: text or JSON data, as specified by format
"""
locator = self._prompt_for_survey_ if locator is None or not callable(locator) else locator
survey_id = locator() if survey_id is None else survey_id
url = f'{self._url_base}survey-definitions/{survey_id}?format=qsf'
headers = {'x-api-token': self._token}
response = requests.get(url, headers=headers)
if not response.ok:
raise exceptions.ExportException(f"Unable to export definition for survey {survey_id}. " +
"Check result for details", response.reason)
return response.json() if format == constants.Format.JSON else response.text
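# Usage sketch (illustrative only, not part of the original file; assumes the
# Q_DATA_CENTER and Q_API_TOKEN environment variables are set and the account
# owns at least one survey):
#
#     ec = ExportClient()
#     surveys = ec.get_survey_list()            # {survey_id: survey_name}
#     first_id = next(iter(surveys))
#     ec.export_responses('exports/', survey_id=first_id,
#                         file_format=constants.Format.SPSS, useLabels=True)
#     definition = ec.export_survey_definition(survey_id=first_id)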
| 49 | 123 | 0.65757 |
794970fd5e1b4aecb202c7e233342dbedcb55db2
| 9,116 |
py
|
Python
|
lcm/lcm/nf/tests/test_change_ext_conn.py
|
onap/vfc-gvnfm-vnflcm
|
e3127fee0fdb5bf193fddc74a69312363a6d20eb
|
[
"Apache-2.0"
] | 1 |
2019-04-02T03:15:20.000Z
|
2019-04-02T03:15:20.000Z
|
lcm/lcm/nf/tests/test_change_ext_conn.py
|
onap/vfc-gvnfm-vnflcm
|
e3127fee0fdb5bf193fddc74a69312363a6d20eb
|
[
"Apache-2.0"
] | null | null | null |
lcm/lcm/nf/tests/test_change_ext_conn.py
|
onap/vfc-gvnfm-vnflcm
|
e3127fee0fdb5bf193fddc74a69312363a6d20eb
|
[
"Apache-2.0"
] | 1 |
2021-10-15T15:26:47.000Z
|
2021-10-15T15:26:47.000Z
|
# Copyright (C) 2019 ZTE. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from lcm.pub.database.models import NfInstModel, JobStatusModel, StorageInstModel, NetworkInstModel, \
SubNetworkInstModel, PortInstModel, FlavourInstModel, VmInstModel, VNFCInstModel
from lcm.pub.exceptions import NFLCMException
from lcm.pub.utils import restcall
from lcm.pub.vimapi import api
from lcm.pub.utils.jobutil import JobUtil
from lcm.nf.biz.change_ext_conn import ChangeExtConn
from . import const
class TestChangeExtConn(TestCase):
def setUp(self):
self.client = APIClient()
NfInstModel(nfinstid='12345',
nf_name='VNF1',
nf_desc="VNF DESC",
vnfdid="1",
netype="XGW",
vendor="ZTE",
vnfSoftwareVersion="V1",
version="V1",
package_id="2",
status='NOT_INSTANTIATED').save()
NfInstModel(nfinstid='123',
nf_name='VNF1',
nf_desc="VNF DESC",
vnfdid="1",
netype="XGW",
vendor="ZTE",
vnfSoftwareVersion="V1",
version="V1",
package_id="2",
status='INSTANTIATED').save()
self.req_data = {
"extVirtualLinks": [{
"id": "string",
"resourceId": "329efb86-5cbb-4fc0-bc7c-6ea28f9d7389",
"resourceSubnetId": "429efb86-5cbb-4fc0-bc7c-6ea28f9d7389",
"extCps": [{
"cpdId": "ext_cp",
"cpConfig": [{
"cpInstanceId": "",
"cpProtocolData": [{
"layerProtocol": "IP_OVER_ETHERNET",
"ipOverEthernet": {
"ipAddresses": [{
"type": "IPV4",
"numDynamicAddresses": 0,
"subnetId": "59e9ffa9-b67e-4c05-b191-ed179007536e"
}]
}
}]
}]
}],
"extLinkPorts": []
}],
"vimConnectionInfo": [{
"id": "tecs_RegionOne",
"vimType": "openstack",
"vimId": "tecs_RegionOne",
"accessInfo": {
"tenant": "chinamobile"
}
}],
"additionalParams": {
"vmid": "552ea058-6441-4de5-b4c1-b0a52c7557e8"
}
}
def tearDown(self):
NfInstModel.objects.filter(nfinstid='12345').delete()
NfInstModel.objects.filter(nfinstid='123').delete()
def assert_job_result(self, job_id, job_progress, job_detail):
jobs = JobStatusModel.objects.filter(
jobid=job_id,
progress=job_progress,
descp=job_detail
)
self.assertEqual(1, len(jobs))
def test_change_ext_conn_not_found(self):
url = "/api/vnflcm/v1/vnf_instances/12/change_ext_conn"
response = self.client.post(url,
data=self.req_data,
format='json')
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
def test_change_ext_conn_conflict(self):
url = "/api/vnflcm/v1/vnf_instances/12345/change_ext_conn"
response = self.client.post(url,
data=self.req_data,
format='json')
self.assertEqual(status.HTTP_409_CONFLICT, response.status_code)
def test_change_ext_conn_badreq(self):
url = "/api/vnflcm/v1/vnf_instances/123/change_ext_conn"
response = self.client.post(url,
data={},
format='json')
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
@mock.patch.object(JobUtil, 'create_job')
def test_change_ext_conn_inner_error(self, mock_run):
mock_run.return_value = NFLCMException('Boom!')
url = "/api/vnflcm/v1/vnf_instances/123/change_ext_conn"
response = self.client.post(url,
data=self.req_data,
format='json')
self.assertEqual(
status.HTTP_500_INTERNAL_SERVER_ERROR,
response.status_code)
@mock.patch.object(restcall, 'call_req')
@mock.patch.object(api, 'call')
    def test_change_ext_conn_success(self, mock_call, mock_call_req):
self.nf_inst_id = '12345'
res_cache = {"volume": {}, "flavor": {}, "port": {}}
res_cache["port"]["ext_cp"] = "port1"
NfInstModel(nfinstid=self.nf_inst_id,
nf_name='VNF1',
nf_desc="VNF DESC",
vnfdid="1",
netype="XGW",
vendor="ZTE",
vnfSoftwareVersion="V1",
version="V1",
package_id="2",
status='INSTANTIATED',
vnfd_model=json.dumps(const.vnfd_for_scale),
vimInfo=json.dumps({}),
resInfo=json.dumps(res_cache)).save()
StorageInstModel.objects.create(
storageid="1",
vimid="1",
resourceid="11",
insttype=0,
instid=self.nf_inst_id,
is_predefined=1
)
NetworkInstModel.objects.create(
networkid='1',
vimid='1',
resourceid='1',
name='pnet_network',
is_predefined=1,
tenant='admin',
insttype=0,
instid=self.nf_inst_id
)
SubNetworkInstModel.objects.create(
subnetworkid='1',
vimid='1',
resourceid='1',
networkid='1',
is_predefined=1,
name='sub_pnet',
tenant='admin',
insttype=0,
instid=self.nf_inst_id
)
PortInstModel.objects.create(
portid='1',
networkid='1',
subnetworkid='1',
vimid='1',
resourceid='1',
is_predefined=1,
name='ext_cp',
tenant='admin',
insttype=0,
instid=self.nf_inst_id
)
FlavourInstModel.objects.create(
flavourid="1",
vimid="1",
resourceid="11",
instid=self.nf_inst_id,
is_predefined=1,
name="Flavor_sunshine"
)
VmInstModel.objects.create(
vmid="1",
vimid="1",
resourceid="11",
insttype=0,
instid=self.nf_inst_id,
vmname="test_01",
is_predefined=1,
operationalstate=1
)
VmInstModel.objects.create(
vmid="2",
vimid="1",
resourceid="22",
insttype=0,
instid=self.nf_inst_id,
vmname="test_02",
is_predefined=1,
operationalstate=1
)
VNFCInstModel.objects.create(
vnfcinstanceid="1",
instid=self.nf_inst_id,
vmid="1"
)
VNFCInstModel.objects.create(
vnfcinstanceid="2",
instid=self.nf_inst_id,
vmid="2"
)
r1_apply_grant_result = [
0,
json.JSONEncoder().encode(const.instantiate_grant_result),
'200'
]
mock_call_req.side_effect = [
r1_apply_grant_result,
]
mock_call.side_effect = [
const.c1_data_get_tenant_id,
const.c7_data_create_flavor,
const.c6_data_create_port
]
self.job_id = JobUtil.create_job('NF', 'VNF_CHANGE_EXT_CONN', self.nf_inst_id)
JobUtil.add_job_status(self.job_id, 0, "VNF_'VNF_CHANGE_EXT_CONN'_READY")
ChangeExtConn(self.req_data, self.nf_inst_id, self.job_id,).run()
print([{job.progress: job.descp} for job in JobStatusModel.objects.filter(jobid=self.job_id)])
self.assert_job_result(
self.job_id,
100,
'Change ext conn success.'
)
| 35.609375 | 102 | 0.515796 |
7949720481c5aed43a85afc9b8d3572159562089
| 4,340 |
py
|
Python
|
tests/result_storages/test_file_storage.py
|
tony612/thumbor
|
2efec9fc1bc67c9d029904870fa624a81966f3a1
|
[
"MIT"
] | 1 |
2021-02-25T18:41:01.000Z
|
2021-02-25T18:41:01.000Z
|
tests/result_storages/test_file_storage.py
|
tony612/thumbor
|
2efec9fc1bc67c9d029904870fa624a81966f3a1
|
[
"MIT"
] | null | null | null |
tests/result_storages/test_file_storage.py
|
tony612/thumbor
|
2efec9fc1bc67c9d029904870fa624a81966f3a1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import tempfile
from datetime import datetime
from os.path import abspath, dirname, join
import mock
from preggy import expect
from tornado.testing import gen_test
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import Context, RequestParameters
from thumbor.result_storages import ResultStorageResult
from thumbor.result_storages.file_storage import Storage as FileStorage
class BaseFileStorageTestCase(TestCase):
def __init__(self, *args, **kw):
self.storage_path = None
self.context = None
self.file_storage = None
super(BaseFileStorageTestCase, self).__init__(*args, **kw)
def get_config(self):
self.storage_path = tempfile.TemporaryDirectory()
return Config(RESULT_STORAGE_FILE_STORAGE_ROOT_PATH=self.storage_path.name)
def tearDown(self):
super(BaseFileStorageTestCase, self).tearDown()
if self.storage_path is not None:
self.storage_path.cleanup()
@staticmethod
def get_request():
return RequestParameters()
@staticmethod
def get_fixture_path():
return abspath(join(dirname(__file__), "../fixtures/result_storages"))
def get_context(self):
cfg = self.get_config()
ctx = Context(None, cfg, None)
ctx.request = self.get_request()
self.context = ctx
self.file_storage = FileStorage(self.context)
return ctx
@staticmethod
def get_http_path():
return "http://example.com/path/to/a.jpg"
class FileStorageTestCase(BaseFileStorageTestCase):
@gen_test
async def test_normalized_path(self):
expect(self.file_storage).not_to_be_null()
expect(self.file_storage.normalize_path(self.get_http_path())).to_equal(
f"{self.storage_path.name}/default/b6/be/"
"a3e916129541a9e7146f69a15eb4d7c77c98"
)
class WebPFileStorageTestCase(BaseFileStorageTestCase):
def get_config(self):
self.storage_path = tempfile.TemporaryDirectory()
return Config(
AUTO_WEBP=True,
RESULT_STORAGE_FILE_STORAGE_ROOT_PATH=self.storage_path.name,
)
def tearDown(self):
super(WebPFileStorageTestCase, self).tearDown()
self.storage_path.cleanup()
def get_request(self):
return RequestParameters(accepts_webp=True)
@gen_test
async def test_normalized_path_with_auto_webp_path(self):
expect(self.file_storage).not_to_be_null()
expect(self.file_storage.normalize_path(self.get_http_path())).to_equal(
f"{self.storage_path.name}/auto_webp/b6/be/"
"a3e916129541a9e7146f69a15eb4d7c77c98"
)
class ResultStorageResultTestCase(BaseFileStorageTestCase):
def get_config(self):
return Config(RESULT_STORAGE_FILE_STORAGE_ROOT_PATH=self.get_fixture_path())
def get_request(self):
return RequestParameters(url="image.jpg")
@gen_test
async def test_can_get_image_from_storage(self):
result = await self.file_storage.get()
expect(result).to_be_instance_of(ResultStorageResult)
expect(result.successful).to_equal(True)
expect(len(result)).to_equal(5319)
expect(len(result)).to_equal(result.metadata["ContentLength"])
expect(result.last_modified).to_be_instance_of(datetime)
class ExpiredFileStorageTestCase(BaseFileStorageTestCase):
def get_config(self):
return Config(
RESULT_STORAGE_FILE_STORAGE_ROOT_PATH=self.get_fixture_path(),
RESULT_STORAGE_EXPIRATION_SECONDS=10,
)
def get_request(self):
return RequestParameters(url="image.jpg")
@gen_test
async def test_cannot_get_expired_1_day_old_image(self):
current_timestamp = (datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()
new_mtime = current_timestamp - 60 * 60 * 24
with mock.patch(
"thumbor.result_storages.file_storage.getmtime", return_value=new_mtime,
):
result = await self.file_storage.get()
expect(result).to_be_null()
| 32.38806 | 86 | 0.704839 |
794972bc0e7dd76c4d7b0adf20f1549e00e7648e
| 7,844 |
py
|
Python
|
views/network_panel.py
|
nv-hiep/NeuralNetworkSnake
|
979d547df716b1cdeb3e1d41fa4f813094e2306e
|
[
"MIT"
] | null | null | null |
views/network_panel.py
|
nv-hiep/NeuralNetworkSnake
|
979d547df716b1cdeb3e1d41fa4f813094e2306e
|
[
"MIT"
] | null | null | null |
views/network_panel.py
|
nv-hiep/NeuralNetworkSnake
|
979d547df716b1cdeb3e1d41fa4f813094e2306e
|
[
"MIT"
] | null | null | null |
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtCore import Qt
from typing import List
from network.neural_network import *
from helper.snake import Snake
from helper.config import config
class NetworkPanel(QtWidgets.QWidget):
def __init__(self, parent, snake: Snake):
super().__init__(parent)
self.snake = snake
self.neuron_xy = {}
font = QtGui.QFont('Times', 11, QtGui.QFont.Normal)
font_bold = QtGui.QFont('Times', 11, QtGui.QFont.Bold)
# creating a label widgets
self.label_network = QtWidgets.QLabel('Network Structure', self)
self.label_network.setFont(font_bold)
self.label_network.move(200, 25)
self.label_network.setFixedSize(400, 20)
self.label_layers = QtWidgets.QLabel('Layer units: ' + '[{}, {}, 4]'.format(config['vision_type'] * 3 + 4 + 4, ', '.join([str(num_neurons) for num_neurons in config['hidden_layer_units'][config['vision_type']] ])), self)
self.label_layers.setFont(font)
self.label_layers.move(200, 50)
self.label_layers.setFixedSize(400, 20)
self.label_hidden = QtWidgets.QLabel('Hidden layer activation: ' + ' '.join([word.capitalize() for word in config['hidden_activation'].split('_')]), self)
self.label_hidden.setFont(font)
self.label_hidden.move(200, 75)
self.label_hidden.setFixedSize(400, 20)
self.label_ouput = QtWidgets.QLabel('Output layer activation: ' + ' '.join([word.capitalize() for word in config['output_activation'].split('_')]), self)
self.label_ouput.setFont(font)
self.label_ouput.move(200, 100)
self.label_ouput.setFixedSize(400, 20)
self.label_vision = QtWidgets.QLabel('Snake vision: ' + str(config['vision_type']) + ' directions', self)
self.label_vision.setFont(font)
self.label_vision.move(200, 125)
self.label_vision.setFixedSize(400, 20)
self.label_vision_type = QtWidgets.QLabel('Apple/Self Vision: ' + config['apple_and_self_vision'].lower(), self)
self.label_vision_type.setFont(font)
self.label_vision_type.move(200, 150)
self.label_vision_type.setFixedSize(400, 20)
# creating a label widgets
self.label_generation = QtWidgets.QLabel('Snake Game', self)
self.label_generation.setFont(font_bold)
self.label_generation.move(200, 650)
self.label_generation.setFixedSize(400, 30)
self.label_generation = QtWidgets.QLabel('Generation: 1', self)
self.label_generation.setFont(font)
self.label_generation.move(200, 675)
self.label_generation.setFixedSize(400, 30)
self.label_curr_indiv = QtWidgets.QLabel('Individual: 1/100', self)
self.label_curr_indiv.setFont(font)
self.label_curr_indiv.move(200, 700)
self.label_curr_indiv.setFixedSize(400, 30)
self.label_best_score = QtWidgets.QLabel('Best score: 0', self)
self.label_best_score.setFont(font)
self.label_best_score.move(200, 725)
self.label_best_score.setFixedSize(400, 30)
self.label_best_fitness = QtWidgets.QLabel('Best fitness: 0', self)
self.label_best_fitness.setFont(font)
self.label_best_fitness.move(200, 750)
self.label_best_fitness.setFixedSize(400, 30)
self.label_snake_length = QtWidgets.QLabel('Snake length: 0', self)
self.label_snake_length.setFont(font)
self.label_snake_length.move(200, 775)
self.label_snake_length.setFixedSize(400, 30)
# self.label_snake_length.setStyleSheet('border: 1px solid black;')
self.show()
def paintEvent(self, event: QtGui.QPaintEvent) -> None:
painter = QtGui.QPainter()
painter.begin(self)
self._display(painter)
painter.end()
def update(self) -> None:
self.repaint()
def _display(self, painter: QtGui.QPainter) -> None:
'''
        Plot the layers of the network, their units, and the connections between them
'''
# Height and Width of the window
H = self.frameGeometry().height()
# W = self.frameGeometry().width()
# The input of the network
inputs = self.snake.vision_as_array
# [List of ] sizes of the layers
layer_units = self.snake.network.layer_units
# Output from a neural network
pred_node = np.argmax( self.snake.network._forward_prop(inputs) )
# vertical space among units, and radius size of a unit
input_units = inputs.shape[0]
vertical_space, radius = (4, 6) if input_units > 32 else (5, 8)
# Margins and offsets
left_margin = 15
h_offset = left_margin
# Draw layers and their units
for layer, n_units in enumerate(layer_units):
# Vertical offset - for plotting
v_offset = (H - ((2*radius + vertical_space) * n_units) )/2
layer_output = None
weights = None
if layer > 0:
# Output of each layer
layer_output = self.snake.network.layers[layer].A
# Weights matrix of each layer
weights = self.snake.network.layers[layer].W
prev_n_units = weights.shape[1]
curr_n_units = weights.shape[0]
for curr_unit in range(n_units):
_x = h_offset
_y = curr_unit * (radius*2 + vertical_space) + v_offset
t = (layer, curr_unit)
if t not in self.neuron_xy:
self.neuron_xy[t] = (_x, _y + radius)
# Input layer
if layer == 0:
# If the node is fed, it's green, else it's gray
painter.setBrush(QtGui.QBrush(Qt.green if inputs[curr_unit, 0] > 0 else Qt.gray))
# Hidden layers
if (layer > 0) and (layer < len(layer_units) - 1):
painter.setBrush(QtGui.QBrush(Qt.cyan if layer_output[curr_unit, 0] > 0. else Qt.gray))
# Output layer
if layer == len(layer_units) - 1:
text = ('Up', 'Down', 'Left', 'Right')[curr_unit]
painter.setPen(QtGui.QPen(Qt.red if curr_unit == pred_node else Qt.black))
painter.drawText(h_offset + 30, curr_unit * (radius*2 + vertical_space) + v_offset + 1.5*radius, text)
painter.setBrush(QtGui.QBrush(Qt.green if curr_unit == pred_node else Qt.gray))
# draw the nodes as circles
painter.drawEllipse(_x, _y, radius*2, radius*2)
# draw lines connecting the nodes
if layer > 0:
for prev_unit in range(prev_n_units):
line_color = Qt.blue if weights[curr_unit, prev_unit] > 0 else Qt.gray
painter.setPen(QtGui.QPen(line_color))
# Locations of the nodes
start = self.neuron_xy[(layer-1, prev_unit)]
end = self.neuron_xy[(layer, curr_unit)]
# Offset start[0] by diameter of circle so that the line starts on the right of the circle
painter.drawLine(start[0] + radius*2, start[1], end[0], end[1])
# End - unit nodes
# Add distance between layers
h_offset += 100
# End - layer
# End - def
# End - class
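# Usage sketch (illustrative, not part of the original file; assumes a parent
# QWidget and an initialised Snake instance from helper.snake):
#
#     app = QtWidgets.QApplication([])
#     window = QtWidgets.QWidget()
#     panel = NetworkPanel(window, snake)
#     window.resize(600, 850)
#     window.show()
#     app.exec_()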
| 37.89372 | 229 | 0.574579 |
794973c536de7b04033c7de1773de2084c3d0906
| 1,268 |
py
|
Python
|
app/api/migrations/0002_project.py
|
nabaz/projecttracker
|
c6b326592f7a6925b2fbc0924350dd0951beca0f
|
[
"MIT"
] | null | null | null |
app/api/migrations/0002_project.py
|
nabaz/projecttracker
|
c6b326592f7a6925b2fbc0924350dd0951beca0f
|
[
"MIT"
] | null | null | null |
app/api/migrations/0002_project.py
|
nabaz/projecttracker
|
c6b326592f7a6925b2fbc0924350dd0951beca0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-07 20:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(editable=False)),
('updated_at', models.DateTimeField()),
('name', models.CharField(max_length=100)),
('description', models.TextField()),
('start_date', models.DateField(default=None)),
('end_date', models.DateField(default=None)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Customer')),
('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| 35.222222 | 114 | 0.593849 |
794974dff0b2faff6e6a8fa361bdaf77c7e3a81e
| 647 |
py
|
Python
|
utilities/embed.py
|
GryPr/Vireo
|
bcdd5eafcfa548b300ce334005e8517b01e322c0
|
[
"MIT"
] | null | null | null |
utilities/embed.py
|
GryPr/Vireo
|
bcdd5eafcfa548b300ce334005e8517b01e322c0
|
[
"MIT"
] | 8 |
2022-03-28T16:15:46.000Z
|
2022-03-31T01:31:29.000Z
|
utilities/embed.py
|
GryPr/Vireo
|
bcdd5eafcfa548b300ce334005e8517b01e322c0
|
[
"MIT"
] | null | null | null |
import disnake
from disnake import Embed
def generic_embed(title: str, description: str, author: str) -> Embed:
embed = disnake.Embed(color=0x09a2e3)
embed.title = title
embed.description = description
embed.set_footer(text=f"Requested by {author}")
return embed
def wip_embed() -> Embed:
embed = disnake.Embed(title="You're early!",
description=f"Command has not been implemented yet!")
return embed
def error_embed(error: str, title: str = "Error has occurred") -> Embed:
embed = disnake.Embed(color=0xff0000)
embed.title = title
embed.description = error
return embed
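# Usage sketch (illustrative, not part of the original file; `inter` stands for
# a hypothetical disnake interaction object):
#
#     embed = generic_embed("Ping", "Pong!", str(inter.author))
#     await inter.response.send_message(embed=embed)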
| 26.958333 | 79 | 0.676971 |
7949757b5ab73ee99343f3324f0b5018d87547ed
| 2,127 |
py
|
Python
|
pub_data_visualization/load/plot/subplot/forecasting_error.py
|
cre-dev/pub-data-visualization
|
229bb7a543684be2cb06935299345ce3263da946
|
[
"MIT"
] | 8 |
2021-04-29T11:46:18.000Z
|
2022-02-21T16:07:14.000Z
|
pub_data_visualization/load/plot/subplot/forecasting_error.py
|
cre-dev/pub-data-visualization
|
229bb7a543684be2cb06935299345ce3263da946
|
[
"MIT"
] | 3 |
2021-03-15T14:26:43.000Z
|
2021-12-02T15:27:49.000Z
|
pub_data_visualization/load/plot/subplot/forecasting_error.py
|
cre-dev/pub-data-visualization
|
229bb7a543684be2cb06935299345ce3263da946
|
[
"MIT"
] | null | null | null |
import matplotlib as mpl
#
from .... import global_var
def forecasting_error(ax,
df,
load_unit = None,
load_observation_nature = None,
load_forecast_nature = None,
**kwargs
):
"""
Draws in a subplot the forecasting error.
:param ax: The ax to fill
:param df: The load data
:param load_observation_nature: The nature of the observation data to plot
:param load_forecast_nature: The nature of the forecasts to plot
:param kwargs: additional parameter for the plt.plot function
:type ax: matplotlib.axes._subplots.AxesSubplot
:type df: pd.DataFrame
:type load_observation_nature: string
:type load_forecast_nature: string
:type kwargs: dict
:return: None
:rtype: None
"""
forecasting_error = ( df.loc[df[global_var.load_nature] == load_observation_nature][load_unit]
- df.loc[df[global_var.load_nature] == load_forecast_nature][load_unit]
)
forecasting_error = forecasting_error.squeeze().dropna()
ax.plot(forecasting_error.index,
forecasting_error,
**kwargs,
)
ax.fill_between(
forecasting_error.index,
forecasting_error,
where = (forecasting_error) > 0,
label = 'Positive errors',
color = mpl.colors.cnames['deepskyblue'],
)
ax.fill_between(
forecasting_error.index,
forecasting_error,
where = (forecasting_error) < 0,
label = 'Negative errors',
color = mpl.colors.cnames['firebrick'],
)
ax.plot(
[forecasting_error.index.min(),
forecasting_error.index.max(),
],
[0,0],
color = 'k',
ls = ':',
)
| 32.227273 | 99 | 0.504937 |
7949758aeafa913995611bc6e55343d78f875543
| 75,345 |
py
|
Python
|
test/fx_acc/test_acc_tracer.py
|
you74674/pytorch
|
06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb
|
[
"Intel"
] | 1 |
2022-01-20T03:49:23.000Z
|
2022-01-20T03:49:23.000Z
|
test/fx_acc/test_acc_tracer.py
|
you74674/pytorch
|
06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb
|
[
"Intel"
] | 14 |
2021-10-14T06:58:50.000Z
|
2021-12-17T11:51:07.000Z
|
test/fx_acc/test_acc_tracer.py
|
you74674/pytorch
|
06838ce8b16b2cc2f9e903f3ebdd46659a0e66bb
|
[
"Intel"
] | null | null | null |
# Owner(s): ["oncall: fx"]
import unittest
from typing import Callable, List
import numpy as np
import torch
import torch.fx.experimental.fx_acc.acc_normalizer as acc_normalizer
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
import torch.fx.experimental.fx_acc.acc_tracer as acc_tracer
import torch.fx.experimental.fx_acc.acc_utils as acc_utils
import torch.nn as nn
import torchvision
from parameterized import parameterized, param
torch.manual_seed(0)
class AccTracerTest(unittest.TestCase):
def _make_model_unit_test(
self,
model,
*args,
input_shape=None,
enable_allclose=False,
**kwargs,
):
"""
        Test that the model can be traced correctly and produces the correct
        result.
"""
if input_shape is None:
input_shape = [1, 3, 224, 224]
input = torch.randn(input_shape)
traced = acc_tracer.trace(model, [input])
if enable_allclose:
torch.testing.assert_allclose(model(input), traced(input))
else:
self.assertTrue(torch.equal(model(input), traced(input)))
def _make_acc_op_function_test(
self,
acc_op: Callable,
torch_op,
*args,
input_shape=(2, 3),
validate_same_kwargs=True,
enable_allclose=False,
**kwargs,
):
"""
        Test that torch_op is traced to the expected acc_op and that the traced module produces the same outputs.
"""
class TestModule(torch.nn.Module):
def __init__(self, torch_op, args, kwargs):
super().__init__()
self._torch_op = torch_op
self._args = args
self._kwargs = kwargs
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self._torch_op(a, *self._args, **self._kwargs)
m = TestModule(torch_op, args, kwargs)
a = torch.randn(*input_shape)
traced = acc_tracer.trace(m, [a])
ph_a = acc_op_node = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_op)
self.assertEqual(node.kwargs["input"], ph_a)
if validate_same_kwargs:
for key, value in kwargs.items():
self.assertEqual(node.kwargs[key], value)
acc_op_node = node
elif node.op == "output":
if acc_op is None:
# If we expect no new acc_op after graph building
# and found we have only output in traced graph
continue
self.assertEqual(acc_op_node, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
ref_outputs = m(a)
outputs = traced(a)
if isinstance(ref_outputs, torch.Tensor):
ref_outputs = [ref_outputs]
outputs = [outputs]
for ref_output, output in zip(ref_outputs, outputs):
if enable_allclose:
torch.testing.assert_allclose(
torch.nan_to_num(ref_output), torch.nan_to_num(output)
)
else:
self.assertTrue(
torch.equal(torch.nan_to_num(ref_output), torch.nan_to_num(output))
)
def test_sum(self):
self._make_acc_op_function_test(acc_ops.sum, torch.sum)
self._make_acc_op_function_test(acc_ops.sum, torch.sum, dim=(1,), keepdim=True)
def test_mean(self):
self._make_acc_op_function_test(acc_ops.mean, torch.mean)
self._make_acc_op_function_test(acc_ops.mean, torch.mean, dim=(1,), keepdim=True)
def test_pad(self):
self._make_acc_op_function_test(acc_ops.pad, torch.nn.functional.pad, pad=(2, 0))
def test_max(self):
def torch_max(x, *args, **kwargs):
return x.max(*args, **kwargs)
self._make_acc_op_function_test(acc_ops.max_full_reduce, torch_max)
self._make_acc_op_function_test(
acc_ops.max_dim_reduce, torch_max, dim=1, keepdim=True
)
self._make_acc_op_function_test(
acc_ops.max_dim_reduce, torch_max, input_shape=(1, 4), dim=1, keepdim=True
)
self._make_acc_op_function_test(
acc_ops.max_dim_reduce, torch_max, input_shape=(3, 4, 3), dim=2
)
@parameterized.expand(
[
param("max_maximum", orig_op=torch.max, expected_op=acc_ops.maximum),
param(
"maximum_maximum", orig_op=torch.maximum, expected_op=acc_ops.maximum
),
param("min_minimum", orig_op=torch.min, expected_op=acc_ops.minimum),
param(
"minimum_minimum", orig_op=torch.minimum, expected_op=acc_ops.minimum
),
]
)
def test_maximum_minimum(self, _: str, orig_op, expected_op):
class TestModule(torch.nn.Module):
def __init__(self, orig_op):
super().__init__()
self.orig_op = orig_op
def forward(self, input: torch.Tensor, other: torch.Tensor) -> torch.Tensor:
return self.orig_op(input, other)
m = TestModule(orig_op)
input, other = torch.randn(2, 2), torch.randn(2, 2)
traced = acc_tracer.trace(m, [input, other])
ph_in = ph_oth = mxm = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "other":
ph_oth = node
else:
self.assertTrue(str(node.target) == "input")
ph_in = node
elif node.op == "call_function":
if node.target == expected_op:
self.assertEqual(node.kwargs["input"], ph_in)
self.assertEqual(node.kwargs["other"], ph_oth)
mxm = node
elif node.op == "output":
self.assertEqual(mxm, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input, other), traced(input, other)))
def test_conv(self):
"""
Test that a conv is traced as expected.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(8, 7, 3, stride=2)
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.conv(a)
m = TestModule()
input = torch.randn(3, 8, 10, 10)
traced = acc_tracer.trace(m, [input])
ph = weight_attr = bias_attr = conv = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "get_attr" and node.target == "conv.weight":
weight_attr = node
elif node.op == "get_attr" and node.target == "conv.bias":
bias_attr = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.conv2d)
self.assertEqual(node.kwargs["input"], ph)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["bias"], bias_attr)
self.assertEqual(node.kwargs["stride"], (2, 2))
self.assertEqual(node.kwargs["padding"], (0, 0))
self.assertEqual(node.kwargs["dilation"], (1, 1))
self.assertEqual(node.kwargs["groups"], 1)
conv = node
elif node.op == "output":
self.assertEqual(conv, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input), traced(input)))
def test_quantized_conv2d(self):
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.quantized.Conv2d(3, 3, 1)
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.conv(a)
m = TestModule()
input = torch.quantize_per_tensor(
torch.randn(1, 3, 1, 1), scale=0.01, zero_point=3, dtype=torch.quint8
)
traced = acc_tracer.trace(m, [input])
print(traced.graph)
ph = weight_attr = bias_attr = conv = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "get_attr" and node.target == "conv_weight":
weight_attr = node
elif node.op == "get_attr" and node.target == "conv_bias":
bias_attr = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.quantized_conv2d)
self.assertEqual(node.kwargs["input"], ph)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["bias"], bias_attr)
conv = node
elif node.op == "output":
self.assertEqual(conv, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input), traced(input)))
def test_quantized_convrelu2d(self):
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.intrinsic.quantized.ConvReLU2d(3, 3, 1)
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.conv(a)
m = TestModule()
input = torch.quantize_per_tensor(
torch.randn(1, 3, 1, 1), scale=0.01, zero_point=3, dtype=torch.quint8
)
traced = acc_tracer.trace(m, [input])
ph = weight_attr = bias_attr = conv = relu = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "get_attr" and node.target == "conv_weight":
weight_attr = node
elif node.op == "get_attr" and node.target == "conv_bias":
bias_attr = node
elif node.op == "call_function" and node.target == acc_ops.quantized_conv2d:
self.assertEqual(node.target, acc_ops.quantized_conv2d)
self.assertEqual(node.kwargs["input"], ph)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["bias"], bias_attr)
conv = node
elif node.op == "call_function" and node.target == acc_ops.relu:
self.assertEqual(node.target, acc_ops.relu)
self.assertEqual(node.kwargs["input"], conv)
relu = node
elif node.op == "output":
self.assertEqual(relu, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input), traced(input)))
def test_embedding_bag(self):
"""
Test that an embedding_bag is traced as expected.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.eb = nn.EmbeddingBag(10, 3, mode="sum", include_last_offset=True)
def forward(self, inp: torch.Tensor, offsets: torch.Tensor) -> torch.Tensor:
return self.eb(inp, offsets)
m = TestModule()
inp = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
traced = acc_tracer.trace(m, [inp, offsets])
inp_node = offsets_node = weight_attr = eb_node = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "inp":
inp_node = node
elif str(node.target) == "offsets":
offsets_node = node
else:
self.fail(f"Unexpected placeholder {node.target}.")
continue
elif node.op == "get_attr" and node.target == "eb.weight":
weight_attr = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.embedding_bag)
# Note: Normalization called from acc_tracer means we use all kwargs.
self.assertEqual(node.kwargs["input"], inp_node)
self.assertEqual(node.kwargs["offsets"], offsets_node)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["mode"], "sum")
self.assertEqual(node.kwargs["include_last_offset"], True)
# The rest of these were unspecified, so verify they fell back
# to their respective default values thanks to normalization.
self.assertEqual(node.kwargs["max_norm"], None)
self.assertEqual(node.kwargs["norm_type"], 2.0)
self.assertEqual(node.kwargs["scale_grad_by_freq"], False)
self.assertEqual(node.kwargs["sparse"], False)
self.assertEqual(node.kwargs["per_sample_weights"], None)
eb_node = node
elif node.op == "output":
self.assertEqual(eb_node, node.args[0])
self.assertTrue(torch.equal(m(inp, offsets), traced(inp, offsets)))
def test_embedding_bag_byte_and_4bit_rowwise_offsets(self):
"""
Test that 4 bit quantized embedding_bag is traced as expected.
"""
class TestModule(nn.Module):
def __init__(
self,
op,
q_weights,
per_index_weights,
):
super().__init__()
self.emb = op
self.q_weights = q_weights
self.per_index_weights = per_index_weights
def forward(
self,
indices,
offsets,
):
return self.emb(
self.q_weights,
indices,
offsets,
mode=0,
per_sample_weights=self.per_index_weights,
include_last_offset=True,
)
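# Note: for the quantized rowwise-offsets ops, mode=0 selects sum pooling (the embedding-bag
# mode enum) and the weights must be prepacked via embedding_bag_{byte,4bit}_prepack, which
# run_embedding_bag_test below does before tracing.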
def run_embedding_bag_test(is_4bit, use_weights):
# generate random indices, offsets, and weights.
num_embeddings = 16
embedding_dim = 32
num_lengths = 10
weights = torch.from_numpy(
(np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(
np.float32
)
)
q_weights = (
torch.ops.quantized.embedding_bag_4bit_prepack(weights)
if is_4bit
else torch.ops.quantized.embedding_bag_byte_prepack(weights)
)
np_lengths = np.random.randint(0, num_lengths, size=10).astype(np.int32)
num_lengths = np.sum(np_lengths)
indices = torch.from_numpy(
np.random.randint(low=0, high=num_embeddings, size=num_lengths)
).int()
lengths = torch.from_numpy(np_lengths)
offsets = torch.cat([torch.zeros([1]), torch.cumsum(lengths, 0)]).int()
weights = torch.randint(low=0, high=4, size=indices.size())
per_sample_weights = weights.to(torch.float32)
indices = indices.to(torch.int32)
offsets = offsets.to(torch.int32)
inputs = [
indices,
offsets,
]
op = (
torch.ops.quantized.embedding_bag_4bit_rowwise_offsets
if is_4bit
else torch.ops.quantized.embedding_bag_byte_rowwise_offsets
)
m = TestModule(
op,
q_weights,
per_sample_weights,
)
traced = acc_tracer.trace(m, inputs)
print(traced.graph)
expected_target = (
acc_ops.embedding_bag_4bit_rowwise_offsets
if is_4bit
else acc_ops.embedding_bag_byte_rowwise_offsets
)
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "indices":
inp_node = node
elif str(node.target) == "offsets":
offsets_node = node
else:
self.fail(f"Unexpected placeholder {node.target}.")
continue
elif node.op == "get_attr" and node.target == "q_weights":
weight_attr = node
elif node.op == "call_function":
self.assertEqual(node.target, expected_target)
# Note: Normalization called from acc_tracer means we use all kwargs.
self.assertEqual(node.kwargs["indices"], inp_node)
self.assertEqual(node.kwargs["offsets"], offsets_node)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["mode"], 0)
self.assertEqual(node.kwargs["include_last_offset"], True)
# The rest of the kwargs were unspecified and fall back to their defaults via
# normalization; they are not re-checked here.
eb_node = node
elif node.op == "output":
self.assertEqual(eb_node, node.args[0])
self.assertTrue(torch.equal(m(indices, offsets), traced(indices, offsets)))
# test 8-bit
run_embedding_bag_test(is_4bit=False, use_weights=True)
# test 4-bit
run_embedding_bag_test(is_4bit=True, use_weights=True)
def test_quantized_batch_norm2d(self):
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.bn = nn.quantized.BatchNorm2d(3)
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.bn(a)
m = TestModule()
m.eval()
input = torch.quantize_per_tensor(
torch.randn(1, 3, 1, 1), scale=0.01, zero_point=3, dtype=torch.quint8
)
traced = acc_tracer.trace(m, [input])
ph = weight_attr = bias_attr = bn_mean = bn_var = bn = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "get_attr" and node.target == "bn.weight":
weight_attr = node
elif node.op == "get_attr" and node.target == "bn.bias":
bias_attr = node
elif node.op == "get_attr" and node.target == "bn.running_mean":
bn_mean = node
elif node.op == "get_attr" and node.target == "bn.running_var":
bn_var = node
elif node.op == "get_attr" and node.target == "bn.scale":
bn_scale = node
elif node.op == "get_attr" and node.target == "bn.zero_point":
bn_zero_point = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.quantized_batch_norm2d)
self.assertEqual(node.kwargs["input"], ph)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["bias"], bias_attr)
self.assertEqual(node.kwargs["running_mean"], bn_mean)
self.assertEqual(node.kwargs["running_var"], bn_var)
self.assertEqual(node.kwargs["acc_out_ty"][6]["scale"], bn_scale)
self.assertEqual(node.kwargs["acc_out_ty"][6]["zero_point"], bn_zero_point)
bn = node
elif node.op == "output":
self.assertEqual(bn, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input), traced(input)))
def test_linear(self):
"""
Test that a linear is traced as expected, i.e. to the functional level and with
kwarg normalization. Also verify that symbolic shape inference worked as part of
the acc_tracer.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 5, bias=True)
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.linear(a)
m = TestModule()
test_input = torch.randn(1, 3)
traced = acc_tracer.trace(m, test_input)
ph = weight_attr = bias_attr = linear = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "get_attr" and node.target == "linear.weight":
weight_attr = node
elif node.op == "get_attr" and node.target == "linear.bias":
bias_attr = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.linear)
self.assertEqual(node.kwargs["input"], ph)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["bias"], bias_attr)
linear = node
elif node.op == "output":
self.assertEqual(linear, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(test_input), traced(test_input)))
def test_quantized_linear(self):
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.quantized.Linear(3, 5)
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.linear(a)
m = TestModule()
input = torch.quantize_per_tensor(
torch.randn(2, 3), scale=0.01, zero_point=3, dtype=torch.quint8
)
traced = acc_tracer.trace(m, [input])
ph = weight_attr = bias_attr = linear = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "get_attr" and node.target == "linear_weight":
weight_attr = node
elif node.op == "get_attr" and node.target == "linear_bias":
bias_attr = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.quantized_linear)
self.assertEqual(node.kwargs["input"], ph)
self.assertEqual(node.kwargs["weight"], weight_attr)
self.assertEqual(node.kwargs["bias"], bias_attr)
linear = node
elif node.op == "output":
self.assertEqual(linear, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input), traced(input)))
@parameterized.expand(
[
param("remove_exceptions_false", remove_exceptions=False),
param("remove_exceptions_true", remove_exceptions=True),
]
)
def test_batch_norm(self, _, remove_exceptions):
"""
Test that a batch norm is traced as expected, i.e. to the functional level
and with kwarg normalization. Note that we also expect to see a
ConditionalExceptionWrapper in the graph that the AST rewriter converted
from `if x: raise y`.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(2)
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.bn(a)
m = TestModule()
input = torch.randn(2, 2, 1, 1)
# Note: remove_exceptions is parameterized; when exceptions are kept we check below
# that the ConditionalExceptionWrapper was found in the graph.
traced = acc_tracer.trace(
m,
[input],
remove_exceptions=remove_exceptions,
)
ph = exception_wrapper = weight = bias = mean = var = bn = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "get_attr" and node.target == "bn.weight":
weight = node
elif node.op == "get_attr" and node.target == "bn.bias":
bias = node
elif node.op == "get_attr" and node.target == "bn.running_mean":
mean = node
elif node.op == "get_attr" and node.target == "bn.running_var":
var = node
elif node.op == "call_function" and node.target == acc_ops.batch_norm:
# Note: Normalization called from acc_tracer means we use
# all kwargs.
self.assertEqual(node.kwargs["input"], ph)
self.assertEqual(node.kwargs["weight"], weight)
self.assertEqual(node.kwargs["bias"], bias)
self.assertEqual(node.kwargs["running_mean"], mean)
self.assertEqual(node.kwargs["running_var"], var)
bn = node
elif (
node.op == "call_module"
and node.target == "bn._conditional_exception_wrapper_ValueError"
):
exception_wrapper = node
elif node.op == "output":
self.assertEqual(bn, node.args[0])
self.assertTrue(remove_exceptions or exception_wrapper is not None)
self.assertTrue(torch.equal(m(input), traced(input)))
def test_remove_asserts(self):
"""
Test that a Module with asserts has the asserts automatically removed, as
well as calls to a class method that should be dead.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def _test_method(self, a):
return a
def forward(self, a: torch.Tensor) -> torch.Tensor:
assert torch.equal(self._test_method(a), a)
return a
m = TestModule()
input = torch.randn(10)
traced = acc_tracer.trace(m, [input], ast_rewriter_allow_list={TestModule})
# Check we have no call_functions. If remove asserts didn't work
# correctly we would see a call to torch._assert, _test_method, and
# torch.equal.
for node in traced.graph.nodes:
self.assertFalse(node.op == "call_function")
self.assertTrue(torch.equal(m(input), traced(input)))
def test_sequential(self):
"""
Test that the tracer works for torch.nn.Sequential.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.model = nn.Sequential(nn.Sigmoid(), nn.ReLU())
def forward(self, a: torch.Tensor) -> torch.Tensor:
return self.model(a)
m = TestModule()
input = torch.randn(10)
traced = acc_tracer.trace(m, [input])
for node in traced.graph.nodes:
if node.op == "call_function":
is_sigmoid = node.target == acc_ops.sigmoid
is_relu = node.target == acc_ops.relu
self.assertTrue(is_sigmoid or is_relu)
else:
self.assertTrue(node.op == "placeholder" or node.op == "output")
self.assertTrue(torch.equal(m(input), traced(input)))
def test_unsqueeze(self):
"""
Test that torch.unsqueeze is traced correctly.
"""
self._make_acc_op_function_test(
acc_ops.unsqueeze,
torch.unsqueeze,
validate_same_kwargs=False,
dim=1,
)
def test_stack(self):
"""
Test that torch.stack is traced correctly.
"""
class TestModule(torch.nn.Module):
def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return torch.stack((a, b), dim=1)
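# acc_tracer has no dedicated stack op: torch.stack(tensors, dim) is decomposed into an
# unsqueeze of each input at `dim` followed by a cat, which is what the node loop below verifies.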
a, b = torch.randn(4, 5, 6), torch.randn(4, 5, 6)
mod = TestModule()
traced = acc_tracer.trace(mod, [a, b])
self.assertTrue(torch.equal(mod(a, b), traced(a, b)))
ph_a = ph_b = unsqueeze_a = unsqueeze_b = cat_node = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
else:
self.assertTrue(str(node.target) == "b")
ph_b = node
elif node.op == "call_function":
if node.target == acc_ops.unsqueeze:
if node.kwargs["input"] is ph_a:
unsqueeze_a = node
else:
self.assertEqual(node.kwargs["input"], ph_b)
unsqueeze_b = node
else:
self.assertEqual(node.target, acc_ops.cat)
self.assertEqual(node.kwargs["tensors"], [unsqueeze_a, unsqueeze_b])
cat_node = node
elif node.op == "output":
self.assertEqual(cat_node, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
def test_no_raise(self):
"""
Test that we can trace `if x: raise y(msg)` when the raise isn't executed.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
if torch.equal(a, b):
raise AssertionError("a equaled b!")
return a
m = TestModule()
in_a, in_b = torch.randn(5), torch.randn(5)
traced = acc_tracer.trace(
m,
[in_a, in_b],
remove_exceptions=False,
use_acc_normalization=False,
ast_rewriter_allow_list={TestModule},
)
# Verify the structure of the graph, including the existence of the
# exception_wrapper.
ph_a = exception_wrapper = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
else:
self.assertTrue(str(node.target) == "b")
elif node.op == "call_module":
self.assertEqual(
node.target, "_conditional_exception_wrapper_AssertionError"
)
exception_wrapper = node
elif node.op == "output":
self.assertEqual(ph_a, node.args[0])
self.assertTrue(exception_wrapper is not None)
self.assertTrue(torch.equal(m(in_a, in_b), traced(in_a, in_b)))
def test_yes_raise(self):
"""
Test that we can trace `if x: raise y(msg)` when the raise is executed.
"""
err_str = "a equaled b!"
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.err_str = err_str
def forward(self, a, b):
if torch.equal(a, b):
raise RuntimeError(self.err_str)
return a
m = TestModule()
# Note: We must use different inputs here in order for shape_prop to work, as
# otherwise the exception is thrown (as expected/checked below).
in_a, in_b = torch.randn(5), torch.randn(5)
traced = acc_tracer.trace(
m,
[in_a, in_b],
remove_exceptions=False,
ast_rewriter_allow_list={TestModule},
)
# Verify the structure of the graph, including the existence of the
# exception_wrapper.
ph_a = exception_wrapper = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
else:
self.assertTrue(str(node.target) == "b")
elif node.op == "call_module":
self.assertEqual(
node.target, "_conditional_exception_wrapper_RuntimeError"
)
exception_wrapper = node
elif node.op == "output":
self.assertEqual(ph_a, node.args[0])
self.assertTrue(exception_wrapper is not None)
def test(mod):
try:
# Note: Use the same input here to ensure the exception is thrown.
mod(in_a, in_a)
self.fail("Shouldn't get here because exception should be thrown.")
except RuntimeError as e:
self.assertEqual(err_str, str(e))
test(m)
test(traced)
def test_remove_raise(self):
"""
Test that we can trace `if x: raise y(msg)` and then remove the exception_wrapper.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
if torch.equal(a, b):
raise AssertionError("a equaled b!")
return a
m = TestModule()
in_a, in_b = torch.randn(5), torch.randn(5)
traced = acc_tracer.trace(
m,
[in_a, in_b],
remove_exceptions=True,
ast_rewriter_allow_list={TestModule},
)
# Verify the structure of the graph, including the existence of the
# exception_wrapper.
ph_a = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
else:
self.assertTrue(str(node.target) == "b")
elif node.op == "output":
self.assertEqual(ph_a, node.args[0])
else:
# Should not encounter any call_modules, e.g. to the
# exception_wrapper.
self.assertFalse(node.op == "call_module")
# Note: Using input in_a twice for the tracer version, which would
# trigger the raise if it was still there.
self.assertTrue(torch.equal(m(in_a, in_b), traced(in_a, in_a)))
def test_raise_no_message(self):
"""
Test that we can trace `if x: raise y` when `y` has no message.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
if torch.equal(a, b):
raise AssertionError
return a
m = TestModule()
in_a, in_b = torch.randn(5), torch.randn(5)
traced = acc_tracer.trace(
m,
[in_a, in_b],
remove_exceptions=False,
use_acc_normalization=False,
ast_rewriter_allow_list={TestModule},
)
# Verify the structure of the graph, including the existence of the
# exception_wrapper.
ph_a = exception_wrapper = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
else:
self.assertTrue(str(node.target) == "b")
elif node.op == "call_module":
self.assertEqual(
node.target, "_conditional_exception_wrapper_AssertionError"
)
exception_wrapper = node
elif node.op == "output":
self.assertEqual(ph_a, node.args[0])
self.assertTrue(exception_wrapper is not None)
self.assertTrue(torch.equal(m(in_a, in_b), traced(in_a, in_b)))
def test_quantized_add(self):
"""
Test that a quantized_add and acc_ops.quantize_per_tensor are traced as expected,
verifying the acc_out_tys are set as expected.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.q_input = torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
)
self.q_other = torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
)
def forward(self, input: torch.Tensor, other: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.add(
self.q_input(input),
self.q_other(other),
scale=0.05,
zero_point=1,
)
m = TestModule()
input, other = torch.randn(2, 3, 4), torch.randn(2, 3, 4)
traced = acc_tracer.trace(m, [input, other])
input_ph = other_ph = q_input = q_other = q_add = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "input":
input_ph = node
else:
self.assertTrue(str(node.target) == "other")
other_ph = node
elif (
node.op == "call_function"
and node.target == acc_ops.quantize_per_tensor
):
qparams = {
"scale": 1.0 / 128,
"zero_point": 5,
}
expected_md = acc_utils.build_raw_tensor_meta(
dtype=torch.quint8,
qparams=qparams,
)
if node.kwargs["input"] == input_ph:
q_input = node
else:
self.assertTrue(node.kwargs["input"] == other_ph)
q_other = node
qparams_copy = qparams.copy()
qparams_copy["zero_point"] = 10
expected_md = expected_md._replace(qparams=qparams_copy)
self.assertEqual(node.kwargs["acc_out_ty"], expected_md)
elif node.op == "call_function" and node.target == acc_ops.quantized_add:
self.assertEqual(node.kwargs["input"], q_input)
self.assertEqual(node.kwargs["other"], q_other)
qparams = {
"scale": 0.05,
"zero_point": 1,
}
expected_md = acc_utils.build_raw_tensor_meta(qparams=qparams)
self.assertEqual(node.kwargs["acc_out_ty"], expected_md)
q_add = node
elif node.op == "output":
self.assertEqual(q_add, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input, other), traced(input, other)))
def test_quantized_mul(self):
"""
Test that a quantized_mul and acc_ops.quantize_per_tensor are traced as expected,
verifying the acc_out_tys are set as expected.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.q_input = torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
)
self.q_other = torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
)
def forward(self, input: torch.Tensor, other: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.mul(
self.q_input(input),
self.q_other(other),
scale=0.05,
zero_point=1,
)
m = TestModule()
input, other = torch.randn(2, 3, 4), torch.randn(2, 3, 4)
traced = acc_tracer.trace(m, [input, other])
input_ph = other_ph = q_input = q_other = q_add = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "input":
input_ph = node
else:
self.assertTrue(str(node.target) == "other")
other_ph = node
elif (
node.op == "call_function"
and node.target == acc_ops.quantize_per_tensor
):
qparams = {
"scale": 1.0 / 128,
"zero_point": 5,
}
expected_md = acc_utils.build_raw_tensor_meta(
dtype=torch.quint8,
qparams=qparams,
)
if node.kwargs["input"] == input_ph:
q_input = node
else:
self.assertTrue(node.kwargs["input"] == other_ph)
q_other = node
qparams_copy = qparams.copy()
qparams_copy["zero_point"] = 10
expected_md = expected_md._replace(qparams=qparams_copy)
self.assertEqual(node.kwargs["acc_out_ty"], expected_md)
elif node.op == "call_function" and node.target == acc_ops.quantized_mul:
self.assertEqual(node.kwargs["input"], q_input)
self.assertEqual(node.kwargs["other"], q_other)
qparams = {
"scale": 0.05,
"zero_point": 1,
}
expected_md = acc_utils.build_raw_tensor_meta(qparams=qparams)
self.assertEqual(node.kwargs["acc_out_ty"], expected_md)
q_add = node
elif node.op == "output":
self.assertEqual(q_add, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input, other), traced(input, other)))
def test_cat(self):
"""
Test that torch.cat is traced correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return torch.cat([a, a, b], 0)
m = TestModule()
a, b = torch.randn(2, 2), torch.randn(2, 2)
traced = acc_tracer.trace(m, (a, b))
ph_a = ph_b = cat = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
else:
self.assertTrue(str(node.target) == "b")
ph_b = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.cat)
self.assertEqual(node.kwargs["tensors"][0], ph_a)
self.assertEqual(node.kwargs["tensors"][1], ph_a)
self.assertEqual(node.kwargs["tensors"][2], ph_b)
self.assertEqual(node.kwargs["dim"], 0)
cat = node
elif node.op == "output":
self.assertEqual(cat, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(a, b), traced(a, b)))
def test_square(self):
"""
Test that torch.square is traced correctly.
"""
self._make_acc_op_function_test(acc_ops.mul, torch.square)
def test_reshape(self):
"""
Test that torch.reshape is traced correctly.
"""
self._make_acc_op_function_test(acc_ops.reshape, torch.reshape, (1, -1))
# arg = (1, -1)
self._make_acc_op_function_test(acc_ops.reshape, lambda x: x.reshape(1, -1))
# arg = ((1, -1))
self._make_acc_op_function_test(acc_ops.reshape, lambda x: x.reshape((1, -1)))
def test_transpose(self):
"""
Test that torch.transpose is traced correctly.
"""
self._make_acc_op_function_test(
acc_ops.permute, lambda x: torch.transpose(x, 1, 0)
)
def test_permute(self):
"""
Test that torch.permute is traced correctly.
"""
def torch_permute(a, *dim):
return a.permute(*dim)
self._make_acc_op_function_test(acc_ops.permute, torch_permute, 1, 0)
def test_min_full_reduce(self):
"""
Test that test_min_full_reduce is traced correctly.
"""
self._make_acc_op_function_test(acc_ops.min_full_reduce, torch.min)
def test_matmul(self):
"""
Test that torch.matmul is traced correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return torch.matmul(a, b)
m = TestModule()
a, b = torch.randn(2, 2), torch.randn(2, 2)
traced = acc_tracer.trace(m, [a, b])
ph_a = ph_b = matmul = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
else:
self.assertTrue(str(node.target) == "b")
ph_b = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.matmul)
self.assertEqual(node.kwargs["input"], ph_a)
self.assertEqual(node.kwargs["other"], ph_b)
matmul = node
elif node.op == "output":
self.assertEqual(matmul, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(a, b), traced(a, b)))
def test_bmm(self):
self._make_acc_op_function_test(
acc_ops.matmul, lambda x: torch.bmm(x, x), input_shape=(2, 4, 4)
)
def test_tile(self):
return self._make_acc_op_function_test(
acc_ops.tile, lambda x: torch.tile(x, (2, 1, 2)), input_shape=(1, 2)
)
def test_dropout(self):
self._make_acc_op_function_test(
None,
lambda x: nn.functional.dropout(x, training=False),
input_shape=(1, 2, 3),
)
def test_hardsigmoid(self):
self._make_acc_op_function_test(
acc_ops.hardsigmoid,
lambda x: nn.functional.hardsigmoid(x),
input_shape=(3, 4, 5),
)
def test_hardtanh(self):
self._make_acc_op_function_test(
acc_ops.hardtanh,
lambda x: nn.functional.hardtanh(x),
input_shape=(3, 4, 5),
)
def test_hardswish(self):
class TestModule(nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
y = nn.functional.hardswish(x)
return y
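# hardswish(x) == x * hardsigmoid(x), so the traced graph is expected to contain a
# hardsigmoid node followed by a mul, as checked below.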
m = TestModule()
x = torch.randn(3, 4, 5)
traced = acc_tracer.trace(m, x)
ph_x = hardsigmoid_y = res_y = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph_x = node
elif node.op == "call_function" and node.target == acc_ops.hardsigmoid:
hardsigmoid_y = node
self.assertEqual(node.kwargs["input"], ph_x)
elif node.op == "call_function" and node.target == acc_ops.mul:
res_y = node
self.assertEqual(node.kwargs["input"], hardsigmoid_y)
self.assertEqual(node.kwargs["other"], ph_x)
elif node.op == "output":
self.assertEqual(node.args[0], res_y)
else:
self.fail(f"Unexpected node: {node.format_node()}")
ref = m(x)
res = traced(x)
torch.testing.assert_allclose(ref, res)
def test_add_with_alpha(self):
"""
Test that normalization works for torch add with alpha, which requires special
normalization handling.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
a1 = torch.add(a, b)
a2 = torch.add(a, b, alpha=1.0)
a3 = torch.add(a, b, alpha=0.5)
return a1, a2, a3
m = TestModule()
input_a = torch.randn(2, 3)
input_b = torch.randn(2, 3)
traced = acc_tracer.trace(m, [input_a, input_b])
ph_a = ph_b = add_1 = add_2 = add_3 = mul = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
elif str(node.target) == "b":
ph_b = node
else:
self.fail(f"Unexpected placeholder {node.target}.")
elif node.op == "call_function" and node.target == acc_ops.mul:
mul = node
self.assertEqual(node.kwargs["input"], ph_b)
self.assertEqual(node.kwargs["other"], 0.5)
elif node.op == "call_function" and node.target == acc_ops.add:
if add_1 is None:
add_1 = node
self.assertEqual(node.kwargs["input"], ph_a)
self.assertEqual(node.kwargs["other"], ph_b)
elif add_2 is None:
add_2 = node
self.assertEqual(node.kwargs["input"], ph_a)
self.assertEqual(node.kwargs["other"], ph_b)
elif add_3 is None:
add_3 = node
self.assertEqual(node.kwargs["input"], ph_a)
self.assertEqual(node.kwargs["other"], mul)
else:
self.fail(f"Unexpected add: {node.format_node()}")
elif node.op == "output":
self.assertEqual(node.args[0][0], add_1)
self.assertEqual(node.args[0][1], add_2)
self.assertEqual(node.args[0][2], add_3)
else:
self.fail(f"Unexpected node: {node.format_node()}")
ref = m(input_a, input_b)
res = traced(input_a, input_b)
self.assertTrue(torch.equal(ref[0], res[0]))
self.assertTrue(torch.equal(ref[1], res[1]))
self.assertTrue(torch.equal(ref[2], res[2]))
def test_leaf_module_list(self):
"""
Test leaf_module_list is working properly.
"""
class LeafModule(nn.Module):
def forward(self, x):
return x
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.mod = LeafModule()
def forward(self, x):
return self.mod(x)
x = torch.randn(1, 1)
mod = TestModule()
acc_mod = acc_tracer.trace(
mod,
[x],
leaf_module_list={LeafModule},
)
ph = leaf_module = None
for node in acc_mod.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_module":
leaf_module = node
self.assertEqual(leaf_module.target, "mod")
self.assertEqual(leaf_module.args[0], ph)
elif node.op == "output":
self.assertEqual(node.args[0], leaf_module)
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(mod(x), acc_mod(x)))
def test_sign(self):
self._make_acc_op_function_test(acc_ops.sign, torch.sign)
def test_relu(self):
self._make_acc_op_function_test(acc_ops.relu, torch.relu)
def test_leaky_relu(self):
self._make_acc_op_function_test(acc_ops.leaky_relu, torch.nn.functional.leaky_relu)
def test_elu(self):
self._make_acc_op_function_test(acc_ops.elu, torch.nn.functional.elu)
def test_selu(self):
self._make_acc_op_function_test(acc_ops.selu, torch.nn.functional.selu)
def test_softsign(self):
self._make_acc_op_function_test(acc_ops.softsign, torch.nn.functional.softsign)
def test_sigmoid(self):
self._make_acc_op_function_test(acc_ops.sigmoid, torch.sigmoid)
def test_sin(self):
self._make_acc_op_function_test(acc_ops.sin, torch.sin)
def test_cos(self):
self._make_acc_op_function_test(acc_ops.cos, torch.cos)
def test_tan(self):
self._make_acc_op_function_test(acc_ops.tan, torch.tan)
def test_sinh(self):
self._make_acc_op_function_test(acc_ops.sinh, torch.sinh)
def test_cosh(self):
self._make_acc_op_function_test(acc_ops.cosh, torch.cosh)
def test_tanh(self):
self._make_acc_op_function_test(acc_ops.tanh, torch.tanh)
def test_asin(self):
self._make_acc_op_function_test(acc_ops.asin, torch.asin)
def test_acos(self):
self._make_acc_op_function_test(acc_ops.acos, torch.acos)
def test_atan(self):
self._make_acc_op_function_test(acc_ops.atan, torch.atan)
def test_exp(self):
self._make_acc_op_function_test(acc_ops.exp, torch.exp)
def test_log(self):
self._make_acc_op_function_test(acc_ops.log, torch.log)
def test_sqrt(self):
self._make_acc_op_function_test(acc_ops.sqrt, torch.sqrt)
def test_reciprocal(self):
self._make_acc_op_function_test(acc_ops.reciprocal, torch.reciprocal)
def test_abs(self):
self._make_acc_op_function_test(acc_ops.abs, torch.abs)
def test_neg(self):
self._make_acc_op_function_test(acc_ops.neg, torch.neg)
def test_floor(self):
self._make_acc_op_function_test(acc_ops.floor, torch.floor)
def test_ceil(self):
self._make_acc_op_function_test(acc_ops.ceil, torch.ceil)
def test_softmax(self):
self._make_acc_op_function_test(acc_ops.softmax, torch.nn.functional.softmax)
def test_tensor_squeeze(self):
self._make_acc_op_function_test(acc_ops.squeeze, lambda x: x.squeeze())
def test_torch_squeeze(self):
self._make_acc_op_function_test(acc_ops.squeeze, lambda x: torch.squeeze(x))
def test_operator_mul(self):
self._make_acc_op_function_test(acc_ops.mul, lambda x: x * 7)
def test_torch_mul(self):
self._make_acc_op_function_test(acc_ops.mul, lambda x: torch.mul(x, 7))
def test_div(self):
self._make_acc_op_function_test(acc_ops.div, lambda x: torch.div(x, 2))
self._make_acc_op_function_test(acc_ops.div, lambda x: x / 2)
def test_floor_div(self):
self._make_acc_op_function_test(acc_ops.floor_div, lambda x: torch.div(x, 2, rounding_mode="floor"))
def test_trunc_div(self):
self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.div(x, 2, rounding_mode="trunc"))
self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.floor_divide(x, 2))
def test_view(self):
"""
Test that Tensor.view is traced correctly.
"""
self._make_acc_op_function_test(acc_ops.reshape, lambda x: x.view(1, -1))
def test_narrow(self):
"""
Test that torch.narrow is traced correctly.
"""
return self._make_acc_op_function_test(
acc_ops.slice_tensor,
torch.narrow,
validate_same_kwargs=False,
dim=1,
start=1,
length=2,
)
def test_pow(self):
self._make_acc_op_function_test(acc_ops.pow, torch.pow, exponent=2)
def test_size(self):
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a):
idx = a.size(1)
return a.shape[idx]
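# Both Tensor.size(dim) and Tensor.shape[idx] lower to acc_ops.size followed by
# acc_ops.getitem, so the loop below expects two size nodes and two getitem nodes.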
m = TestModule()
a = torch.randn(2, 1, 4)
traced = acc_tracer.trace(m, [a])
ph_a = size_1 = size_2 = getitem_1 = getitem_2 = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertTrue(node.target == "a")
ph_a = node
elif node.op == "call_function" and node.target == acc_ops.size:
if size_1:
size_2 = node
self.assertTrue(size_2.kwargs["input"] is ph_a)
else:
size_1 = node
self.assertTrue(size_1.kwargs["input"] is ph_a)
elif node.op == "call_function" and node.target == acc_ops.getitem:
if getitem_1:
getitem_2 = node
self.assertTrue(getitem_2.kwargs["idx"] == getitem_1)
self.assertTrue(getitem_2.kwargs["input"] == size_2)
else:
getitem_1 = node
self.assertTrue(getitem_1.kwargs["idx"] == 1)
self.assertTrue(getitem_1.kwargs["input"] == size_1)
elif node.op == "output":
self.assertEqual(node.args[0], getitem_2)
else:
self.fail(f"Unexpected node: {node.format_node()}")
ref = m(a)
res = traced(a)
self.assertEqual(ref, res)
def test_flatten(self):
"""
Test that torch.flatten is traced correctly.
"""
self._make_acc_op_function_test(
acc_ops.flatten, torch.flatten, start_dim=1, end_dim=1
)
self._make_acc_op_function_test(acc_ops.flatten, lambda x: x.flatten())
def test_topk_multi_output(self):
"""
Test that torch.topk multi outputs work.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a: torch.Tensor) -> torch.Tensor:
return torch.topk(a, 3)[1]
m = TestModule()
input_a = torch.randn(10)
traced = acc_tracer.trace(m, [input_a])
ph_a = topk = getitem = None
for node in traced.graph.nodes:
if node.op == "placeholder" and str(node.target) == "a":
ph_a = node
elif node.op == "call_function" and node.target == acc_ops.topk:
topk = node
self.assertEqual(node.kwargs["input"], ph_a)
self.assertEqual(node.kwargs["k"], 3)
elif node.op == "call_function" and node.target == acc_ops.getitem:
getitem = node
self.assertEqual(node.kwargs["input"], topk)
self.assertEqual(node.kwargs["idx"], 1)
elif node.op == "output":
self.assertEqual(node.args[0], getitem)
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input_a), traced(input_a)))
def test_addmm_with_alpha_beta(self):
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self, input: torch.Tensor, a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor:
return torch.addmm(input, a, b, alpha=1.2, beta=1.1)
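# torch.addmm(input, a, b, alpha, beta) == beta * input + alpha * (a @ b); with non-default
# alpha/beta the trace is expected to contain a matmul, two muls (one per scaling factor)
# and a final add, which the loop below verifies.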
m = TestModule()
input, a, b = torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)
traced = acc_tracer.trace(m, [input, a, b])
ph_in = ph_a = ph_b = mm = add = mm_mul = add_mul = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
elif str(node.target) == "b":
ph_b = node
else:
self.assertTrue(str(node.target) == "input")
ph_in = node
elif node.op == "call_function":
if node.target == acc_ops.matmul:
self.assertEqual(node.kwargs["input"], ph_a)
self.assertEqual(node.kwargs["other"], ph_b)
mm = node
elif node.target == acc_ops.add:
self.assertEqual(node.kwargs["input"], mm_mul)
self.assertEqual(node.kwargs["other"], add_mul)
add = node
elif mm_mul:
self.assertEqual(node.kwargs["input"], ph_in)
self.assertEqual(node.kwargs["other"], 1.1)
add_mul = node
else:
self.assertEqual(node.kwargs["input"], mm)
self.assertEqual(node.kwargs["other"], 1.2)
mm_mul = node
elif node.op == "output":
self.assertEqual(add, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
torch.testing.assert_allclose(m(input, a, b), traced(input, a, b))
def test_log1p(self):
class TestModule(torch.nn.Module):
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.log1p(input)
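# log1p(x) == log(1 + x); acc_tracer decomposes it into an add-with-1 followed by a log,
# as verified below.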
m = TestModule().eval()
input = torch.tensor([[1.2, 0.3, -0.4]])
traced = acc_tracer.trace(m, [input])
ph_in = add = log = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertTrue(str(node.target) == "input")
ph_in = node
elif node.op == "call_function":
if node.target == acc_ops.add:
self.assertEqual(node.kwargs["input"], ph_in)
self.assertEqual(node.kwargs["other"], 1)
add = node
else:
self.assertEqual(node.target, acc_ops.log)
self.assertEqual(node.kwargs["input"], add)
log = node
elif node.op == "output":
self.assertEqual(log, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
torch.testing.assert_allclose(m(input), traced(input))
def test_addmm(self):
class TestModule(torch.nn.Module):
def forward(
self, input: torch.Tensor, a: torch.Tensor, b: torch.Tensor
) -> torch.Tensor:
return torch.addmm(input, a, b)
m = TestModule()
input, a, b = torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2)
traced = acc_tracer.trace(m, [input, a, b])
ph_in = ph_a = ph_b = mm = add = None
for node in traced.graph.nodes:
if node.op == "placeholder":
if str(node.target) == "a":
ph_a = node
elif str(node.target) == "b":
ph_b = node
else:
self.assertTrue(str(node.target) == "input")
ph_in = node
elif node.op == "call_function":
if node.target == acc_ops.matmul:
self.assertEqual(node.kwargs["input"], ph_a)
self.assertEqual(node.kwargs["other"], ph_b)
mm = node
else:
self.assertEqual(node.target, acc_ops.add)
self.assertEqual(node.kwargs["input"], mm)
self.assertEqual(node.kwargs["other"], ph_in)
add = node
elif node.op == "output":
self.assertEqual(add, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
self.assertTrue(torch.equal(m(input, a, b), traced(input, a, b)))
def test_gelu(self):
return self._make_acc_op_function_test(acc_ops.gelu, torch.nn.functional.gelu)
@parameterized.expand(
[
(1, True),
(1, False),
(None, False),
]
)
def test_argmin(self, dim, keepdim):
class TestModule(torch.nn.Module):
def __init__(self, dim, keepdim):
super().__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.argmin(input, dim=self.dim, keepdim=self.keepdim)
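# argmin is decomposed into an optional flatten (when dim is None), a topk (presumably k=1
# with largest=False to pick the minimum), a getitem for the indices, and an optional squeeze
# when keepdim is False; the assertions below cover each combination.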
m = TestModule(dim, keepdim)
input = torch.randn(2, 2)
traced = acc_tracer.trace(m, [input])
ph_in = flatten = topk = getitem = squeeze = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertTrue(str(node.target) == "input")
ph_in = node
elif node.op == "call_function":
if node.target == acc_ops.flatten:
self.assertEqual(node.kwargs["input"], ph_in)
flatten = node
elif node.target == acc_ops.topk:
self.assertEqual(
node.kwargs["input"], flatten if flatten else ph_in
)
topk = node
elif node.target == acc_ops.getitem:
self.assertEqual(node.kwargs["input"], topk)
getitem = node
elif node.target == acc_ops.squeeze:
self.assertEqual(node.kwargs["input"], getitem)
squeeze = node
elif node.op == "output":
self.assertEqual(squeeze if squeeze else getitem, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
if dim is None:
self.assertTrue(flatten is not None)
if not keepdim:
self.assertTrue(squeeze is not None)
self.assertTrue(torch.equal(m(input), traced(input)))
def test_t(self):
"""
Test Tensor.t() is traced correctly.
"""
self._make_acc_op_function_test(acc_ops.permute, lambda x: x.t())
self._make_acc_op_function_test(
acc_ops.permute, lambda x: x.t(), input_shape=(3,)
)
def test_split_size(self):
self._make_acc_op_function_test(
acc_ops.split,
torch.split,
validate_same_kwargs=False,
split_size_or_sections=2,
dim=1,
)
def test_split_sections(self):
class TestModule(torch.nn.Module):
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.split(input, [2, 5, 3], 1)
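# When given explicit section sizes, torch.split traces to one slice_tensor node per section
# plus a tuple_construct that gathers the results, as checked below.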
m = TestModule()
input = torch.randn(1, 10)
traced = acc_tracer.trace(m, [input])
ph_in = slice_node_0 = slice_node_1 = slice_node_2 = None
tuple_construct_node = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertTrue(str(node.target) == "input")
ph_in = node
elif node.op == "call_function":
if node.target == acc_ops.slice_tensor:
self.assertEqual(node.kwargs["input"], ph_in)
if slice_node_0:
if slice_node_1:
slice_node_2 = node
else:
slice_node_1 = node
else:
slice_node_0 = node
else:
self.assertEqual(node.target, acc_ops.tuple_construct)
self.assertEqual(
node.kwargs["tensors"],
(slice_node_0, slice_node_1, slice_node_2),
)
tuple_construct_node = node
elif node.op == "output":
self.assertEqual(tuple_construct_node, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
ref_output = m(input)
output = traced(input)
for i, j in zip(ref_output, output):
self.assertTrue(torch.equal(i, j))
def test_list_input(self):
"""
Test that list inputs are traced correctly.
"""
class TestModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a: List[torch.Tensor]) -> torch.Tensor:
return a[0] + a[1]
m = TestModule()
input = [torch.randn(2, 3), torch.randn(2, 3)]
traced = acc_tracer.trace(m, [input])
ph = getitem_0 = getitem_1 = add = None
for node in traced.graph.nodes:
if node.op == "placeholder":
self.assertEqual(str(node.target), "a")
ph = node
elif node.op == "call_function" and node.target == acc_ops.getitem:
self.assertTrue(node.kwargs["idx"] == 0 or node.kwargs["idx"] == 1)
if node.kwargs["idx"] == 0:
getitem_0 = node
else:
getitem_1 = node
elif node.op == "call_function":
self.assertEqual(node.target, acc_ops.add)
self.assertEqual(node.kwargs["input"], getitem_0)
self.assertEqual(node.kwargs["other"], getitem_1)
add = node
elif node.op == "output":
self.assertEqual(add, node.args[0])
else:
self.fail(f"Unexpected node: {node.format_node()}")
# Check the tensor metadatas are correct given the input is a list.
self.assertTrue(isinstance(ph.meta["tensor_meta"], list))
self.assertEqual(len(ph.meta["tensor_meta"]), 2)
self.assertEqual(getitem_0.meta["tensor_meta"], ph.meta["tensor_meta"][0])
self.assertEqual(getitem_1.meta["tensor_meta"], ph.meta["tensor_meta"][1])
self.assertTrue(torch.equal(m(input), traced(input)))
def test_mobilenet_v3(self):
"""
Test that we can trace mobilenet v3 small and run/compare against the untraced version.
"""
m = torchvision.models.mobilenet_v3_small(pretrained=True)
self._make_model_unit_test(m, enable_allclose=True)
def test_mobilenet_v2(self):
"""
Test that we can trace mobilenet v2 and run/compare against the untraced version.
"""
m = torchvision.models.mobilenet_v2(pretrained=True)
self._make_model_unit_test(m)
def test_vgg16(self):
"""
Test that we can trace vgg16 and run/compare against the untraced version.
"""
m = torchvision.models.vgg16(pretrained=True)
self._make_model_unit_test(m)
def test_resnet18(self):
"""
Test that we can trace resnet18 and run/compare against the untraced version.
"""
m = torchvision.models.resnet18(pretrained=True)
self._make_model_unit_test(m)
def test_resnext50_32x4d(self):
"""
Test that we can trace resnext and run/compare against the untraced version.
"""
m = torchvision.models.resnext50_32x4d(pretrained=True)
self._make_model_unit_test(m)
def test_cumsum(self):
self._make_acc_op_function_test(acc_ops.cumsum, torch.cumsum, dim=1)
self._make_acc_op_function_test(
acc_ops.cumsum, torch.cumsum, dim=1, dtype=torch.float
)
def test_chunk(self):
self._make_acc_op_function_test(acc_ops.chunk, torch.chunk, chunks=2, dim=0)
def test_all_acc_ops_registered(self):
self.assertEqual(
acc_normalizer._acc_ops,
{
acc_ops.linear,
acc_ops.max_pool2d,
acc_ops.flatten,
acc_ops.adaptive_avg_pool2d,
acc_ops.avg_pool2d,
acc_ops.add,
acc_ops.min_full_reduce,
acc_ops.min_dim_reduce,
acc_ops.minimum,
acc_ops.cat,
acc_ops.softmax,
acc_ops.sign,
acc_ops.permute,
acc_ops.matmul,
acc_ops.quantize_per_tensor,
acc_ops.quantize_per_channel,
acc_ops.quantized_add,
acc_ops.quantized_mul,
acc_ops.dequantize,
acc_ops.sub,
acc_ops.mul,
acc_ops.div,
acc_ops.floor_div,
acc_ops.trunc_div,
acc_ops.pow,
acc_ops.relu,
acc_ops.leaky_relu,
acc_ops.elu,
acc_ops.selu,
acc_ops.softsign,
acc_ops.tuple_construct,
acc_ops.unsqueeze,
acc_ops.sigmoid,
acc_ops.sum,
acc_ops.max_full_reduce,
acc_ops.max_dim_reduce,
acc_ops.maximum,
acc_ops.sinh,
acc_ops.cosh,
acc_ops.tanh,
acc_ops.asin,
acc_ops.acos,
acc_ops.atan,
acc_ops.exp,
acc_ops.log,
acc_ops.sqrt,
acc_ops.reciprocal,
acc_ops.abs,
acc_ops.neg,
acc_ops.floor,
acc_ops.ceil,
acc_ops.size,
acc_ops.split,
acc_ops.conv2d,
acc_ops.batch_norm,
acc_ops.embedding_bag,
acc_ops.embedding_bag_byte_rowwise_offsets,
acc_ops.embedding_bag_4bit_rowwise_offsets,
acc_ops.contiguous,
acc_ops.pad,
acc_ops.sin,
acc_ops.cos,
acc_ops.tan,
acc_ops.topk,
acc_ops.getitem,
acc_ops.squeeze,
acc_ops.tile,
acc_ops.reshape,
acc_ops.quantized_linear,
acc_ops.quantized_conv2d,
acc_ops.quantized_batch_norm2d,
acc_ops.to_dtype,
acc_ops.clamp,
acc_ops.layer_norm,
acc_ops.linalg_norm,
acc_ops.slice_tensor,
acc_ops.hardsigmoid,
acc_ops.mean,
acc_ops.hardtanh,
acc_ops.gelu,
acc_ops.cumsum,
acc_ops.chunk,
acc_ops.rescale_quantize_per_tensor,
acc_ops.rescale_quantize_per_channel,
},
)
| 37.861809 | 108 | 0.534528 |
7949765f7f07f52e54996497b9e2297b439e784d
| 1,082 |
py
|
Python
|
body_temp_from_external/conv_core_temp.py
|
SoftwareDevEngResearch/body_temp_from_external
|
2f560ccc51e9f3c5bde25630b811586972fe884c
|
[
"MIT"
] | 1 |
2020-02-15T05:30:07.000Z
|
2020-02-15T05:30:07.000Z
|
body_temp_from_external/conv_core_temp.py
|
SoftwareDevEngResearch/body_temp_from_external
|
2f560ccc51e9f3c5bde25630b811586972fe884c
|
[
"MIT"
] | null | null | null |
body_temp_from_external/conv_core_temp.py
|
SoftwareDevEngResearch/body_temp_from_external
|
2f560ccc51e9f3c5bde25630b811586972fe884c
|
[
"MIT"
] | 3 |
2018-04-18T23:39:00.000Z
|
2020-03-15T14:46:20.000Z
|
#!/usr/bin/env python
# Author: Makenzie Brian
# Date: April 23, 2018
# Class: ME 599
# File: conv_core_temp.py
# Description: converts csv to estimated core temperature
def conv_file(file):
"""converts csv to array for use in later functions
args: filename as .csv"""
with open(file, 'r') as f1:
file_contents = []
for line in f1:
file_contents.append(line.strip().split(",")) # strip \n and split into values to put in array
return file_contents
def conv_core_temperature(temp_array):
"""converts array of temperature values to an average value
args: array of temperatures as floats or ints"""
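# Example: conv_core_temperature([36.0, 36.5, 37.0]) averages to 36.5 and returns
# roughly 36.5 * 1.052 ≈ 38.4.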
average_temp = float(sum(temp_array)) / float(len(temp_array))  # mean of the readings
est_core_temp = average_temp * 1.052  # rough estimate; a better model needs external temperature etc.
# Ideally core_temp = skin_temp + heat_flux * (d / lambda), but d and lambda are unavailable,
# so this scaling may ultimately need to be learned from known datasets.
if est_core_temp < 0:
return "INVALID VALUE"
else:
return est_core_temp
| 31.823529 | 107 | 0.676525 |
7949772a970b146a5acf5d7c83a9b3f8ea54785c
| 49,865 |
py
|
Python
|
parser_email.py
|
splunk-soar-connectors/parser
|
6c7bf67590dd09a0b6a6182eb56312531f1fef09
|
[
"Apache-2.0"
] | null | null | null |
parser_email.py
|
splunk-soar-connectors/parser
|
6c7bf67590dd09a0b6a6182eb56312531f1fef09
|
[
"Apache-2.0"
] | 5 |
2022-01-07T23:27:42.000Z
|
2022-02-03T03:27:17.000Z
|
parser_email.py
|
splunk-soar-connectors/parser
|
6c7bf67590dd09a0b6a6182eb56312531f1fef09
|
[
"Apache-2.0"
] | 1 |
2022-03-15T00:22:02.000Z
|
2022-03-15T00:22:02.000Z
|
# File: parser_email.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import email
import hashlib
import mimetypes
import operator
import os
import re
import shutil
import socket
import sys
import tempfile
from collections import OrderedDict
from email.header import decode_header, make_header
from html import unescape
from urllib.parse import urlparse
import magic
import phantom.app as phantom
import phantom.rules as ph_rules
import phantom.utils as ph_utils
import simplejson as json
from bs4 import BeautifulSoup, UnicodeDammit
from django.core.validators import URLValidator
from phantom.vault import Vault
from requests.structures import CaseInsensitiveDict
# Any globals added here, should be initialized in the init() function
_base_connector = None
_config = dict()
_email_id_contains = list()
_container = dict()
_artifacts = list()
_attachments = list()
_python_version = None
_tmp_dirs = list()
_container_common = {
"run_automation": False # Don't run any playbooks, when this artifact is added
}
_artifact_common = {
"run_automation": False # Don't run any playbooks, when this artifact is added
}
FILE_EXTENSIONS = {
'.vmsn': ['os memory dump', 'vm snapshot file'],
'.vmss': ['os memory dump', 'vm suspend file'],
'.js': ['javascript'],
'.doc': ['doc'],
'.docx': ['doc'],
'.xls': ['xls'],
'.xlsx': ['xls'],
}
MAGIC_FORMATS = [
(re.compile('^PE.* Windows'), ['pe file', 'hash']),
(re.compile('^MS-DOS executable'), ['pe file', 'hash']),
(re.compile('^PDF '), ['pdf']),
(re.compile('^MDMP crash'), ['process dump']),
(re.compile('^Macromedia Flash'), ['flash']),
]
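# File-extension and libmagic based "contains" hints; _get_file_contains() below merges the
# two sources for a given file path.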
PARSER_DEFAULT_ARTIFACT_COUNT = 100
PARSER_DEFAULT_CONTAINER_COUNT = 100
HASH_FIXED_PHANTOM_VERSION = "2.0.201"
OFFICE365_APP_ID = "a73f6d32-c9d5-4fec-b024-43876700daa6"
EXCHANGE_ONPREM_APP_ID = "badc5252-4a82-4a6d-bc53-d1e503857124"
IMAP_APP_ID = "9f2e9f72-b0e5-45d6-92a7-09ef820476c1"
PROC_EMAIL_JSON_FILES = "files"
PROC_EMAIL_JSON_BODIES = "bodies"
PROC_EMAIL_JSON_DATE = "date"
PROC_EMAIL_JSON_FROM = "from"
PROC_EMAIL_JSON_SUBJECT = "subject"
PROC_EMAIL_JSON_TO = "to"
PROC_EMAIL_JSON_START_TIME = "start_time"
PROC_EMAIL_JSON_EXTRACT_ATTACHMENTS = "extract_attachments"
PROC_EMAIL_JSON_EXTRACT_URLS = "extract_urls"
PROC_EMAIL_JSON_EXTRACT_IPS = "extract_ips"
PROC_EMAIL_JSON_EXTRACT_DOMAINS = "extract_domains"
PROC_EMAIL_JSON_EXTRACT_HASHES = "extract_hashes"
PROC_EMAIL_JSON_RUN_AUTOMATION = "run_automation"
PROC_EMAIL_JSON_IPS = "ips"
PROC_EMAIL_JSON_HASHES = "hashes"
PROC_EMAIL_JSON_URLS = "urls"
PROC_EMAIL_JSON_DOMAINS = "domains"
PROC_EMAIL_JSON_MSG_ID = "message_id"
PROC_EMAIL_JSON_EMAIL_HEADERS = "email_headers"
PROC_EMAIL_CONTENT_TYPE_MESSAGE = "message/rfc822"
URI_REGEX = r"h(?:tt|xx)p[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
EMAIL_REGEX = r"\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}\b"
EMAIL_REGEX2 = r'".*"@[A-Z0-9.-]+\.[A-Z]{2,}\b'
HASH_REGEX = r"\b[0-9a-fA-F]{32}\b|\b[0-9a-fA-F]{40}\b|\b[0-9a-fA-F]{64}\b"
IP_REGEX = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
IPV6_REGEX = r'\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))'
IPV6_REGEX += r'|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})'
IPV6_REGEX += r'|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*'
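# The IPV6_REGEX above covers the textual IPv6 forms (full, '::'-compressed and IPv4-mapped);
# candidate matches are still validated with socket.inet_pton in is_ipv6() before being
# treated as real addresses (see _is_ip / _get_ips).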
DEFAULT_SINGLE_PART_EML_FILE_NAME = 'part_1.text'
def _get_string(input_str, charset):
global _python_version
try:
if input_str:
if _python_version == 2:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset)
else:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset).decode(charset)
except Exception:
try:
input_str = str(make_header(decode_header(input_str)))
except Exception:
input_str = _decode_uni_string(input_str, input_str)
_base_connector.debug_print(
"Error occurred while converting to string with specific encoding {}".format(input_str))
return input_str
def _get_error_message_from_exception(e):
""" This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
except Exception:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
return error_code, error_msg
def _is_ip(input_ip):
if ph_utils.is_ip(input_ip):
return True
if is_ipv6(input_ip):
return True
return False
def _refang_url(url):
parsed = urlparse(url)
scheme = parsed.scheme
# Replace hxxp/hxxps with http/https
if scheme == "hxxp":
parsed = parsed._replace(scheme='http')
elif scheme == "hxxps":
parsed = parsed._replace(scheme='https')
refang_url = parsed.geturl()
return refang_url
def _clean_url(url):
url = url.strip('>),.]\r\n')
# Check before splicing, find returns -1 if not found
# _and_ you will end up splicing on -1 (incorrectly)
if '<' in url:
url = url[:url.find('<')]
if '>' in url:
url = url[:url.find('>')]
url = _refang_url(url)
return url
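# e.g. _clean_url("hxxps://example.com/page>,\r\n") -> "https://example.com/page":
# trailing punctuation/angle brackets are stripped and the defanged scheme is restored.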
def is_ipv6(input_ip):
try:
socket.inet_pton(socket.AF_INET6, input_ip)
except Exception: # not a valid v6 address
return False
return True
uri_regexc = re.compile(URI_REGEX)
email_regexc = re.compile(EMAIL_REGEX, re.IGNORECASE)
email_regexc2 = re.compile(EMAIL_REGEX2, re.IGNORECASE)
hash_regexc = re.compile(HASH_REGEX)
ip_regexc = re.compile(IP_REGEX)
ipv6_regexc = re.compile(IPV6_REGEX)
def _get_file_contains(file_path):
contains = []
ext = os.path.splitext(file_path)[1]
contains.extend(FILE_EXTENSIONS.get(ext, []))
magic_str = magic.from_file(file_path)
for regex, cur_contains in MAGIC_FORMATS:
if regex.match(magic_str):
contains.extend(cur_contains)
return contains
def _debug_print(*args):
if _base_connector and hasattr(_base_connector, 'debug_print'):
_base_connector.debug_print(*args)
return
def _extract_urls_domains(file_data, urls, domains):
if (not _config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]) and (not _config[PROC_EMAIL_JSON_EXTRACT_URLS]):
return
# try to load the email
try:
soup = BeautifulSoup(file_data, "html.parser")
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
_debug_print("Handled exception", err)
return
uris = []
# get all tags that have hrefs and srcs
links = soup.find_all(href=True)
srcs = soup.find_all(src=True)
if links or srcs:
uri_text = []
if links:
for x in links:
# work on the text part of the link, they might be http links different from the href
# and were either missed by the uri_regexc while parsing text or there was no text counterpart
# in the email
uri_text.append(_clean_url(x.get_text()))
# it's html, so get all the urls
if not x['href'].startswith('mailto:'):
uris.append(x['href'])
if srcs:
for x in srcs:
uri_text.append(_clean_url(x.get_text()))
# it's html, so get all the urls
uris.append(x['src'])
if uri_text:
uri_text = [x for x in uri_text if x.startswith('http')]
if uri_text:
uris.extend(uri_text)
else:
# To unescape html escaped body
file_data = unescape(file_data)
# Parse it as a text file
uris = re.findall(uri_regexc, file_data)
if uris:
uris = [_clean_url(x) for x in uris]
validate_url = URLValidator(schemes=['http', 'https'])
validated_urls = list()
for url in uris:
try:
validate_url(url)
validated_urls.append(url)
except Exception:
pass
if _config[PROC_EMAIL_JSON_EXTRACT_URLS]:
# add the uris to the urls
urls |= set(validated_urls)
if _config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
for uri in validated_urls:
domain = phantom.get_host_from_url(uri)
if domain and not _is_ip(domain):
domains.add(domain)
# work on any mailto urls if present
if links:
mailtos = [x['href'] for x in links if x['href'].startswith('mailto:')]
for curr_email in mailtos:
domain = curr_email[curr_email.find('@') + 1:]
if domain and not _is_ip(domain):
domains.add(domain)
return
def _get_ips(file_data, ips):
# First extract what looks like an IP from the file, this is a faster operation
ips_in_mail = re.findall(ip_regexc, file_data)
ip6_in_mail = re.findall(ipv6_regexc, file_data)
if ip6_in_mail:
for ip6_tuple in ip6_in_mail:
ip6s = [x for x in ip6_tuple if x]
ips_in_mail.extend(ip6s)
# Now validate them
if ips_in_mail:
ips_in_mail = set(ips_in_mail)
# match it with a slower and difficult regex.
# TODO: Fix this with a one step approach.
ips_in_mail = [x for x in ips_in_mail if _is_ip(x)]
if ips_in_mail:
ips |= set(ips_in_mail)
def _handle_body(body, parsed_mail, body_index, email_id):
local_file_path = body['file_path']
charset = body.get('charset')
ips = parsed_mail[PROC_EMAIL_JSON_IPS]
hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
urls = parsed_mail[PROC_EMAIL_JSON_URLS]
domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
file_data = None
try:
with open(local_file_path, 'r') as f:
file_data = f.read()
except Exception:
with open(local_file_path, 'rb') as f:
file_data = f.read()
_debug_print("Reading file data using binary mode")
if (file_data is None) or (len(file_data) == 0):
return phantom.APP_ERROR
file_data = UnicodeDammit(file_data).unicode_markup.encode('utf-8').decode('utf-8')
_parse_email_headers_as_inline(file_data, parsed_mail, charset, email_id)
if _config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
emails = []
emails.extend(re.findall(email_regexc, file_data))
emails.extend(re.findall(email_regexc2, file_data))
for curr_email in emails:
domain = curr_email[curr_email.rfind('@') + 1:]
if domain and not ph_utils.is_ip(domain):
domains.add(domain)
_extract_urls_domains(file_data, urls, domains)
if _config[PROC_EMAIL_JSON_EXTRACT_IPS]:
_get_ips(file_data, ips)
if _config[PROC_EMAIL_JSON_EXTRACT_HASHES]:
hashs_in_mail = re.findall(hash_regexc, file_data)
if hashs_in_mail:
hashes |= set(hashs_in_mail)
return phantom.APP_SUCCESS
def _add_artifacts(cef_key, input_set, artifact_name, start_index, artifacts):
added_artifacts = 0
for entry in input_set:
# ignore empty entries
if not entry:
continue
artifact = {}
artifact.update(_artifact_common)
artifact['source_data_identifier'] = start_index + added_artifacts
artifact['cef'] = {cef_key: entry}
artifact['name'] = artifact_name
_debug_print('Artifact:', artifact)
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _parse_email_headers_as_inline(file_data, parsed_mail, charset, email_id):
# remove the 'Forwarded Message' from the email text and parse it
p = re.compile(r'(?<=\r\n).*Forwarded Message.*\r\n', re.IGNORECASE)
email_text = p.sub('', file_data.strip())
mail = email.message_from_string(email_text)
# Get the array
# email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
_parse_email_headers(parsed_mail, mail, charset, add_email_id=email_id)
# email_headers.append(mail.items())
return phantom.APP_SUCCESS
def _add_email_header_artifacts(email_header_artifacts, start_index, artifacts):
added_artifacts = 0
for artifact in email_header_artifacts:
artifact['source_data_identifier'] = start_index + added_artifacts
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _create_artifacts(parsed_mail):
# get all the artifact data in their own list objects
ips = parsed_mail[PROC_EMAIL_JSON_IPS]
hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
urls = parsed_mail[PROC_EMAIL_JSON_URLS]
domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
# set the default artifact dict
artifact_id = 0
# add artifacts
added_artifacts = _add_artifacts('sourceAddress', ips, 'IP Artifact', artifact_id, _artifacts)
artifact_id += added_artifacts
added_artifacts = _add_artifacts('fileHash', hashes, 'Hash Artifact', artifact_id, _artifacts)
artifact_id += added_artifacts
added_artifacts = _add_artifacts('requestURL', urls, 'URL Artifact', artifact_id, _artifacts)
artifact_id += added_artifacts
# domains = [x.decode('idna') for x in domains]
added_artifacts = _add_artifacts('destinationDnsDomain', domains, 'Domain Artifact', artifact_id, _artifacts)
artifact_id += added_artifacts
added_artifacts = _add_email_header_artifacts(email_headers, artifact_id, _artifacts)
artifact_id += added_artifacts
return phantom.APP_SUCCESS
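# Decode RFC 2047 encoded-words (=?charset?encoding?text?=) found in a header value;
# falls back to def_name if the value cannot be decoded.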
def _decode_uni_string(input_str, def_name):
    # try to find all the encoded strings; there could be multiple of them,
    # or a single encoded string sitting between two normal strings separated by \r\n.
    # It can get that messy.
encoded_strings = re.findall(r'=\?.*?\?=', input_str, re.I)
# return input_str as is, no need to do any conversion
if not encoded_strings:
return input_str
# get the decoded strings
try:
decoded_strings = [decode_header(x)[0] for x in encoded_strings]
decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings]
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
_debug_print("Decoding: {0}. Error code: {1}. Error message: {2}".format(encoded_strings, error_code, error_msg))
return def_name
# convert to dict for safe access, if it's an empty list, the dict will be empty
decoded_strings = dict(enumerate(decoded_strings))
new_str = ''
new_str_create_count = 0
for i, encoded_string in enumerate(encoded_strings):
decoded_string = decoded_strings.get(i)
if not decoded_string:
# nothing to replace with
continue
value = decoded_string.get('value')
encoding = decoded_string.get('encoding')
if not encoding or not value:
# nothing to replace with
continue
try:
if encoding != 'utf-8':
value = str(value, encoding)
except Exception:
pass
try:
            # commenting out the existing approach in favor of the new approach deployed below
            # (substitute the encoded string with the decoded one):
            # input_str = input_str.replace(encoded_string, value)
            # make a new string instead of replacing into the input string because of an issue found in PAPP-9531
if value:
new_str += UnicodeDammit(value).unicode_markup
new_str_create_count += 1
except Exception:
pass
    # replace the input string with the new string because of an issue found in PAPP-9531
if new_str and new_str_create_count == len(encoded_strings):
        _debug_print("Creating a new string entirely from the encoded_strings and assigning it to input_str")
input_str = new_str
return input_str
def _get_container_name(parsed_mail, email_id):
# Create the default name
def_cont_name = "Email ID: {0}".format(email_id)
# get the subject from the parsed mail
subject = parsed_mail.get(PROC_EMAIL_JSON_SUBJECT)
# if no subject then return the default
if not subject:
return def_cont_name
try:
return str(make_header(decode_header(subject)))
except Exception:
return _decode_uni_string(subject, def_cont_name)
def _handle_if_body(content_disp, content_id, content_type, part, bodies, file_path, parsed_mail, file_name):
process_as_body = False
    # if content disposition is None then assume it is the body
if content_disp is None:
process_as_body = True
# if content disposition is inline
elif content_disp.lower().strip() == 'inline':
if ('text/html' in content_type) or ('text/plain' in content_type):
process_as_body = True
if not process_as_body:
return phantom.APP_SUCCESS, True
part_payload = part.get_payload(decode=True)
if not part_payload:
return phantom.APP_SUCCESS, False
charset = part.get_content_charset()
with open(file_path, 'wb') as f:
f.write(part_payload)
bodies.append({'file_path': file_path, 'charset': charset, 'content-type': content_type})
_add_body_in_email_headers(parsed_mail, file_path, charset, content_type, file_name)
return phantom.APP_SUCCESS, False
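# Process a single MIME part: save it to disk, treat it as a body when appropriate,
# otherwise hand it off to attachment handling.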
def _handle_part(part, part_index, tmp_dir, extract_attach, parsed_mail):
bodies = parsed_mail[PROC_EMAIL_JSON_BODIES]
# get the file_name
file_name = part.get_filename()
content_disp = part.get('Content-Disposition')
content_type = part.get('Content-Type')
content_id = part.get('Content-ID')
if file_name is None:
# init name and extension to default values
name = "part_{0}".format(part_index)
extension = ".{0}".format(part_index)
# Try to create an extension from the content type if possible
if content_type is not None:
extension = mimetypes.guess_extension(re.sub(';.*', '', content_type))
# Try to create a name from the content id if possible
if content_id is not None:
name = content_id
file_name = "{0}{1}".format(name, extension)
else:
try:
file_name = str(make_header(decode_header(file_name)))
except Exception:
file_name = _decode_uni_string(file_name, file_name)
# Remove any chars that we don't want in the name
try:
file_path = "{0}/{1}_{2}".format(tmp_dir, part_index,
file_name.translate(None, ''.join(['<', '>', ' '])))
except TypeError: # py3
file_path = "{0}/{1}_{2}".format(tmp_dir, part_index, file_name.translate(
file_name.maketrans('', '', ''.join(['<', '>', ' ']))))
_debug_print("file_path: {0}".format(file_path))
# is the part representing the body of the email
status, process_further = _handle_if_body(
content_disp, content_id, content_type, part, bodies, file_path, parsed_mail, file_name)
if not process_further:
return phantom.APP_SUCCESS
# is this another email as an attachment
if (content_type is not None) and (content_type.find(PROC_EMAIL_CONTENT_TYPE_MESSAGE) != -1):
return phantom.APP_SUCCESS
    # This is an attachment; the check for an embedded email was done above
if extract_attach:
_handle_attachment(part, file_name, file_path, parsed_mail)
return phantom.APP_SUCCESS
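# Save an attachment part to disk (when extraction is enabled) and record its headers,
# metadata and SHA1 hash for later vaulting.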
def _handle_attachment(part, file_name, file_path, parsed_mail):
files = parsed_mail[PROC_EMAIL_JSON_FILES]
if not _config[PROC_EMAIL_JSON_EXTRACT_ATTACHMENTS]:
return phantom.APP_SUCCESS
part_base64_encoded = part.get_payload()
headers = _get_email_headers_from_part(part)
attach_meta_info = dict()
if headers:
attach_meta_info = {'headers': dict(headers)}
for curr_attach in _attachments:
if curr_attach.get('should_ignore', False):
continue
try:
attach_content = curr_attach['content']
except Exception:
continue
if attach_content.strip().replace('\r\n', '') == part_base64_encoded.strip().replace('\r\n', ''):
attach_meta_info.update(dict(curr_attach))
del attach_meta_info['content']
curr_attach['should_ignore'] = True
part_payload = part.get_payload(decode=True)
if not part_payload:
return phantom.APP_SUCCESS
try:
with open(file_path, 'wb') as f:
f.write(part_payload)
except IOError as e:
error_code, error_msg = _get_error_message_from_exception(e)
try:
if "File name too long" in error_msg:
new_file_name = "ph_long_file_name_temp"
file_path = "{}{}".format(remove_child_info(file_path).rstrip(
file_name.replace('<', '').replace('>', '').replace(' ', '')), new_file_name)
_debug_print("Original filename: {}".format(file_name))
_base_connector.debug_print(
"Modified filename: {}".format(new_file_name))
with open(file_path, 'wb') as long_file:
long_file.write(part_payload)
else:
_debug_print(
"Error occurred while adding file to Vault. Error Details: {}".format(error_msg))
return
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
_debug_print(
"Error occurred while adding file to Vault. Error Details: {}".format(error_msg))
return
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
_debug_print(
"Error occurred while adding file to Vault. Error Details: {}".format(error_msg))
return
file_hash = hashlib.sha1(part_payload).hexdigest()
files.append({'file_name': file_name, 'file_path': file_path,
'file_hash': file_hash, 'meta_info': attach_meta_info})
def remove_child_info(file_path):
    # str.rstrip() strips a set of characters, not a suffix, so slice the marker off instead
    if file_path.endswith('_True'):
        return file_path[:-len('_True')]
    if file_path.endswith('_False'):
        return file_path[:-len('_False')]
    return file_path
def _get_email_headers_from_part(part, charset=None):
email_headers = list(part.items())
# TODO: the next 2 ifs can be condensed to use 'or'
if charset is None:
charset = part.get_content_charset()
if charset is None:
charset = 'utf8'
if not email_headers:
return {}
# Convert the header tuple into a dictionary
headers = CaseInsensitiveDict()
try:
[headers.update({x[0]: _get_string(x[1], charset)}) for x in email_headers]
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error occurred while converting the header tuple into a dictionary"
_debug_print("{}. {}. {}".format(err, error_code, error_msg))
    # Handle 'Received' headers separately
    received_headers = list()
    try:
received_headers = [_get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error occurred while handling the received header tuple separately"
_debug_print("{}. {}. {}".format(err, error_code, error_msg))
if received_headers:
headers['Received'] = received_headers
# handle the subject string, if required add a new key
subject = headers.get('Subject')
if subject:
try:
headers['decodedSubject'] = str(make_header(decode_header(subject)))
except Exception:
headers['decodedSubject'] = _decode_uni_string(subject, subject)
return dict(headers)
def _parse_email_headers(parsed_mail, part, charset=None, add_email_id=None):
global _email_id_contains
email_header_artifacts = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
headers = _get_email_headers_from_part(part, charset)
if not headers:
return 0
# Parse email keys first
cef_artifact = {}
cef_types = {}
if headers.get('From'):
emails = headers['From']
if emails:
cef_artifact.update({'fromEmail': emails})
if headers.get('To'):
emails = headers['To']
if emails:
cef_artifact.update({'toEmail': emails})
message_id = headers.get('Message-ID')
# if the header did not contain any email addresses and message ID then ignore this artifact
if not cef_artifact and not message_id:
return 0
cef_types.update({'fromEmail': ['email'], 'toEmail': ['email']})
if headers:
cef_artifact['emailHeaders'] = headers
    # Adding the email id as a cef artifact crashes the UI when trying to show the action dialog box,
    # so it is not being added right now. All the other code to process the emailId is there; we are
    # just refraining from adding the emailId to the cef dictionary
# add_email_id = False
if add_email_id:
cef_artifact['emailId'] = add_email_id
if _email_id_contains:
cef_types.update({'emailId': _email_id_contains})
artifact = {}
artifact.update(_artifact_common)
artifact['name'] = 'Email Artifact'
artifact['cef'] = cef_artifact
artifact['cef_types'] = cef_types
email_header_artifacts.append(artifact)
return len(email_header_artifacts)
def _add_body_in_email_headers(parsed_mail, file_path, charset, content_type, file_name):
if not content_type:
_debug_print('Unable to update email headers with the invalid content_type {}'.format(content_type))
return
# Add email_bodies to email_headers
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
try:
with open(file_path, 'r') as f:
body_content = f.read()
except Exception:
with open(file_path, 'rb') as f:
body_content = f.read()
_debug_print("Reading file data using binary mode")
# Add body to the last added Email artifact
body_content = UnicodeDammit(body_content).unicode_markup.encode('utf-8').decode('utf-8').replace('\u0000', '')
_debug_print('Processing email part with content_type: {}'.format(content_type))
IMAGE_CONTENT_TYPES = ['image/jpeg', 'image/png']
if any(t for t in IMAGE_CONTENT_TYPES if t in content_type):
_debug_print('Saving image {} to files'.format(file_name))
try:
file_hash = hashlib.sha1(body_content.encode()).hexdigest()
files = parsed_mail[PROC_EMAIL_JSON_FILES]
files.append({'file_name': file_name, 'file_path': file_path, 'file_hash': file_hash})
except Exception as e:
_debug_print("Error occurred while adding file {} to files. Error Details: {}".format(file_name, e))
return
if 'text/plain' in content_type:
try:
email_headers[-1]['cef']['bodyText'] = _get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyText'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyText'] = _decode_uni_string(body_content, body_content)
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error occurred while parsing text/plain body content for creating artifacts"
_debug_print("{}. {}. {}".format(err, error_code, error_msg))
elif 'text/html' in content_type:
try:
email_headers[-1]['cef']['bodyHtml'] = _get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyHtml'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyHtml'] = _decode_uni_string(body_content, body_content)
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error occurred while parsing text/html body content for creating artifacts"
_debug_print("{}. {}. {}".format(err, error_code, error_msg))
else:
if not email_headers[-1]['cef'].get('bodyOther'):
email_headers[-1]['cef']['bodyOther'] = {}
try:
email_headers[-1]['cef']['bodyOther'][content_type] = _get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyOther'][content_type] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyOther'][content_type] = _decode_uni_string(body_content, body_content)
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error occurred while parsing bodyOther content for creating artifacts"
_debug_print("{}. {}. {}".format(err, error_code, error_msg))
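# Walk the parsed email object: extract headers, bodies and attachments into tmp_dir,
# populate parsed_mail, set up the container data and create the artifacts.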
def _handle_mail_object(mail, email_id, rfc822_email, tmp_dir, start_time_epoch):
parsed_mail = OrderedDict()
    # Create a tmp directory for this email; all files will be extracted here
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
extract_attach = _config[PROC_EMAIL_JSON_EXTRACT_ATTACHMENTS]
charset = mail.get_content_charset()
_debug_print('mail file_name: {}'.format(mail.get_filename()))
_debug_print('mail charset: {}'.format(charset))
_debug_print('mail subject: {}'.format(mail.get('Subject', '')))
if charset is None:
charset = 'utf8'
# Extract fields and place it in a dictionary
parsed_mail[PROC_EMAIL_JSON_SUBJECT] = mail.get('Subject', '')
parsed_mail[PROC_EMAIL_JSON_FROM] = mail.get('From', '')
parsed_mail[PROC_EMAIL_JSON_TO] = mail.get('To', '')
parsed_mail[PROC_EMAIL_JSON_DATE] = mail.get('Date', '')
parsed_mail[PROC_EMAIL_JSON_MSG_ID] = mail.get('Message-ID', '')
parsed_mail[PROC_EMAIL_JSON_FILES] = files = []
parsed_mail[PROC_EMAIL_JSON_BODIES] = bodies = []
parsed_mail[PROC_EMAIL_JSON_START_TIME] = start_time_epoch
parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS] = []
# parse the parts of the email
if mail.is_multipart():
for i, part in enumerate(mail.walk()):
add_email_id = None
if i == 0:
add_email_id = email_id
_parse_email_headers(parsed_mail, part, add_email_id=add_email_id)
# parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS].append(part.items())
if part.is_multipart():
continue
try:
ret_val = _handle_part(part, i, tmp_dir, extract_attach, parsed_mail)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
_debug_print("ErrorExp in _handle_part # {0}".format(i), err)
continue
if phantom.is_fail(ret_val):
continue
else:
_parse_email_headers(parsed_mail, mail, add_email_id=email_id)
# parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS].append(mail.items())
file_path = "{0}/{1}".format(tmp_dir, DEFAULT_SINGLE_PART_EML_FILE_NAME)
file_name = mail.get_filename() or mail.get('Subject', DEFAULT_SINGLE_PART_EML_FILE_NAME)
with open(file_path, 'wb') as f:
f.write(mail.get_payload(decode=True))
bodies.append({'file_path': file_path, 'charset': mail.get_content_charset(), 'content-type': 'text/plain'})
_add_body_in_email_headers(parsed_mail, file_path, mail.get_content_charset(), 'text/plain', file_name)
# get the container name
container_name = _get_container_name(parsed_mail, email_id)
if container_name is None:
return phantom.APP_ERROR
# Add the container
# first save the container, to do that copy things from parsed_mail to a new object
container = {}
container_data = dict(parsed_mail)
# delete the header info, we don't make it a part of the container json
del(container_data[PROC_EMAIL_JSON_EMAIL_HEADERS])
container.update(_container_common)
_container['source_data_identifier'] = email_id
_container['name'] = container_name
_container['data'] = {'raw_email': rfc822_email}
    # Create the sets before handling the bodies. If both bodies add the same IP,
    # only one artifact should be created
parsed_mail[PROC_EMAIL_JSON_IPS] = set()
parsed_mail[PROC_EMAIL_JSON_HASHES] = set()
parsed_mail[PROC_EMAIL_JSON_URLS] = set()
parsed_mail[PROC_EMAIL_JSON_DOMAINS] = set()
# For bodies
for i, body in enumerate(bodies):
if not body:
continue
try:
_handle_body(body, parsed_mail, i, email_id)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
_debug_print("ErrorExp in _handle_body # {0}: {1}".format(i, err))
continue
# Files
_attachments.extend(files)
_create_artifacts(parsed_mail)
return phantom.APP_SUCCESS
def _init():
global _base_connector
global _config
global _container
global _artifacts
global _attachments
global _email_id_contains
global _python_version
_base_connector = None
_email_id_contains = list()
_config = None
_container = dict()
_artifacts = list()
_attachments = list()
try:
_python_version = int(sys.version_info[0])
except Exception:
raise Exception("Error occurred while getting the Phantom server's Python major version.")
def _set_email_id_contains(email_id):
global _base_connector
global _email_id_contains
if not _base_connector:
return
try:
email_id = _get_string(email_id, 'utf-8')
except Exception:
email_id = str(email_id)
_base_connector.debug_print(email_id)
if _base_connector.get_app_id() == EXCHANGE_ONPREM_APP_ID and email_id.endswith('='):
_email_id_contains = ["exchange email id"]
elif _base_connector.get_app_id() == OFFICE365_APP_ID and email_id.endswith('='):
_email_id_contains = ["office 365 email id"]
elif _base_connector.get_app_id() == IMAP_APP_ID and email_id.isdigit():
_email_id_contains = ["imap email id"]
elif ph_utils.is_sha1(email_id):
_email_id_contains = ["vault id"]
_base_connector.debug_print(_email_id_contains)
return
def _del_tmp_dirs():
"""Remove any tmp_dirs that were created."""
global _tmp_dirs
for tmp_dir in _tmp_dirs:
shutil.rmtree(tmp_dir, ignore_errors=True)
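# Parse the raw RFC822 text into a mail object, process it inside a temporary
# directory and return the container/artifact/file results.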
def _int_process_email(rfc822_email, email_id, start_time_epoch):
global _base_connector
global _config
global _tmp_dirs
mail = email.message_from_string(rfc822_email)
ret_val = phantom.APP_SUCCESS
phantom_home_dir = _base_connector.get_phantom_home()
if os.path.isdir(phantom_home_dir) and phantom_home_dir == "/home/phanru/phantomcyber":
tmp_dir = tempfile.mkdtemp(prefix='ph_email_phparser', dir=Vault.get_vault_tmp_dir())
else:
tmp_dir = tempfile.mkdtemp(prefix='ph_email_phparser')
_tmp_dirs.append(tmp_dir)
try:
ret_val = _handle_mail_object(mail, email_id, rfc822_email, tmp_dir, start_time_epoch)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
message = "ErrorExp in _handle_mail_object: {0}".format(err)
_debug_print(message)
return phantom.APP_ERROR, message, []
results = [{'container': _container, 'artifacts': _artifacts, 'files': _attachments, 'temp_directory': tmp_dir}]
return ret_val, "Email Parsed", results
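# Entry point used by the connector: initialize module state, process the email and
# save the resulting container and artifacts.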
def process_email(base_connector, rfc822_email, email_id, config, label, container_id, epoch):
try:
_init()
except Exception as e:
return phantom.APP_ERROR, {'message': str(e), 'content_id': None}
global _base_connector
global _config
_base_connector = base_connector
_config = config
try:
_set_email_id_contains(email_id)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
_base_connector.debug_print("Handled Exception while setting contains for email ID", err)
pass
ret_val, message, results = _int_process_email(rfc822_email, email_id, epoch)
if not ret_val:
_del_tmp_dirs()
return phantom.APP_ERROR, {'message': message, 'content_id': None}
try:
cid, artifacts, successful_artifacts = _parse_results(
results, label, container_id, _config[PROC_EMAIL_JSON_RUN_AUTOMATION])
except Exception:
_del_tmp_dirs()
raise
return (
phantom.APP_SUCCESS,
{
'message': 'Email Processed',
'container_id': cid,
'artifacts': artifacts,
'successful_artifacts': successful_artifacts,
})
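# Persist the parsed results: create (or reuse) the container, vault the extracted
# files and save the artifacts, honoring the container/artifact count limits.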
def _parse_results(results, label, update_container_id, run_automation=True):
global _base_connector
param = _base_connector.get_current_param()
container_count = PARSER_DEFAULT_CONTAINER_COUNT
artifact_count = PARSER_DEFAULT_ARTIFACT_COUNT
if param:
container_count = param.get(phantom.APP_JSON_CONTAINER_COUNT, PARSER_DEFAULT_CONTAINER_COUNT)
artifact_count = param.get(phantom.APP_JSON_ARTIFACT_COUNT, PARSER_DEFAULT_ARTIFACT_COUNT)
results = results[:container_count]
for result in results:
if not update_container_id:
container = result.get('container')
if not container:
continue
container.update(_container_common)
container['label'] = label
try:
(ret_val, message, container_id) = _base_connector.save_container(container)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
_base_connector.debug_print("Handled Exception while saving container", err)
continue
_base_connector.debug_print("save_container returns, value: {0}, reason: {1}, id: {2}".format(ret_val, message, container_id))
if phantom.is_fail(ret_val):
message = "Failed to add Container for id: {0}, error msg: {1}".format(container['source_data_identifier'], message)
_base_connector.debug_print(message)
continue
if not container_id:
message = "save_container did not return a container_id"
_base_connector.debug_print(message)
continue
else:
container_id = update_container_id
files = result.get('files')
_debug_print('# of files to process: {}'.format(len(files)))
successful_artifacts = []
failed_artifacts = []
vault_artifacts = []
vault_ids = list()
# Generate and save Vault artifacts from files
vault_artifacts_added = 0
for curr_file in files:
# Generate a new Vault artifact for the file and save it to a container
ret_val, added_to_vault, vault_artifact = _handle_file(
curr_file, vault_ids, container_id, vault_artifacts_added)
vault_artifacts.append(vault_artifact)
if added_to_vault:
vault_artifacts_added += 1
successful_artifacts.append(vault_artifact)
else:
failed_artifacts.append(vault_artifact)
artifacts = result.get('artifacts')
if not artifacts:
continue
if not _base_connector.is_poll_now():
artifacts = artifacts[:artifact_count]
len_artifacts = len(artifacts)
_base_connector.debug_print(len_artifacts)
for j, artifact in enumerate(artifacts):
if not artifact:
continue
# add the container id to the artifact
artifact['container_id'] = container_id
_base_connector.debug_print(artifact['container_id'])
_set_sdi((j + vault_artifacts_added), artifact)
if run_automation:
# if it is the last artifact of the last container
if (j + 1) == len_artifacts:
# mark it such that active playbooks get executed
artifact['run_automation'] = True
ret_val, status_string, artifact_id = _base_connector.save_artifact(artifact)
_base_connector.debug_print(
"save_artifact returns, value: {0}, reason: {1}, id: {2}".format(ret_val, status_string, artifact_id))
if phantom.is_fail(ret_val):
failed_artifacts.append(artifact)
else:
successful_artifacts.append(artifact)
# artifacts should represent all found artifacts from the email
artifacts.extend(vault_artifacts)
_debug_print('total # of artifacts to process: {}'.format(len(artifacts)))
_debug_print('# of successful processed artifacts: {}'.format(len(successful_artifacts)))
_debug_print('failed artifacts: {}'.format(failed_artifacts))
# delete any temp directories that were created by the email parsing function
[shutil.rmtree(x['temp_directory'], ignore_errors=True) for x in results if x.get('temp_directory')]
return container_id, artifacts, successful_artifacts
def _add_vault_hashes_to_dictionary(cef_artifact, vault_id):
_, _, vault_info = ph_rules.vault_info(vault_id=vault_id)
vault_info = list(vault_info)
if not vault_info:
return phantom.APP_ERROR, "Vault ID not found"
# The return value is a list, each item represents an item in the vault
# matching the vault id, the info that we are looking for (the hashes)
# will be the same for every entry, so just access the first one
try:
metadata = vault_info[0].get('metadata')
except Exception:
return phantom.APP_ERROR, "Failed to get vault item metadata"
try:
cef_artifact['fileHashSha256'] = metadata['sha256']
except Exception:
pass
try:
cef_artifact['fileHashMd5'] = metadata['md5']
except Exception:
pass
try:
cef_artifact['fileHashSha1'] = metadata['sha1']
except Exception:
pass
return phantom.APP_SUCCESS, "Mapped hash values"
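# Add a single extracted file to the vault and create the corresponding Vault Artifact.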
def _handle_file(curr_file, vault_ids, container_id, artifact_id):
file_name = curr_file.get('file_name')
local_file_path = curr_file['file_path']
contains = _get_file_contains(local_file_path)
# lets move the data into the vault
vault_attach_dict = {}
if not file_name:
file_name = os.path.basename(local_file_path)
_base_connector.debug_print("Vault file name: {0}".format(file_name))
_base_connector.debug_print("Vault file path: {0}".format(local_file_path))
vault_attach_dict[phantom.APP_JSON_ACTION_NAME] = _base_connector.get_action_name()
vault_attach_dict[phantom.APP_JSON_APP_RUN_ID] = _base_connector.get_app_run_id()
file_name = _decode_uni_string(file_name, file_name)
try:
success, message, vault_id = ph_rules.vault_add(file_location=local_file_path, container=container_id,
file_name=file_name, metadata=vault_attach_dict)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
_base_connector.debug_print(phantom.APP_ERR_FILE_ADD_TO_VAULT.format(err))
        # return a 3-tuple so the caller's (ret_val, added_to_vault, vault_artifact) unpacking does not fail
        return phantom.APP_ERROR, phantom.APP_ERROR, {}
if not success:
_base_connector.debug_print("Failed to add file to Vault: {0}".format(json.dumps(message)))
        return phantom.APP_ERROR, phantom.APP_ERROR, {}
# add the vault id artifact to the container
cef_artifact = {}
if file_name:
cef_artifact.update({'fileName': file_name})
if vault_id:
cef_artifact.update({'vaultId': vault_id,
'cs6': vault_id,
'cs6Label': 'Vault ID'})
# now get the rest of the hashes and add them to the cef artifact
_add_vault_hashes_to_dictionary(cef_artifact, vault_id)
if not cef_artifact:
        return phantom.APP_SUCCESS, phantom.APP_ERROR, {}
artifact = {}
artifact.update(_artifact_common)
artifact['container_id'] = container_id
artifact['name'] = 'Vault Artifact'
artifact['cef'] = cef_artifact
if contains:
artifact['cef_types'] = {'vaultId': contains, 'cs6': contains}
_set_sdi(artifact_id, artifact)
ret_val, status_string, artifact_id = _base_connector.save_artifact(artifact)
_base_connector.debug_print(
"save_artifact returns, value: {0}, reason: {1}, id: {2}".format(ret_val, status_string, artifact_id))
return phantom.APP_SUCCESS, ret_val, artifact
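# Set the artifact's source_data_identifier to a hash of its contents (see the PS-4216 note below).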
def _set_sdi(default_id, input_dict):
if 'source_data_identifier' in input_dict:
del input_dict['source_data_identifier']
dict_hash = None
# first get the phantom version
phantom_version = _base_connector.get_product_version()
if not phantom_version:
dict_hash = _create_dict_hash(input_dict)
else:
ver_cmp = operator.eq(phantom_version, HASH_FIXED_PHANTOM_VERSION)
if ver_cmp is False:
dict_hash = _create_dict_hash(input_dict)
if dict_hash:
input_dict['source_data_identifier'] = dict_hash
else:
        # Remove this code once the backend has fixed PS-4216 _and_ it has been
        # merged into next so that 2.0 and 2.1 have the code
input_dict['source_data_identifier'] = _create_dict_hash(input_dict)
return phantom.APP_SUCCESS
def _get_fips_enabled():
try:
from phantom_common.install_info import is_fips_enabled
except ImportError:
return False
fips_enabled = is_fips_enabled()
if fips_enabled:
_debug_print('FIPS is enabled')
else:
_debug_print('FIPS is not enabled')
return fips_enabled
def _create_dict_hash(input_dict):
input_dict_str = None
if not input_dict:
return None
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
except Exception as e:
error_code, error_msg = _get_error_message_from_exception(e)
err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
_base_connector.debug_print('Handled exception in _create_dict_hash', err)
return None
fips_enabled = _get_fips_enabled()
# if fips is not enabled, we should continue with our existing md5 usage for generating hashes
# to not impact existing customers
dict_hash = UnicodeDammit(input_dict_str).unicode_markup.encode()
if not fips_enabled:
dict_hash = hashlib.md5(dict_hash)
else:
dict_hash = hashlib.sha256(dict_hash)
return dict_hash.hexdigest()
| 34.895031 | 138 | 0.657616 |
794979a2b775d77bfb824d6a65b037ac2bfa8d00
| 12,174 |
py
|
Python
|
read_ohara.py
|
jfblanchard/optical-tools
|
25ce0f2238e1c22b6694d00d359e4243a33db30b
|
[
"Apache-2.0"
] | null | null | null |
read_ohara.py
|
jfblanchard/optical-tools
|
25ce0f2238e1c22b6694d00d359e4243a33db30b
|
[
"Apache-2.0"
] | null | null | null |
read_ohara.py
|
jfblanchard/optical-tools
|
25ce0f2238e1c22b6694d00d359e4243a33db30b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Parse the ohara downloaded catalog (csv format), and create a new cleaned up
data frame with glass type and sellmeier coeffs for all glass types. Output
in json format.
Ohara download version 20171130
Credits: Hover text snippet found here:
https://stackoverflow.com/questions/7908636/
possible-to-make-labels-appear-when-hovering-over-a-point-in-matplotlib
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#ohara has 140 glasses in 20171130 version
df = pd.read_csv('OHARA_20171130_6.csv', header=[0, 1])
#glass names are in column 1
glass = df[df.columns[1]].values
for i in range(len(glass)):
    glass[i] = glass[i].replace(" ", "")  # make consistent with no spaces
#Index at sodium d-line (Nd) is in column 16
nd = df[df.columns[16]].values
columns = ['Nd', 'A1', 'A2', 'A3', 'B1', 'B2', 'B3']
# Create a new data frame with just the glass type, Nd, and sellmeiers.
# Todo: maybe add other properties.
# best format - pickled df, json, hdf5, yml?
df_sell = pd.DataFrame(index=glass,columns=columns)
df_sell = df_sell.fillna(0)
abbe = df[df.columns[26]].values
A1 = df[df.columns[60]].values
A2 = df[df.columns[61]].values
A3 = df[df.columns[62]].values
B1 = df[df.columns[63]].values
B2 = df[df.columns[64]].values
B3 = df[df.columns[65]].values
df_sell['Glass'] = glass
df_sell['Abbe'] = abbe
df_sell['Nd'] = nd
df_sell['A1'] = A1
df_sell['A2'] = A2
df_sell['A3'] = A3
df_sell['B1'] = B1
df_sell['B2'] = B2
df_sell['B3'] = B3
#plot
sns.set_style(style='whitegrid')
fig,ax = plt.subplots()
plt.title('Index vs. Abbe Number for Ohara Glass')
plt.ylabel('Refractive Index (Nd)')
plt.xlabel('Abbe Number')
plt.gca().invert_xaxis()
sc = plt.scatter(abbe, nd)
#annotations
annot = ax.annotate("", xy=(0,0), xytext=(10,10),textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
def update_annot(ind):
pos = sc.get_offsets()[ind["ind"][0]]
annot.xy = pos
    text = "{}, {}".format(" ".join([glass[n] for n in ind["ind"]]),
                           " ".join([str(nd[n]) for n in ind["ind"]]))
annot.set_text(text)
#annot.get_bbox_patch().set_facecolor(cmap(norm(c[ind["ind"][0]])))
annot.get_bbox_patch().set_alpha(0.4)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = sc.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
#connect the hover function and show the plot
fig.canvas.mpl_connect("motion_notify_event", hover)
plt.show()
#save the data frame as json and pickle
path = os.getcwd()
df_sell.to_json(path + '/ohara_glasses.json')
pd.to_pickle(df_sell, path + '/ohara_glasses.pkl')
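# Example (assumption, not part of the original workflow): reading the saved table back
# df_check = pd.read_json(path + '/ohara_glasses.json')
# print(df_check.loc['S-BSL7', ['Nd', 'Abbe', 'A1', 'B1']])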
#later add schott glasses too
#schottdf = pd.read_csv('schott-optical-glass-06032017.csv') #utf-8 error
# Reference: index, glass types
#0 S-FPL51
#1 S-FPL53
#2 S-FPL55
#3 S-FPM2
#4 S-FPM3
#5 S-FSL5
#6 S-BSL7
#7 S-BSM2
#8 S-BSM4
#9 S-BSM10
#10 S-BSM14
#11 S-BSM15
#12 S-BSM16
#13 S-BSM18
#14 S-BSM22
#15 S-BSM25
#16 S-BSM28
#17 S-BSM71
#18 S-BSM81
#19 S-NSL3
#20 S-NSL5
#21 S-NSL36
#22 S-BAL2
#23 S-BAL3
#24 S-BAL12
#25 S-BAL14
#26 S-BAL35
#27 S-BAL41
#28 S-BAL42
#29 S-BAM4
#30 S-BAM12
#31 S-BAH10
#32 S-BAH11
#33 S-BAH27
#34 S-BAH28
#35 S-BAH32
#36 S-PHM52
#37 S-PHM53
#38 S-TIL1
#39 S-TIL2
#40 S-TIL6
#41 S-TIL25
#42 S-TIL26
#43 S-TIL27
#44 S-TIM1
#45 S-TIM2
#46 S-TIM5
#47 S-TIM8
#48 S-TIM22
#49 S-TIM25
#50 S-TIM27
#51 S-TIM28
#52 S-TIM35
#53 S-TIM39
#54 S-TIH1
#55 S-TIH3
#56 S-TIH4
#57 S-TIH6
#58 S-TIH10
#59 S-TIH11
#60 S-TIH13
#61 S-TIH14
#62 S-TIH18
#63 S-TIH23
#64 S-TIH53
#65 S-TIH53W
#66 S-TIH57
#67 S-LAL7
#68 S-LAL8
#69 S-LAL9
#70 S-LAL10
#71 S-LAL12
#72 S-LAL13
#73 S-LAL14
#74 S-LAL18
#75 S-LAL19
#76 S-LAL20
#77 S-LAL54
#78 S-LAL54Q
#79 S-LAL58
#80 S-LAL59
#81 S-LAL61
#82 S-LAM2
#83 S-LAM3
#84 S-LAM7
#85 S-LAM52
#86 S-LAM54
#87 S-LAM55
#88 S-LAM58
#89 S-LAM59
#90 S-LAM60
#91 S-LAM61
#92 S-LAM66
#93 S-LAM73
#94 S-LAH51
#95 S-LAH52
#96 S-LAH52Q
#97 S-LAH53
#98 S-LAH53V
#99 S-LAH55V
#100 S-LAH55VS
#101 S-LAH58
#102 S-LAH59
#103 S-LAH60
#104 S-LAH60V
#105 S-LAH63
#106 S-LAH63Q
#107 S-LAH64
#108 S-LAH65V
#109 S-LAH65VS
#110 S-LAH66
#111 S-LAH71
#112 S-LAH79
#113 S-LAH88
#114 S-LAH89
#115 S-LAH92
#116 S-LAH93
#117 S-LAH95
#118 S-LAH96
#119 S-LAH97
#120 S-YGH51
#121 S-FTM16
#122 S-NBM51
#123 S-NBH5
#124 S-NBH8
#125 S-NBH51
#126 S-NBH52
#127 S-NBH52V
#128 S-NBH53
#129 S-NBH53V
#130 S-NBH55
#131 S-NBH56
#132 S-NBH57
#133 S-NPH1
#134 S-NPH1W
#135 S-NPH2
#136 S-NPH3
#137 S-NPH4
#138 S-NPH5
#139 S-NPH53
# Parsed raw columns from csv ------------------------------------------
#0 ('Unnamed: 0_level_0', 'Unnamed: 0_level_1')
#1 ('Unnamed: 1_level_0', 'Glass ')
#2 ('Unnamed: 2_level_0', 'Code(d)')
#3 ('Unnamed: 3_level_0', 'Code(e)')
#4 ('REFRACTIVE INDICES', 'n2325')
#5 ('Unnamed: 5_level_0', 'n1970')
#6 ('Unnamed: 6_level_0', 'n1530')
#7 ('Unnamed: 7_level_0', 'n1129')
#8 ('REFRACTIVE INDICES', 'nt')
#9 ('Unnamed: 9_level_0', 'ns')
#10 ('Unnamed: 10_level_0', "nA'")
#11 ('Unnamed: 11_level_0', 'nr')
#12 ('REFRACTIVE INDICES', 'nC')
#13 ('Unnamed: 13_level_0', "nC'")
#14 ('Unnamed: 14_level_0', 'nHe-Ne')
#15 ('Unnamed: 15_level_0', 'nD')
#16 ('REFRACTIVE INDICES', 'nd')
#17 ('Unnamed: 17_level_0', 'ne')
#18 ('Unnamed: 18_level_0', 'nF')
#19 ('Unnamed: 19_level_0', "nF'")
#20 ('REFRACTIVE INDICES', 'nHe-Cd')
#21 ('Unnamed: 21_level_0', 'ng')
#22 ('Unnamed: 22_level_0', 'nh')
#23 ('Unnamed: 23_level_0', 'ni')
#24 ('ABBE', '?d')
#25 ('Unnamed: 25_level_0', '?e')
#26 ('ABBE', '?d').1
#27 ('Unnamed: 27_level_0', '?e')
#28 ('DISPERSIONS', 'nF-nC')
#29 ('Unnamed: 29_level_0', 'nF-nC')
#30 ('Unnamed: 30_level_0', "nF'-nC'")
#31 ('PARTIAL DISPERSIONS', 'nC-nt')
#32 ('Unnamed: 32_level_0', "nC-nA'")
#33 ('Unnamed: 33_level_0', 'nd-nC')
#34 ('Unnamed: 34_level_0', 'ne-nC')
#35 ('Unnamed: 35_level_0', 'ng-nd')
#36 ('Unnamed: 36_level_0', 'ng-nF')
#37 ('PARTIAL DISPERSIONS', 'nh-ng')
#38 ('Unnamed: 38_level_0', 'ni-ng')
#39 ('Unnamed: 39_level_0', "nC'-nt")
#40 ('Unnamed: 40_level_0', "ne-nC'")
#41 ('Unnamed: 41_level_0', "nF'-ne")
#42 ('Unnamed: 42_level_0', "ni-nF'")
#43 ('RELATIVE PARTIAL DISPERSIONS', '?C,t')
#44 ('Unnamed: 44_level_0', "?C,A'")
#45 ('Unnamed: 45_level_0', '?d,C')
#46 ('Unnamed: 46_level_0', '?e,C')
#47 ('Unnamed: 47_level_0', '?g,d')
#48 ('Unnamed: 48_level_0', '?g,F')
#49 ('RELATIVE PARTIAL DISPERSIONS', '?h,g')
#50 ('Unnamed: 50_level_0', '?i,g')
#51 ('Unnamed: 51_level_0', "?'C',t")
#52 ('Unnamed: 52_level_0', "?'e,C'")
#53 ('Unnamed: 53_level_0', "?'F',e")
#54 ('Unnamed: 54_level_0', "?'i,F'")
#55 ('Deviation of Relative Partial Dispesions', '??C,t')
#56 ('Unnamed: 56_level_0', "??C,A'")
#57 ('Unnamed: 57_level_0', '??g,d')
#58 ('Unnamed: 58_level_0', '??g,F')
#59 ('Unnamed: 59_level_0', '??i,g')
#60 ('CONSTANTS OF DISPERSION FORMULA (Sellmeier)', 'A1')
#61 ('Unnamed: 61_level_0', 'A2')
#62 ('Unnamed: 62_level_0', 'A3')
#63 ('Unnamed: 63_level_0', 'B1')
#64 ('Unnamed: 64_level_0', 'B2')
#65 ('Unnamed: 65_level_0', 'B3')
#66 ('CONSTANTS OF DISPERSION FORMULA (Cauchy)', 'A0')
#67 ('Unnamed: 67_level_0', 'A1')
#68 ('Unnamed: 68_level_0', 'A2')
#69 ('Unnamed: 69_level_0', 'A3')
#70 ('Unnamed: 70_level_0', 'A4')
#71 ('Unnamed: 71_level_0', 'A5')
#72 ('COLORING', '?80')
#73 ('Unnamed: 73_level_0', '(?70)')
#74 ('Unnamed: 74_level_0', '?5')
#75 ('INTERNAL TRANSMISSION COLORING', '?0.80')
#76 ('Unnamed: 76_level_0', '?0.05')
#77 ('CCI', 'B')
#78 ('Unnamed: 78_level_0', 'G')
#79 ('Unnamed: 79_level_0', 'R')
#80 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '280')
#81 ('Unnamed: 81_level_0', '290')
#82 ('Unnamed: 82_level_0', '300')
#83 ('Unnamed: 83_level_0', '310')
#84 ('Unnamed: 84_level_0', '320')
#85 ('Unnamed: 85_level_0', '330')
#86 ('Unnamed: 86_level_0', '340')
#87 ('Unnamed: 87_level_0', '350')
#88 ('Unnamed: 88_level_0', '360')
#89 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '370')
#90 ('Unnamed: 90_level_0', '380')
#91 ('Unnamed: 91_level_0', '390')
#92 ('Unnamed: 92_level_0', '400')
#93 ('Unnamed: 93_level_0', '420')
#94 ('Unnamed: 94_level_0', '440')
#95 ('Unnamed: 95_level_0', '460')
#96 ('Unnamed: 96_level_0', '480')
#97 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '500')
#98 ('Unnamed: 98_level_0', '550')
#99 ('Unnamed: 99_level_0', '600')
#100 ('Unnamed: 100_level_0', '650')
#101 ('Unnamed: 101_level_0', '700')
#102 ('Unnamed: 102_level_0', '800')
#103 ('Unnamed: 103_level_0', '900')
#104 ('Unnamed: 104_level_0', '1000')
#105 ('INTERNAL TRANSMISSION (?/10mm Thick) ', '1200')
#106 ('Unnamed: 106_level_0', '1400')
#107 ('Unnamed: 107_level_0', '1600')
#108 ('Unnamed: 108_level_0', '1800')
#109 ('Unnamed: 109_level_0', '2000')
#110 ('Unnamed: 110_level_0', '2200')
#111 ('Unnamed: 111_level_0', '2400')
#112 ('dn/dT relative (10-6 / ?)', 't(-40~-20)')
#113 ('Unnamed: 113_level_0', 't(-20~0)')
#114 ('Unnamed: 114_level_0', 't(0~20)')
#115 ('Unnamed: 115_level_0', 't(20~40)')
#116 ('Unnamed: 116_level_0', 't(40~60)')
#117 ('Unnamed: 117_level_0', 't(60~80)')
#118 ('dn/dT relative (10-6 / ?)', "C'(-40~-20)")
#119 ('Unnamed: 119_level_0', "C'(-20~0)")
#120 ('Unnamed: 120_level_0', "C'(0~20)")
#121 ('Unnamed: 121_level_0', "C'(20~40)")
#122 ('Unnamed: 122_level_0', "C'(40~60)")
#123 ('Unnamed: 123_level_0', "C'(60~80)")
#124 ('dn/dT relative (10-6 / ?)', 'He-Ne(-40~-20)')
#125 ('Unnamed: 125_level_0', 'He-Ne(20~0)')
#126 ('Unnamed: 126_level_0', 'He-Ne(0~20)')
#127 ('Unnamed: 127_level_0', 'He-Ne(20~40)')
#128 ('Unnamed: 128_level_0', 'He-Ne(40~60)')
#129 ('Unnamed: 129_level_0', 'He-Ne(60~80)')
#130 ('dn/dT relative (10-6 / ?)', 'D(-40~-20)')
#131 ('Unnamed: 131_level_0', 'D(-20~0)')
#132 ('Unnamed: 132_level_0', 'D(0~20)')
#133 ('Unnamed: 133_level_0', 'D(20~40)')
#134 ('Unnamed: 134_level_0', 'D(40~60)')
#135 ('Unnamed: 135_level_0', 'D(60~80)')
#136 ('dn/dT relative (10-6 / ?)', 'e(-40~-20)')
#137 ('Unnamed: 137_level_0', 'e(-20~0)')
#138 ('Unnamed: 138_level_0', 'e(0~20)')
#139 ('Unnamed: 139_level_0', 'e(20~40)')
#140 ('Unnamed: 140_level_0', 'e(40~60)')
#141 ('Unnamed: 141_level_0', 'e(60~80)')
#142 ('dn/dT relative (10-6 / ?)', "F'(-40~-20)")
#143 ('Unnamed: 143_level_0', "F'(-20~0)")
#144 ('Unnamed: 144_level_0', "F'(0~20)")
#145 ('Unnamed: 145_level_0', "F'(20~40)")
#146 ('Unnamed: 146_level_0', "F'(40~60)")
#147 ('Unnamed: 147_level_0', "F'(60~80)")
#148 ('dn/dT relative (10-6 / ?)', 'g(-40~-20)')
#149 ('Unnamed: 149_level_0', 'g(-20~0)')
#150 ('Unnamed: 150_level_0', 'g(0~20)')
#151 ('Unnamed: 151_level_0', 'g(20~40)')
#152 ('Unnamed: 152_level_0', 'g(40~60)')
#153 ('Unnamed: 153_level_0', 'g(60~80)')
#154 ('Constants of dn/dT', ' D0')
#155 ('Unnamed: 155_level_0', ' D1')
#156 ('Unnamed: 156_level_0', ' D2')
#157 ('Unnamed: 157_level_0', ' E0')
#158 ('Unnamed: 158_level_0', ' E1')
#159 ('Unnamed: 159_level_0', '?TK')
#160 ('Thermal Properties', 'StP(?)')
#161 ('Unnamed: 161_level_0', 'AP(?)')
#162 ('Unnamed: 162_level_0', 'Tg(?)')
#163 ('Unnamed: 163_level_0', 'At(?)')
#164 ('Unnamed: 164_level_0', 'SP(?)')
#165 ('CTE?(10-7/?)', '(-30~+70)')
#166 ('Unnamed: 166_level_0', '(100~300)')
#167 ('Conductivity', 'k(W/m?K)')
#168 ('Mechanical Properties', "Young's (E) ")
#169 ('Unnamed: 169_level_0', 'Rigidity (G)')
#170 ('Unnamed: 170_level_0', "Poisson's(?)")
#171 ('Unnamed: 171_level_0', 'Knoop (Hk)')
#172 ('Unnamed: 172_level_0', 'Group')
#173 ('Unnamed: 173_level_0', 'Abrasion(Aa)')
#174 ('Unnamed: 174_level_0', '?')
#175 ('Chemical Properties', 'RW(P)')
#176 ('Unnamed: 176_level_0', 'RA(P)')
#177 ('Unnamed: 177_level_0', 'W(S)max')
#178 ('Unnamed: 178_level_0', 'W(S)min')
#179 ('Unnamed: 179_level_0', 'SR')
#180 ('Unnamed: 180_level_0', 'PR')
#181 ('Bubble Grp', 'B')
#182 ('Spec. Gravity', 'd')
| 27.295964 | 95 | 0.616231 |
794979b3c196de0436899f1d7dca55c2f1d4f582
| 471 |
py
|
Python
|
python/soma_workflow/check_requirement/__main__.py
|
denisri/soma-workflow
|
bc6f2f50d34437e86e850cb0d05ff26b041d560d
|
[
"CECILL-B"
] | null | null | null |
python/soma_workflow/check_requirement/__main__.py
|
denisri/soma-workflow
|
bc6f2f50d34437e86e850cb0d05ff26b041d560d
|
[
"CECILL-B"
] | 44 |
2018-10-30T16:57:10.000Z
|
2022-03-15T10:54:57.000Z
|
python/soma_workflow/check_requirement/__main__.py
|
populse/soma-workflow
|
e6d3e3c33ad41107ee3c959adc4832e6edd047f4
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
if __name__ == '__main__':
from soma_workflow import configuration
import sys
if len(sys.argv) >= 2:
resource_id = sys.argv[1]
else:
resource_id = None
config = configuration.Configuration.load_from_file(resource_id)
if config is not None and config.get_scheduler_type() == 'drmaa':
# import drmaa test
from . import drmaa
drmaa.test_drmaa()
| 27.705882 | 69 | 0.658174 |
79497bb599ff35a8f443bf1bb7a0bf25eca6121d
| 511 |
py
|
Python
|
Stream-3/Full-Stack-Development/9.Django-Blog-Part-Five/2.Create-A-Blog-Post-Form/Blog_prj/Blog_app/urls.py
|
GunnerJnr/_CodeInstitute
|
efba0984a3dc71558eef97724c85e274a712798c
|
[
"MIT"
] | 4 |
2017-10-10T14:00:40.000Z
|
2021-01-27T14:08:26.000Z
|
Stream-3/Full-Stack-Development/9.Django-Blog-Part-Five/2.Create-A-Blog-Post-Form/Blog_prj/Blog_app/urls.py
|
GunnerJnr/_CodeInstitute
|
efba0984a3dc71558eef97724c85e274a712798c
|
[
"MIT"
] | 115 |
2019-10-24T11:18:33.000Z
|
2022-03-11T23:15:42.000Z
|
Stream-3/Full-Stack-Development/9.Django-Blog-Part-Five/2.Create-A-Blog-Post-Form/Blog_prj/Blog_app/urls.py
|
GunnerJnr/_CodeInstitute
|
efba0984a3dc71558eef97724c85e274a712798c
|
[
"MIT"
] | 5 |
2017-09-22T21:42:39.000Z
|
2020-02-07T02:18:11.000Z
|
# coding=utf-8
from django.conf.urls import url
import views
from .views import redirect_root
urlpatterns = [
url(r'^$', redirect_root, name='home'),
url(r'^blog/$', views.post_list, name='post-list'),
url(r'^blog/(?P<slug>[-\w]+)$', views.post_detail, name='post-detail'),
url(r'^blog/top-five/$', views.display_top_five_posts, name='top-five'),
url(r'^blog/post/new/$', views.new_post, name='new_post'),
url(r'^blog/(?P<slug>[-\w]+)/edit-post$', views.edit_post, name='edit-post'),
]
| 36.5 | 81 | 0.64775 |
79497cda7d49ab24a0e02c9aa9c6304b74668d54
| 7,433 |
py
|
Python
|
carsteer.py
|
kneave/RedBoard
|
642f4a00d983c98bdb71c6a4bed96e66b522b9a0
|
[
"MIT"
] | 21 |
2019-10-07T22:55:36.000Z
|
2020-12-09T20:07:03.000Z
|
carsteer.py
|
kneave/RedBoard
|
642f4a00d983c98bdb71c6a4bed96e66b522b9a0
|
[
"MIT"
] | 5 |
2019-09-05T14:15:36.000Z
|
2020-03-17T20:21:42.000Z
|
carsteer.py
|
kneave/RedBoard
|
642f4a00d983c98bdb71c6a4bed96e66b522b9a0
|
[
"MIT"
] | 5 |
2018-05-02T16:38:28.000Z
|
2020-01-02T15:06:02.000Z
|
# Control a robot with a Rock Candy or PiHut PS3 controller.
# The left stick controls the speed and direction of both motors - push up to go forwards and down for backwards.
# The right stick is for steering - push the stick left or right to steer.
# Author: Neil Lambeth. neil@redrobotics.co.uk @NeilRedRobotics
from __future__ import print_function # Make print work with python 2 & 3
from evdev import InputDevice, ecodes
import redboard
dev = InputDevice('/dev/input/event0')
#print(dev)
device = str(dev).find('Rock Candy') # Look for a Rock Candy or PiHut controller
if device != -1:
print ('Controller: Rock Candy PS3 Gamepad')
controller = 1
else:
print ('Controller: PiHut PS3 Gamepad')
controller = 2
# Button mapping for different controllers
if controller == 1: # Rock Candy
triangle, x, square, circle = 307, 305, 304, 306
R1, R2, R3 = 309, 311, 315
L1, L2, L3 = 308, 310, 314
select, start, home = 312, 313, 316
if controller == 2: # PiHut
triangle, x, square, circle = 308, 304, 307, 305
R1, R2, R3 = 311, 313, 318
L1, L2, L3 = 310, 312, 317
select, start, home = 314, 315, 316
# Set up variables
RX = 0
LX = 0
RY = 0
LY = 0
LeftY = 0
RightX = 0
RightX_R = 0
RightX_L = 0
Leftmotor = 0
Rightmotor = 0
LM_OLD = 0
RM_OLD = 0
turbo = False
invertX = False
# Read gamepad buttons
for event in dev.read_loop():
#print(event) # Uncomment to show all button data
if event.type == ecodes.EV_KEY:
#print(event.code) # Uncomment to show each keycode
if event.value == 1: # Button pressed
if event.code == triangle:
print ('triangle')
elif event.code == x:
print ('X')
elif event.code == square:
print ('Square')
elif event.code == circle:
print ('Circle')
elif event.code == R1:
print ('R1 - Turbo On')
turbo = True
elif event.code == R2:
print ('R2')
elif event.code == R3:
print ('R3')
elif event.code == L1:
print ('L1')
elif event.code == L2:
print ('L2')
elif event.code == L3:
print ('L3')
elif event.code == select and invertX == False:
print ('Invert X')
invertX = True
elif event.code == select and invertX == True:
print ('Normal X')
invertX = False
elif event.code == start:
print ('Start')
elif event.code == home:
print ('Home')
if event.value == 0: # Button released
if event.code == R1: # Turbo Off
print ('R1 - Turbo Off')
turbo = False
if event.type == ecodes.EV_ABS:
print('')
print('---------------------------------')
# Dpad
if event.code == 16:
if event.value == -1:
print ('Dpad LEFT')
if event.value == 1:
print ('Dpad RIGHT')
if event.code == 17:
if event.value == -1:
print ('Dpad UP')
if event.value == 1:
print ('Dpad DOWN')
# Analogue sticks
if event.code == 1: # Left analogue Vertical stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for forwards
# and 0- -127 for backwards
LY = event.value
if LY < 128: # Forwards
LeftY = 127 - LY
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif LY >= 128: # Backwards
LeftY = LY - 128
LeftY = -LeftY # Make negative
#print('LY =',LY)
#print('LeftY = ',LeftY)
elif event.code == 0: # Left analogue Horizontal stick
LX = event.value
if LX < 128: # Left
L_Left = 127 - LX
print ('L_Left = ',L_Left)
elif LX >= 127: # Right
L_Right = LX - 128
print ('L_Right = ',L_Right)
elif event.code == 5: # Right analogue Vertical stick
RY = event.value
if RY <= 128: # Forwards
R_Fwd = 127 - RY
print ('R_Fwd = ',R_Fwd)
elif RY >= 127: # Backwards
R_Fwd = RY - 128
R_Fwd = -R_Fwd # Make negative
print ('R_Rev = ',R_Fwd)
elif event.code == 2: # Right analogue Horizontal stick
# The analogue stick gives a value between 0-255
# Convert the value to 0-127 for left
# and 0-127 for right
RX = event.value
if RX < 128: # Left
RightX_L = 127 - RX
#print('RX =',RX)
#print('RightX_Left = ',RightX_L)
if RX > 128: # Right
RightX_R = RX - 128
#print('RX = ',RX)
#print('RightX_Right = ',RightX_R)
if RX == 128: # Make sure both values are zero if stick is in the centre
RightX_L = 0
RightX_R = 0
# Prepare the values to send to the motors
if LeftY == 0: #Turn on the spot if not going forwards or backwards
if RX <= 128: # Turn Left
Leftmotor = -RightX_L # Reverse motor to turn on the spot
Rightmotor = RightX_L
elif RX >= 127: # Turn Right
Leftmotor = RightX_R
Rightmotor = -RightX_R # Reverse motor to turn on the spot
elif LY <= 128: # Forwards
print ('Forwards')
Leftmotor = LeftY - RightX_L # Mix steering values
if Leftmotor <1: # Stop motor going backwards
Leftmotor = 0;
Rightmotor = LeftY - RightX_R # Mix steering values
if Rightmotor <1: # Stop motor going backwards
Rightmotor = 0;
elif LY >= 127: # Backwards
print('Backwards')
Leftmotor = LeftY + RightX_L # Mix steering values
if Leftmotor >-1: # Stop motor going forwards
Leftmotor = 0;
Rightmotor = LeftY + RightX_R # Mix steering values
if Rightmotor >-1: # Stop motor going forwards
Rightmotor = 0;
if turbo == True: # Double speed for turbo
LM = Leftmotor * 2
RM = Rightmotor * 2
else: # Normal speed
LM = Leftmotor
RM = Rightmotor
if LM != LM_OLD or RM != RM_OLD: # Only print motor speeds if they have changed
print ('Left motor =',LM)
print ('Right motor =',RM)
LM_OLD = LM
RM_OLD = RM
# Set motor speed and direction
if invertX == True: # Reverse steering controls
print('Inverted steering')
redboard.M2_8bit(RM)
redboard.M1_8bit(LM)
else: # Normal steering controls
print ('Normal steering')
redboard.M2_8bit(LM)
redboard.M1_8bit(RM)
| 28.262357 | 113 | 0.493206 |
79497eacb4225881a9345cd093eb3eb6da0ad20e
| 6,579 |
py
|
Python
|
kubernetes/client/models/v1_replica_set_condition.py
|
kevingessner/python
|
3f4d09d260cf0839fae8173852c69e0419188454
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_replica_set_condition.py
|
kevingessner/python
|
3f4d09d260cf0839fae8173852c69e0419188454
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_replica_set_condition.py
|
kevingessner/python
|
3f4d09d260cf0839fae8173852c69e0419188454
|
[
"Apache-2.0"
] | 1 |
2018-07-19T16:37:20.000Z
|
2018-07-19T16:37:20.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ReplicaSetCondition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None):
"""
V1ReplicaSetCondition - a model defined in Swagger
"""
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""
Gets the last_transition_time of this V1ReplicaSetCondition.
The last time the condition transitioned from one status to another.
:return: The last_transition_time of this V1ReplicaSetCondition.
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""
Sets the last_transition_time of this V1ReplicaSetCondition.
The last time the condition transitioned from one status to another.
:param last_transition_time: The last_transition_time of this V1ReplicaSetCondition.
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""
Gets the message of this V1ReplicaSetCondition.
A human readable message indicating details about the transition.
:return: The message of this V1ReplicaSetCondition.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this V1ReplicaSetCondition.
A human readable message indicating details about the transition.
:param message: The message of this V1ReplicaSetCondition.
:type: str
"""
self._message = message
@property
def reason(self):
"""
Gets the reason of this V1ReplicaSetCondition.
The reason for the condition's last transition.
:return: The reason of this V1ReplicaSetCondition.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this V1ReplicaSetCondition.
The reason for the condition's last transition.
:param reason: The reason of this V1ReplicaSetCondition.
:type: str
"""
self._reason = reason
@property
def status(self):
"""
Gets the status of this V1ReplicaSetCondition.
Status of the condition, one of True, False, Unknown.
:return: The status of this V1ReplicaSetCondition.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1ReplicaSetCondition.
Status of the condition, one of True, False, Unknown.
:param status: The status of this V1ReplicaSetCondition.
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
self._status = status
@property
def type(self):
"""
Gets the type of this V1ReplicaSetCondition.
Type of replica set condition.
:return: The type of this V1ReplicaSetCondition.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1ReplicaSetCondition.
Type of replica set condition.
:param type: The type of this V1ReplicaSetCondition.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ReplicaSetCondition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 27.298755 | 105 | 0.583675 |
79497eb9481b5b8e18bb58b89e8e984ce66b4c08
| 1,603 |
py
|
Python
|
src/commercetools/importapi/client/import_operations/by_project_key_product_variant_patches_import_sink_key_by_import_sink_key_import_operations_by_id_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | 1 |
2021-04-07T20:01:30.000Z
|
2021-04-07T20:01:30.000Z
|
src/commercetools/importapi/client/import_operations/by_project_key_product_variant_patches_import_sink_key_by_import_sink_key_import_operations_by_id_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | null | null | null |
src/commercetools/importapi/client/import_operations/by_project_key_product_variant_patches_import_sink_key_by_import_sink_key_import_operations_by_id_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | null | null | null |
# Generated file, please do not change!!!
import typing
from ...models.errors import ErrorResponse
from ...models.importoperations import ImportOperation
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyProductVariantPatchesImportSinkKeyByImportSinkKeyImportOperationsByIdRequestBuilder:
_client: "BaseClient"
_project_key: str
_import_sink_key: str
_id: str
def __init__(
self,
project_key: str,
import_sink_key: str,
id: str,
client: "BaseClient",
):
self._project_key = project_key
self._import_sink_key = import_sink_key
self._id = id
self._client = client
def get(
self,
*,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> "ImportOperation":
"""Retrieves the import operation with the given id."""
headers = {} if headers is None else headers
response = self._client._get(
endpoint=f"/{self._project_key}/product-variant-patches/importSinkKey={self._import_sink_key}/import-operations/{self._id}",
params={},
headers=headers,
options=options,
)
if response.status_code == 200:
return ImportOperation.deserialize(response.json())
elif response.status_code in (404, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
raise ValueError("Unhandled status code %s", response.status_code)
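# --- Editor's illustrative sketch (not part of the generated file). The
# builder is normally reached through the generated client's fluent chain;
# here it is constructed directly with a hypothetical ``client`` instance.
def _example_fetch_operation(client: "BaseClient") -> "ImportOperation":
    """Fetch a single import operation via the builder defined above."""
    builder = ByProjectKeyProductVariantPatchesImportSinkKeyByImportSinkKeyImportOperationsByIdRequestBuilder(
        project_key="my-project",          # hypothetical values
        import_sink_key="my-import-sink",
        id="some-operation-id",
        client=client,
    )
    return builder.get()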
| 32.06 | 136 | 0.650655 |
79497f042372b5623c948cdf5f365645d21e656b
| 531 |
py
|
Python
|
djstripe/migrations/__init__.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | null | null | null |
djstripe/migrations/__init__.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | null | null | null |
djstripe/migrations/__init__.py
|
TigerDX/dj-stripe
|
2fd4897abaedf2d9faa3dd5af86402dae3ab86a3
|
[
"BSD-3-Clause"
] | 1 |
2021-08-30T10:51:49.000Z
|
2021-08-30T10:51:49.000Z
|
"""
Migrations have been built for Django=>1.7 versions. Alternative migrations
for Django<1.7 users are provided with the ``south_migrations`` dir.
"""
SOUTH_ERROR_MESSAGE = """\n
For South support, customize the SOUTH_MIGRATION_MODULES setting like so:
SOUTH_MIGRATION_MODULES = {
'djstripe': 'djstripe.south_migrations',
}
"""
try:
from django.db import migrations # noqa
except ImportError:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(SOUTH_ERROR_MESSAGE)
| 27.947368 | 75 | 0.753296 |
79497f35aca134ae3e71ae1b1b611a241cd4bd1c
| 541 |
py
|
Python
|
smii/modeling/setup.py
|
ar4/smii
|
b7eee03f2a4c8f56f6dde61738e8aa1090621ba3
|
[
"MIT"
] | 3 |
2018-07-02T15:50:34.000Z
|
2019-02-28T11:42:34.000Z
|
smii/modeling/setup.py
|
ar4/smii
|
b7eee03f2a4c8f56f6dde61738e8aa1090621ba3
|
[
"MIT"
] | null | null | null |
smii/modeling/setup.py
|
ar4/smii
|
b7eee03f2a4c8f56f6dde61738e8aa1090621ba3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('modeling', parent_package, top_path)
config.add_subpackage('propagators')
config.add_subpackage('store_wavefield')
config.add_subpackage('record_receivers')
config.add_subpackage('wavelets')
config.add_subpackage('imaging_condition')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
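# Editor's note (hypothetical sketch, not taken from this repository): a
# parent-level setup.py normally chains to this configuration by declaring the
# package as a subpackage, e.g.
#
#     def configuration(parent_package='', top_path=None):
#         from numpy.distutils.misc_util import Configuration
#         config = Configuration(None, parent_package, top_path)
#         config.add_subpackage('modeling')
#         return config
#
# after which the usual ``python setup.py build`` / ``install`` commands work.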
| 36.066667 | 64 | 0.763401 |
7949807f891fb6cd60ca8adcff9e44301647a928
| 7,214 |
py
|
Python
|
run.py
|
ucynthy12/Vault
|
812261b49dfb126c59da031f0fe618008fe4f871
|
[
"Unlicense"
] | null | null | null |
run.py
|
ucynthy12/Vault
|
812261b49dfb126c59da031f0fe618008fe4f871
|
[
"Unlicense"
] | null | null | null |
run.py
|
ucynthy12/Vault
|
812261b49dfb126c59da031f0fe618008fe4f871
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3.6
from user import User
from credential import Credential
from art import *
import random
import sys
from simple_colors import *
def create_user(fname,lname,pswd,c_pswd):
"""
Function to create a new user
"""
new_user= User(fname,lname,pswd,c_pswd)
return new_user
def save_users(user):
"""
Function to save user
"""
user.save_user()
def create_account(ac_name,us_name,ac_pswd):
"""
Function to create a new account
"""
new_account= Credential(ac_name,us_name,ac_pswd)
return new_account
def save_accounts(account):
"""
Function to save account
"""
account.save_account()
def delete_account(account):
"""
Function to delete account
"""
account.delete_account()
def find_account(account):
"""
Function that finds an account name and returns the account
"""
return Credential.find_by_account(account)
def check_existing_account(account):
"""
Function that check if an account exists with that account name and return a Boolean
"""
return Credential.account_exist(account)
def display_accounts():
"""
Function that returns all the saved accounts
"""
return Credential.display_accounts()
def generate_random_pswd():
"""
Function to generate a random password
"""
random= Credential.generate_random()
return random
def main():
tprint("The Vault")
# print(green('The Vault','bold'))
print('\n')
# print("\u001b[34m"+"Hello Welcome to your password locker.\n What is your name?"+"\u001b[34m")
print(blue('Hello Welcome to')+ green(' The Vault','bold')+ blue(' your password locker.'))
print(blue('What is your name?'))
user_name=input()
print('\n')
print(blue(f"Hello {user_name}, Fill in your information to create a password locker account."))
print('\n')
print(blue('New Password'))
print(blue("-"*15))
print(blue("First Name"))
f_name=input()
print(blue("Last Name"))
l_name=input()
print(blue("Enter password"))
passWord=input()
print(blue("Confirm password"))
cPassword=input()
save_users(create_user(f_name,l_name,passWord,cPassword))
print("\n")
print(green(f"{f_name}","bright")+ blue(" Your new password locker was created succesfully!Your new password is ")+green(f'{passWord}','bold'))
print('-'*85)
print("\n")
print(blue("Please login with your password "))
login=input()
print('\n')
if login== passWord:
while True:
print(blue("Use these short codes :","underlined"))
print(blue(" cp -----create a password \n dp -----display password \n fp -----find password \n dlt ----- delete password \n ex ----- exit the account list"))
print('\n')
short_code= input().lower()
print('\n')
if short_code == 'cp':
print(blue('New Credential','underlined'))
# print('-'*15)
print(blue('Account name '))
accountName=input()
print(blue('Username '))
userName=input()
print('\n')
print(blue("Use these short codes:","underlined"))
print(blue(" gp ---- generate your own password \n rp ---- generate random password "))
short_codes= input().lower()
print('\n')
if short_codes == 'gp':
print(blue('Password','underlined'))
password=input()
save_accounts(create_account(accountName,userName,password))
print('\n')
print(blue(f"{accountName}",["bold","underlined"]))
print(blue("Username is ")+ green(f'{userName}','bright'))
print(blue("Your password is ")+ green(f'{password}','bright'))
print('\n')
elif short_codes == 'rp':
password=generate_random_pswd()
save_accounts(create_account(accountName,userName,password))
print('\n')
print(blue(f"{accountName}",['bold','underlined']))
print(blue("Username is ")+ green(f'{userName}','bright'))
print(blue("Your password is ")+ green(f'{password}','bright'))
print('\n')
else:
print(blue("I didn't get that. Please choose the right code!"))
elif short_code == "dp":
if display_accounts():
print("\n")
print (blue('Here is a list of all passwords:','underlined'))
for accounts in display_accounts():
print(green(f"{accounts.acc_name}","bold")+"----"+ green(f"{accounts.username}","italic")+"----"+green(f"{accounts.acc_pswd}","bright"))
print('\n')
else:
print('\n')
print(blue("You don't seem to have any password saved"))
print('\n')
elif short_code == "fp":
print('\n')
print(blue("Enter the account name you want to search for","blink"))
search_account= input()
if check_existing_account(search_account):
search_account= find_account(search_account)
print('\n')
print(blue(f"{search_account.acc_name}","underlined"))
print(blue("Username is ")+ green(f"{search_account.username}","bright"))
print(blue("Password is ")+ green(f"{search_account.acc_pswd}","bright"))
print('\n')
else:
print(blue("The password for this account doesn't exist in your vault"))
print('\n')
elif short_code == "dlt":
print('\n')
print(blue('Please enter account name you wish to delete'))
print('\n')
dlt_account=input()
if find_account(dlt_account):
search_account= find_account(dlt_account)
print("-"*20)
search_account.delete_account()
print('\n')
print(blue("Your password for :")+green(f'{search_account.acc_name}','bold')+blue(" was successfully deleted!"))
print('\n')
else:
print(blue('This password does not exist in your Vault'))
elif short_code =='ex':
print('\n')
print(blue('Thank You for using The Vault','bold'))
print('\n')
print(blue('Bye .....','bold'))
break
else:
print(blue("I really didn't get that. Try again"))
print('\n')
else:
print(blue("Your password doesn't match"))
if __name__ == '__main__':
main()
| 31.779736 | 170 | 0.525229 |
79498093c1a9c50aba17b5b1e3238c9805ac3864
| 150 |
py
|
Python
|
my_pic_hub/apps.py
|
asandelarvine/My_Pic_Hub
|
3cf11c01485e5bee75b3b3937525cedc1d55d473
|
[
"MIT"
] | null | null | null |
my_pic_hub/apps.py
|
asandelarvine/My_Pic_Hub
|
3cf11c01485e5bee75b3b3937525cedc1d55d473
|
[
"MIT"
] | null | null | null |
my_pic_hub/apps.py
|
asandelarvine/My_Pic_Hub
|
3cf11c01485e5bee75b3b3937525cedc1d55d473
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class MyPicHubConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'my_pic_hub'
| 21.428571 | 56 | 0.766667 |
794980c0c42debd9d7d5258ed91132242ddd8be1
| 1,129 |
py
|
Python
|
ums/userapp/views/base.py
|
hookehu/web
|
637047ff47bf5df6ee3152e6976162bb8e85531c
|
[
"MIT"
] | null | null | null |
ums/userapp/views/base.py
|
hookehu/web
|
637047ff47bf5df6ee3152e6976162bb8e85531c
|
[
"MIT"
] | null | null | null |
ums/userapp/views/base.py
|
hookehu/web
|
637047ff47bf5df6ee3152e6976162bb8e85531c
|
[
"MIT"
] | null | null | null |
#-*- coding:utf-8 -*-
import functools
from functools import update_wrapper
from django.views.generic import View
from django.utils.decorators import method_decorator, classonlymethod
class BaseView(View):
need_site_permission = False
url = None
def __init__(self, request, *args, **kwargs):
View.__init__(self, **kwargs)
self.request = request
print request.method
self.request_method = request.method.lower()
self.init_request(*args, **kwargs)
def init_request(self, *args, **kwargs):
pass
def get(self, request, *args, **kwargs):
pass
def post(self, request, *args, **kwargs):
pass
@classonlymethod
def as_view(cls):
def view(request, *args, **kwargs):
self = cls(request, *args, **kwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
print self.request_method
if self.request_method == 'get':
handler = self.get
else:
handler = self.post
return handler(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
view.need_site_permission = cls.need_site_permission
return view
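# --- Editor's illustrative sketch (not part of the original module): a
# concrete view built on BaseView. The URL wiring shown in the comment is
# hypothetical.
from django.http import HttpResponse

class PingView(BaseView):
    def get(self, request, *args, **kwargs):
        return HttpResponse('pong')

# urls.py (hypothetical):
#     url(r'^ping/$', PingView.as_view())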
| 24.021277 | 69 | 0.708592 |
794981e5a40dea009311f7db51ee3fec5154019b
| 2,820 |
py
|
Python
|
docs/source/conf.py
|
extra2000/qeeqbox-honeypots-podman
|
aeeb4bd83b8479e611294cde2cc48af2fe226f32
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
extra2000/qeeqbox-honeypots-podman
|
aeeb4bd83b8479e611294cde2cc48af2fe226f32
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
extra2000/qeeqbox-honeypots-podman
|
aeeb4bd83b8479e611294cde2cc48af2fe226f32
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import subprocess as sp
sys.path.insert(0, os.path.abspath(os.path.join('.')))
from version import version as __version__
# -- Project information -----------------------------------------------------
project = 'Qeeqbox Honeypots Podman'
copyright = '2021, extra2000'
author = 'nick@extra2000.io'
version = __version__
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.githubpages',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinxcontrib.programoutput',
'sphinx_rtd_dark_mode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
pygments_style = 'sphinx'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = os.path.join('_static', 'logo.svg')
html_theme_options = {
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'top',
'style_external_links': False,
'style_nav_header_background': 'linear-gradient(#608081, #536f70)',
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
html_context = {
'display_github': True,
'github_user': 'extra2000',
'github_repo': 'qeeqbox-honeypots-podman',
'github_version': 'master/docs/source/',
}
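# Editor's note (not part of the original configuration): with this layout the
# HTML docs are typically built from the repository root with something like
#
#     sphinx-build -b html docs/source docs/build/html
#
# and the ``html_context`` block above is what feeds the "Edit on GitHub"
# links rendered by the sphinx_rtd_theme page templates.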
| 30.652174 | 79 | 0.675887 |
794981ff7947e35ef0202c6c50e5cb34a5083580
| 8,840 |
py
|
Python
|
Walkoff/main.py
|
R3dFruitRollUp/WALKOFF-Apps
|
d92dec666c253c474116f041df163a8149e06b2b
|
[
"CC0-1.0"
] | null | null | null |
Walkoff/main.py
|
R3dFruitRollUp/WALKOFF-Apps
|
d92dec666c253c474116f041df163a8149e06b2b
|
[
"CC0-1.0"
] | null | null | null |
Walkoff/main.py
|
R3dFruitRollUp/WALKOFF-Apps
|
d92dec666c253c474116f041df163a8149e06b2b
|
[
"CC0-1.0"
] | 1 |
2018-12-01T19:37:43.000Z
|
2018-12-01T19:37:43.000Z
|
import logging
from apps import App, action
import requests
from requests.exceptions import Timeout
import json
from core.config.paths import certificate_path
import time
logger = logging.getLogger(__name__)
class Unauthorized(Exception):
pass
class UnknownResponse(Exception):
pass
class NotConnected(Exception):
pass
DEFAULT_TIMEOUT = 2
class Main(App):
def __init__(self, name=None, device=None):
App.__init__(self, name, device)
self.is_connected = False
self.headers = None
self.refresh_token = None
self.username = self.device_fields['username']
self.walkoff_address = self.device_fields['ip']
port = self.device_fields['port']
if port:
self.walkoff_address += ':{}'.format(port)
self.is_https = self.walkoff_address.startswith('https')
@action
def connect(self, timeout=DEFAULT_TIMEOUT):
try:
response = self._request('post', '/api/auth', timeout,
data=dict(username=self.username,
password=self.device.get_encrypted_field('password')))
except Timeout:
return 'Connection timed out', 'TimedOut'
status_code = response.status_code
if status_code == 404:
return 'Could not locate Walkoff instance', 'WalkoffNotFound'
elif status_code == 401:
return 'Invalid login', 'AuthenticationError'
elif status_code == 201:
response = response.json()
self.refresh_token = response['refresh_token']
self.reset_authorization(response['access_token'])
self.is_connected = True
return 'Success'
else:
return 'Unknown response {}'.format(status_code), 'UnknownResponse'
@action
def disconnect(self, timeout=DEFAULT_TIMEOUT):
if self.is_connected:
try:
self._request('post', '/api/auth/logout', timeout, headers=self.headers,
data=dict(refresh_token=self.refresh_token))
return 'Success'
except Timeout:
return 'Connection timed out', 'TimedOut'
else:
return 'Not connected to Walkoff', 'NotConnected'
@action
def is_connected(self):
return self.is_connected
@action
def get_all_workflows(self, timeout=DEFAULT_TIMEOUT):
return self.standard_request('get', '/api/playbooks', timeout, headers=self.headers)
@action
def get_workflow_uid(self, playbook_name, workflow_name, timeout=DEFAULT_TIMEOUT):
try:
response = self.request_with_refresh('get', '/api/playbooks', timeout, headers=self.headers)
except Timeout:
return 'Connection timed out', 'TimedOut'
except Unauthorized:
return 'Unauthorized credentials', 'Unauthorized'
except NotConnected:
return 'Not connected to Walkoff', 'NotConnected'
except UnknownResponse:
return 'Unknown error occurred', 'UnknownResponse'
else:
response = response.json()
playbook = next((playbook for playbook in response if playbook['name'] == playbook_name), None)
if playbook is None:
return 'Playbook not found', 'WorkflowNotFound'
workflow = next((workflow for workflow in playbook['workflows'] if workflow['name'] == workflow_name), None)
if workflow is None:
return 'Workflow not found', 'WorkflowNotFound'
else:
return workflow['uid']
@action
def trigger(self, names=None, inputs=None, data=None, tags=None, timeout=DEFAULT_TIMEOUT):
trigger_data = {}
if names:
trigger_data['names'] = names
if inputs:
trigger_data['inputs'] = inputs
if data:
trigger_data['data'] = data
if tags:
trigger_data['tags'] = tags
        return self.standard_request('post', '/api/triggers/execute', timeout, headers=self.headers, data=trigger_data)
@action
def get_workflow_results(self, timeout=DEFAULT_TIMEOUT):
return self.standard_request('get', '/api/workflowresults', timeout, headers=self.headers)
@action
def wait_for_workflow_completion(self, execution_uid, timeout=60*5, request_timeout=DEFAULT_TIMEOUT, wait_between_requests=0.1):
if timeout < request_timeout:
return 'Function timeout must be greater than request timeout', 'InvalidInput'
elif timeout < wait_between_requests:
return 'Function timeout must be greater than wait between requests', 'InvalidInput'
start = time.time()
while time.time() - start < timeout:
try:
                response = self.request_with_refresh('get', '/api/workflowresults/{}'.format(execution_uid), request_timeout, headers=self.headers)
if response.status_code == 200:
response = response.json()
if response['status'] == 'completed':
return response
time.sleep(wait_between_requests)
except Timeout:
return 'Connection timed out', 'TimedOut'
except Unauthorized:
return 'Unauthorized credentials', 'Unauthorized'
except NotConnected:
return 'Not connected to Walkoff', 'NotConnected'
except UnknownResponse:
return 'Unknown error occurred', 'UnknownResponse'
def standard_request(self, method, address, timeout, headers=None, data=None, **kwargs):
try:
response = self.request_with_refresh(method, address, timeout, headers=headers, data=data, **kwargs)
if response.status_code == 400:
return 'Bad Request', 'BadRequest'
return response.json()
except Timeout:
return 'Connection timed out', 'TimedOut'
except Unauthorized:
return 'Unauthorized credentials', 'Unauthorized'
except NotConnected:
return 'Not connected to Walkoff', 'NotConnected'
except UnknownResponse:
return 'Unknown error occurred', 'UnknownResponse'
def _format_request_args(self, address, timeout, headers=None, data=None, **kwargs):
address = '{0}{1}'.format(self.walkoff_address, address)
args = kwargs
args['timeout'] = timeout
if not (self.headers is None and headers is None):
args['headers'] = headers if headers is not None else self.headers
if data is not None:
args['json'] = data
if self.is_https:
args['verify'] = certificate_path
return address, args
def _request(self, method, address, timeout, headers=None, data=None, **kwargs):
address, args = self._format_request_args(address, timeout, headers, data, **kwargs)
if method == 'put':
return requests.put(address, **args)
elif method == 'post':
return requests.post(address, **args)
elif method == 'get':
return requests.get(address, **args)
elif method == 'delete':
return requests.delete(address, **args)
def request_with_refresh(self, method, address, timeout, headers=None, data=None, **kwargs):
if self.is_connected:
response = self._request(method, address, timeout, headers, data, **kwargs)
if response.status_code != 401:
return response
else:
                self._refresh_token(timeout)
response = self._request(method, address, timeout, headers, data, **kwargs)
if response.status_code == 401:
self.is_connected = False
raise Unauthorized
else:
return response
else:
raise NotConnected
    def _refresh_token(self, timeout):
        # Renamed (leading underscore) to avoid clashing with the refresh_token
        # attribute set in __init__/connect; the call goes through _request().
        headers = {'Authorization': 'Bearer {}'.format(self.refresh_token)}
        response = self._request('post', '/api/auth/refresh', timeout, headers=headers)
if response.status_code == 401:
raise Unauthorized
elif response.status_code == 201:
key = response.json()
self.reset_authorization(key['access_token'])
else:
raise UnknownResponse
def reset_authorization(self, token):
self.headers = {'Authorization': 'Bearer {}'.format(token)}
def shutdown(self):
try:
self._request('post', '/api/auth/logout', DEFAULT_TIMEOUT, headers=self.headers,
data=dict(refresh_token=self.refresh_token))
except Timeout:
logger.warning('Could not log out. Connection timed out')
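# --- Editor's illustrative sketch (not part of the WALKOFF app): stripped of
# the framework details, request_with_refresh() above implements this generic
# "retry once after refreshing the token" pattern. Both arguments are
# caller-supplied callables; the response object only needs a status_code.
def _retry_once_on_401(send_request, refresh_access_token):
    response = send_request()
    if response.status_code != 401:
        return response
    refresh_access_token()
    response = send_request()
    if response.status_code == 401:
        raise Unauthorized()
    return response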
| 39.81982 | 139 | 0.609276 |
794982074f85fe37f8d04ff55d776b1456b4ac93
| 3,098 |
py
|
Python
|
indico/core/db/sqlalchemy/descriptions.py
|
uxmaster/indico
|
ecd19f17ef6fdc9f5584f59c87ec647319ce5d31
|
[
"MIT"
] | 1 |
2019-11-03T11:34:16.000Z
|
2019-11-03T11:34:16.000Z
|
indico/core/db/sqlalchemy/descriptions.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
indico/core/db/sqlalchemy/descriptions.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.util.string import MarkdownText, PlainText, RichMarkup
from indico.util.struct.enum import RichIntEnum
class RenderMode(RichIntEnum):
"""Rendering formats that a description can be written in."""
__titles__ = [None, 'HTML', 'Markdown', 'Plain Text']
html = 1
markdown = 2
plain_text = 3
RENDER_MODE_WRAPPER_MAP = {
RenderMode.html: RichMarkup,
RenderMode.markdown: MarkdownText,
RenderMode.plain_text: PlainText
}
class RenderModeMixin(object):
"""Mixin to add a plaintext/html/markdown-enabled column."""
possible_render_modes = {RenderMode.plain_text}
default_render_mode = RenderMode.plain_text
@declared_attr
def render_mode(cls):
# Only add the column if there's a choice
# between several alternatives
if len(cls.possible_render_modes) > 1:
return db.Column(
PyIntEnum(RenderMode),
default=cls.default_render_mode,
nullable=False
)
else:
return cls.default_render_mode
@classmethod
def _render_getter(cls, attr_name):
def _getter(self):
selected_mode = (self.default_render_mode
if len(self.possible_render_modes) == 1 or self.render_mode is None
else self.render_mode)
description_wrapper = RENDER_MODE_WRAPPER_MAP[selected_mode]
return description_wrapper(getattr(self, attr_name))
return _getter
@classmethod
def _render_setter(cls, attr_name):
def _setter(self, value):
setattr(self, attr_name, value)
return _setter
@classmethod
def _render_expression(cls, attr_name):
def _expression(cls):
return getattr(cls, attr_name)
return _expression
@classmethod
def create_hybrid_property(cls, attr_name):
"""Create a hybrid property that does the rendering of the column.
:param attr_name: a name for the attribute the unprocessed value can be
accessed through (e.g. `_description`).
"""
return hybrid_property(cls._render_getter(attr_name), fset=cls._render_setter(attr_name),
expr=cls._render_expression(attr_name))
class DescriptionMixin(RenderModeMixin):
marshmallow_aliases = {'_description': 'description'}
@declared_attr
def _description(cls):
return db.Column(
'description',
db.Text,
nullable=False,
default=''
)
description = RenderModeMixin.create_hybrid_property('_description')
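# --- Editor's illustrative sketch (not part of Indico): a model opting into
# markdown/HTML descriptions through the mixin above. The table and column
# names here are hypothetical.
class _ExampleNote(DescriptionMixin, db.Model):
    __tablename__ = 'example_notes'
    possible_render_modes = {RenderMode.html, RenderMode.markdown}
    default_render_mode = RenderMode.markdown
    id = db.Column(db.Integer, primary_key=True)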
| 30.98 | 97 | 0.662686 |
7949822f7a946e6b3c767729ff7593a77f67f2f0
| 2,328 |
py
|
Python
|
tests/acceptance/cors-test.py
|
gevious/flask_slither
|
bf1fd1e58224c19883f4b19c5f727f47ee9857da
|
[
"MIT"
] | 11 |
2015-05-26T10:31:02.000Z
|
2017-06-18T07:55:06.000Z
|
tests/acceptance/cors-test.py
|
gevious/flask_slither
|
bf1fd1e58224c19883f4b19c5f727f47ee9857da
|
[
"MIT"
] | null | null | null |
tests/acceptance/cors-test.py
|
gevious/flask_slither
|
bf1fd1e58224c19883f4b19c5f727f47ee9857da
|
[
"MIT"
] | 1 |
2017-11-28T01:22:19.000Z
|
2017-11-28T01:22:19.000Z
|
# -*- coding: utf-8 -*-
# The cors test ensures that the CORS functionality is working for resources.
from bson.objectid import ObjectId
from flask import Flask
from flask_slither import register_resource
from flask_slither.resources import BaseResource
from pymongo import MongoClient
import json
import unittest
class CorsResource(BaseResource):
db_collection = 'cors'
cors_enabled = True
class CorsTest(unittest.TestCase):
def setUp(self):
self.app = Flask('Cors')
self.app.config['TESTING'] = True
self.client = self.app.test_client()
register_resource(self.app, CorsResource, url="cors")
self.db_client = MongoClient('localhost', 27017)
self.db = self.db_client['test_slither']
self._load_fixtures()
def tearDown(self):
self.db['cors'].drop()
self.db_client.close()
self.client = None
self.app = None
def _load_fixtures(self):
fixtures = [
{'name': "Cors single record"},
]
for f in fixtures:
self.db['cors'].insert(f)
def test_basic_check(self):
"""Check header of get response to ensure it matches CORS spec"""
with self.app.test_client() as c:
r = c.open('/cors', method='OPTIONS')
self.assertEquals(r.status_code, 200)
expected_headers = {
# 'access-control-allow-origin': "http://localhost",
'access-control-allow-methods':
"GET, POST, PUT, PATCH, DELETE, OPTIONS",
'access-control-max-age': "21600",
}
for k, v in expected_headers.items():
self.assertEquals(
r.headers.get(k), v, "Bad header: {}".format(k))
self.assertFalse('access-control-allow-headers' in r.headers)
def test_check_rq_header(self):
"""Check header of get response to ensure it matches CORS spec"""
with self.app.test_client() as c:
headers = {'access-control-request-headers': "Authorization"}
r = c.open('/cors', method='OPTIONS',
headers=headers)
self.assertEquals(r.status_code, 200)
self.assertEquals(r.headers['access-control-allow-headers'],
'Authorization')
| 33.73913 | 77 | 0.598797 |
794982bba66fef9fdf365dad228cc37a67d2037e
| 15,572 |
py
|
Python
|
ironic/tests/unit/drivers/modules/redfish/test_utils.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/drivers/modules/redfish/test_utils.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/drivers/modules/redfish/test_utils.py
|
ljmcgann/ironic
|
09f79416e2820cf0fcef001c4c956b7732b7e7ca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import os
from unittest import mock
from oslo_config import cfg
from oslo_utils import importutils
import requests
from ironic.common import exception
from ironic.drivers.modules.redfish import utils as redfish_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
sushy = importutils.try_import('sushy')
INFO_DICT = db_utils.get_test_redfish_info()
class RedfishUtilsTestCase(db_base.DbTestCase):
def setUp(self):
super(RedfishUtilsTestCase, self).setUp()
# Default configurations
self.config(enabled_hardware_types=['redfish'],
enabled_power_interfaces=['redfish'],
enabled_boot_interfaces=['redfish-virtual-media'],
enabled_management_interfaces=['redfish'])
# Redfish specific configurations
self.config(connection_attempts=1, group='redfish')
self.node = obj_utils.create_test_node(
self.context, driver='redfish', driver_info=INFO_DICT)
self.parsed_driver_info = {
'address': 'https://example.com',
'system_id': '/redfish/v1/Systems/FAKESYSTEM',
'username': 'username',
'password': 'password',
'verify_ca': True,
'auth_type': 'auto',
'node_uuid': self.node.uuid
}
def test_parse_driver_info(self):
response = redfish_utils.parse_driver_info(self.node)
self.assertEqual(self.parsed_driver_info, response)
def test_parse_driver_info_default_scheme(self):
self.node.driver_info['redfish_address'] = 'example.com'
response = redfish_utils.parse_driver_info(self.node)
self.assertEqual(self.parsed_driver_info, response)
def test_parse_driver_info_default_scheme_with_port(self):
self.node.driver_info['redfish_address'] = 'example.com:42'
self.parsed_driver_info['address'] = 'https://example.com:42'
response = redfish_utils.parse_driver_info(self.node)
self.assertEqual(self.parsed_driver_info, response)
def test_parse_driver_info_missing_info(self):
for prop in redfish_utils.REQUIRED_PROPERTIES:
self.node.driver_info = INFO_DICT.copy()
self.node.driver_info.pop(prop)
self.assertRaises(exception.MissingParameterValue,
redfish_utils.parse_driver_info, self.node)
def test_parse_driver_info_invalid_address(self):
for value in ['/banana!', 42]:
self.node.driver_info['redfish_address'] = value
self.assertRaisesRegex(exception.InvalidParameterValue,
'Invalid Redfish address',
redfish_utils.parse_driver_info, self.node)
@mock.patch.object(os.path, 'isdir', autospec=True)
def test_parse_driver_info_path_verify_ca(self,
mock_isdir):
mock_isdir.return_value = True
fake_path = '/path/to/a/valid/CA'
self.node.driver_info['redfish_verify_ca'] = fake_path
self.parsed_driver_info['verify_ca'] = fake_path
response = redfish_utils.parse_driver_info(self.node)
self.assertEqual(self.parsed_driver_info, response)
mock_isdir.assert_called_once_with(fake_path)
@mock.patch.object(os.path, 'isfile', autospec=True)
def test_parse_driver_info_valid_capath(self, mock_isfile):
mock_isfile.return_value = True
fake_path = '/path/to/a/valid/CA.pem'
self.node.driver_info['redfish_verify_ca'] = fake_path
self.parsed_driver_info['verify_ca'] = fake_path
response = redfish_utils.parse_driver_info(self.node)
self.assertEqual(self.parsed_driver_info, response)
mock_isfile.assert_called_once_with(fake_path)
def test_parse_driver_info_invalid_value_verify_ca(self):
# Integers are not supported
self.node.driver_info['redfish_verify_ca'] = 123456
self.assertRaisesRegex(exception.InvalidParameterValue,
'Invalid value type',
redfish_utils.parse_driver_info, self.node)
def test_parse_driver_info_invalid_system_id(self):
# Integers are not supported
self.node.driver_info['redfish_system_id'] = 123
self.assertRaisesRegex(exception.InvalidParameterValue,
'The value should be a path',
redfish_utils.parse_driver_info, self.node)
def test_parse_driver_info_missing_system_id(self):
self.node.driver_info.pop('redfish_system_id')
redfish_utils.parse_driver_info(self.node)
def test_parse_driver_info_valid_string_value_verify_ca(self):
for value in ('0', 'f', 'false', 'off', 'n', 'no'):
self.node.driver_info['redfish_verify_ca'] = value
response = redfish_utils.parse_driver_info(self.node)
parsed_driver_info = copy.deepcopy(self.parsed_driver_info)
parsed_driver_info['verify_ca'] = False
self.assertEqual(parsed_driver_info, response)
for value in ('1', 't', 'true', 'on', 'y', 'yes'):
self.node.driver_info['redfish_verify_ca'] = value
response = redfish_utils.parse_driver_info(self.node)
self.assertEqual(self.parsed_driver_info, response)
def test_parse_driver_info_invalid_string_value_verify_ca(self):
for value in ('xyz', '*', '!123', '123'):
self.node.driver_info['redfish_verify_ca'] = value
self.assertRaisesRegex(exception.InvalidParameterValue,
'The value should be a Boolean',
redfish_utils.parse_driver_info, self.node)
def test_parse_driver_info_valid_auth_type(self):
for value in 'basic', 'session', 'auto':
self.node.driver_info['redfish_auth_type'] = value
response = redfish_utils.parse_driver_info(self.node)
self.parsed_driver_info['auth_type'] = value
self.assertEqual(self.parsed_driver_info, response)
def test_parse_driver_info_invalid_auth_type(self):
for value in 'BasiC', 'SESSION', 'Auto':
self.node.driver_info['redfish_auth_type'] = value
self.assertRaisesRegex(exception.InvalidParameterValue,
'The value should be one of ',
redfish_utils.parse_driver_info, self.node)
def test_parse_driver_info_with_root_prefix(self):
test_redfish_address = 'https://example.com/test/redfish/v0/'
self.node.driver_info['redfish_address'] = test_redfish_address
self.parsed_driver_info['root_prefix'] = '/test/redfish/v0/'
response = redfish_utils.parse_driver_info(self.node)
self.assertEqual(self.parsed_driver_info, response)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_get_system(self, mock_sushy):
fake_conn = mock_sushy.return_value
fake_system = fake_conn.get_system.return_value
response = redfish_utils.get_system(self.node)
self.assertEqual(fake_system, response)
fake_conn.get_system.assert_called_once_with(
'/redfish/v1/Systems/FAKESYSTEM')
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_get_system_resource_not_found(self, mock_sushy):
fake_conn = mock_sushy.return_value
fake_conn.get_system.side_effect = (
sushy.exceptions.ResourceNotFoundError('GET',
'/',
requests.Response()))
self.assertRaises(exception.RedfishError,
redfish_utils.get_system, self.node)
fake_conn.get_system.assert_called_once_with(
'/redfish/v1/Systems/FAKESYSTEM')
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_get_system_multiple_systems(self, mock_sushy):
self.node.driver_info.pop('redfish_system_id')
fake_conn = mock_sushy.return_value
redfish_utils.get_system(self.node)
fake_conn.get_system.assert_called_once_with(None)
@mock.patch('time.sleep', autospec=True)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_get_system_resource_connection_error_retry(self, mock_sushy,
mock_sleep):
# Redfish specific configurations
self.config(connection_attempts=3, group='redfish')
fake_conn = mock_sushy.return_value
fake_conn.get_system.side_effect = sushy.exceptions.ConnectionError()
self.assertRaises(exception.RedfishConnectionError,
redfish_utils.get_system, self.node)
expected_get_system_calls = [
mock.call(self.parsed_driver_info['system_id']),
mock.call(self.parsed_driver_info['system_id']),
mock.call(self.parsed_driver_info['system_id']),
]
fake_conn.get_system.assert_has_calls(expected_get_system_calls)
mock_sleep.assert_called_with(
redfish_utils.CONF.redfish.connection_retry_interval)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_ensure_session_reuse(self, mock_sushy):
redfish_utils.get_system(self.node)
redfish_utils.get_system(self.node)
self.assertEqual(1, mock_sushy.call_count)
@mock.patch.object(sushy, 'Sushy', autospec=True)
def test_ensure_new_session_address(self, mock_sushy):
self.node.driver_info['redfish_address'] = 'http://bmc.foo'
redfish_utils.get_system(self.node)
self.node.driver_info['redfish_address'] = 'http://bmc.bar'
redfish_utils.get_system(self.node)
self.assertEqual(2, mock_sushy.call_count)
@mock.patch.object(sushy, 'Sushy', autospec=True)
def test_ensure_new_session_username(self, mock_sushy):
self.node.driver_info['redfish_username'] = 'foo'
redfish_utils.get_system(self.node)
self.node.driver_info['redfish_username'] = 'bar'
redfish_utils.get_system(self.node)
self.assertEqual(2, mock_sushy.call_count)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache.AUTH_CLASSES', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.SessionCache._sessions',
collections.OrderedDict())
def test_ensure_basic_session_caching(self, mock_auth, mock_sushy):
self.node.driver_info['redfish_auth_type'] = 'basic'
mock_session_or_basic_auth = mock_auth['auto']
redfish_utils.get_system(self.node)
mock_sushy.assert_called_with(
mock.ANY, verify=mock.ANY,
auth=mock_session_or_basic_auth.return_value,
)
self.assertEqual(len(redfish_utils.SessionCache._sessions), 1)
@mock.patch.object(sushy, 'Sushy', autospec=True)
def test_expire_old_sessions(self, mock_sushy):
cfg.CONF.set_override('connection_cache_size', 10, 'redfish')
for num in range(20):
self.node.driver_info['redfish_username'] = 'foo-%d' % num
redfish_utils.get_system(self.node)
self.assertEqual(mock_sushy.call_count, 20)
self.assertEqual(len(redfish_utils.SessionCache._sessions), 10)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_disabled_sessions_cache(self, mock_sushy):
cfg.CONF.set_override('connection_cache_size', 0, 'redfish')
for num in range(2):
self.node.driver_info['redfish_username'] = 'foo-%d' % num
redfish_utils.get_system(self.node)
self.assertEqual(mock_sushy.call_count, 2)
self.assertEqual(len(redfish_utils.SessionCache._sessions), 0)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache.AUTH_CLASSES', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_auth_auto(self, mock_auth, mock_sushy):
redfish_utils.get_system(self.node)
mock_session_or_basic_auth = mock_auth['auto']
mock_session_or_basic_auth.assert_called_with(
username=self.parsed_driver_info['username'],
password=self.parsed_driver_info['password']
)
mock_sushy.assert_called_with(
self.parsed_driver_info['address'],
auth=mock_session_or_basic_auth.return_value,
verify=True)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache.AUTH_CLASSES', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_auth_session(self, mock_auth, mock_sushy):
self.node.driver_info['redfish_auth_type'] = 'session'
mock_session_auth = mock_auth['session']
redfish_utils.get_system(self.node)
mock_session_auth.assert_called_with(
username=self.parsed_driver_info['username'],
password=self.parsed_driver_info['password']
)
mock_sushy.assert_called_with(
mock.ANY, verify=mock.ANY,
auth=mock_session_auth.return_value
)
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache.AUTH_CLASSES', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
def test_auth_basic(self, mock_auth, mock_sushy):
self.node.driver_info['redfish_auth_type'] = 'basic'
mock_basic_auth = mock_auth['basic']
redfish_utils.get_system(self.node)
mock_basic_auth.assert_called_with(
username=self.parsed_driver_info['username'],
password=self.parsed_driver_info['password']
)
sushy.Sushy.assert_called_with(
mock.ANY, verify=mock.ANY,
auth=mock_basic_auth.return_value
)
| 45.532164 | 78 | 0.666517 |
79498385d6877af53ae31256dc8da03564d19172
| 5,347 |
py
|
Python
|
dev_tools/check.py
|
joshp112358/Cirq
|
c4fac27a9849e589ee05b4f702f2d7c9049aaeea
|
[
"Apache-2.0"
] | 2 |
2019-04-02T09:16:28.000Z
|
2019-05-25T18:35:19.000Z
|
dev_tools/check.py
|
joshp112358/Cirq
|
c4fac27a9849e589ee05b4f702f2d7c9049aaeea
|
[
"Apache-2.0"
] | 36 |
2019-04-03T23:03:51.000Z
|
2019-05-15T23:49:01.000Z
|
dev_tools/check.py
|
joshp112358/Cirq
|
c4fac27a9849e589ee05b4f702f2d7c9049aaeea
|
[
"Apache-2.0"
] | 2 |
2019-04-03T22:55:05.000Z
|
2019-04-24T23:24:53.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Optional, cast, Set
import abc
import os.path
from dev_tools import env_tools, shell_tools
class CheckResult:
"""Output of a status check that passed, failed, or error'ed."""
def __init__(self,
check: 'Check',
success: bool,
message: str,
unexpected_error: Optional[Exception]) -> None:
self.check = check
self.success = success
self.message = message
self.unexpected_error = unexpected_error
def __str__(self):
outcome = ('ERROR' if self.unexpected_error
else 'pass' if self.success
else 'FAIL')
msg = self.unexpected_error if self.unexpected_error else self.message
result = '{}: {} ({})'.format(outcome, self.check.context(), msg)
return shell_tools.highlight(
result,
shell_tools.GREEN if self.success else shell_tools.RED)
class Check(metaclass=abc.ABCMeta):
"""A status check that can performed in a python environment."""
def __init__(self, *dependencies):
self.dependencies = dependencies
@abc.abstractmethod
def command_line_switch(self) -> str:
"""Used to identify this check from the command line."""
@abc.abstractmethod
def context(self) -> str:
"""The name of this status check, as shown on github."""
@abc.abstractmethod
def perform_check(self,
env: env_tools.PreparedEnv,
verbose: bool) -> Tuple[bool, str]:
"""Evaluates the status check and returns a pass/fail with message.
Args:
env: Describes a prepared python 3 environment in which to run.
verbose: When set, more progress output is produced.
Returns:
A tuple containing a pass/fail boolean and then a details message.
"""
def needs_python2_env(self):
return False
def run(self,
env: env_tools.PreparedEnv,
verbose: bool,
previous_failures: Set['Check']) -> CheckResult:
"""Evaluates this check.
Args:
env: The prepared python environment to run the check in.
verbose: When set, more progress output is produced.
previous_failures: Checks that have already run and failed.
Returns:
A CheckResult instance.
"""
# Skip if a dependency failed.
if previous_failures.intersection(self.dependencies):
print(shell_tools.highlight(
'Skipped ' + self.command_line_switch(),
shell_tools.YELLOW))
return CheckResult(
self, False, 'Skipped due to dependency failing.', None)
print(shell_tools.highlight(
'Running ' + self.command_line_switch(),
shell_tools.GREEN))
try:
success, message = self.perform_check(env, verbose=verbose)
result = CheckResult(self, success, message, None)
except Exception as ex:
result = CheckResult(self, False, 'Unexpected error.', ex)
print(shell_tools.highlight(
'Finished ' + self.command_line_switch(),
shell_tools.GREEN if result.success else shell_tools.RED))
if verbose:
print(result)
return result
def pick_env_and_run_and_report(self, env: env_tools.PreparedEnv,
verbose: bool,
previous_failures: Set['Check']
) -> CheckResult:
"""Evaluates this check in python 3 or 2.7, and reports to github.
If the prepared environments are not linked to a github repository,
with a known access token, reporting to github is skipped.
Args:
env: A prepared python 3 environment.
verbose: When set, more progress output is produced.
previous_failures: Checks that have already run and failed.
Returns:
A CheckResult instance.
"""
env.report_status_to_github('pending', 'Running...', self.context())
chosen_env = cast(env_tools.PreparedEnv, env)
os.chdir(cast(str, chosen_env.destination_directory))
result = self.run(chosen_env, verbose, previous_failures)
if result.unexpected_error is not None:
env.report_status_to_github('error',
'Unexpected error.',
self.context())
else:
env.report_status_to_github(
'success' if result.success else 'failure',
result.message,
self.context())
return result
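# --- Editor's illustrative sketch (not part of the real check suite): a
# minimal concrete Check showing the three abstract hooks above.
class FormatCheck(Check):
    """Hypothetical check; a real one would inspect formatter output."""

    def command_line_switch(self) -> str:
        return 'format'

    def context(self) -> str:
        return 'format by example'

    def perform_check(self,
                      env: env_tools.PreparedEnv,
                      verbose: bool) -> Tuple[bool, str]:
        # This sketch always passes; a real implementation would run a
        # formatter inside `env` and report its findings here.
        return True, 'No format issues found.'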
| 35.410596 | 78 | 0.607069 |
794983d1b37b99882de3f91703d022c3549c37ba
| 3,300 |
py
|
Python
|
mark_scripts/topicsandcollocations/lx.py
|
syhw/contextual_word_segmentation
|
401ce2030db814783b9447534006d6b7387527e4
|
[
"MIT"
] | 2 |
2016-06-29T10:32:49.000Z
|
2020-11-17T20:18:43.000Z
|
scripts/lx.py
|
syhw/contextual_word_segmentation
|
401ce2030db814783b9447534006d6b7387527e4
|
[
"MIT"
] | null | null | null |
scripts/lx.py
|
syhw/contextual_word_segmentation
|
401ce2030db814783b9447534006d6b7387527e4
|
[
"MIT"
] | null | null | null |
"""lx.py -- Mark Johnson, 24th Febuary 2005
lx contains utility functions for the other programs
in this directory."""
import csv, os, os.path
def incr(d, k, inc=1):
"""incr adds inc to the value of d[k] if d[k] is defined,
or sets d[k] to inc if d[k] is undefined.
d is the dictionary being incremented.
k is the dictionary key whose value is incremented.
inc is the size of the increment (default 1)."""
if k in d:
d[k] += inc
else:
d[k] = inc
def incr2(d, k1, k2, inc=1):
"""incr2 adds inc to the value of d[k1][k2] if d[k1][k2] is defined,
or sets d[k1][k2] to inc if d[k1][k2] is undefined.
d is the dictionary of dictionaries being incremented.
k1, k2 are the dictionary keys whose value is incremented.
inc is the size of the increment (default 1)."""
if k1 in d:
dk1 = d[k1]
if k2 in dk1:
dk1[k2] += inc
else:
dk1[k2] = inc
else:
d[k1] = {k2:inc}
def incr3(d, k1, k2, k3, inc=1):
"""incr3 adds inc to the value of d[k1][k2][k3] if it is defined,
otherwise it sets d[k1][k2][k3] to inc.
d is the dictionary of dictionaries being incremented.
k1, k2, k3 are the dictionary keys whose value is incremented.
inc is the size of the increment (default 1). """
if k1 in d:
dk1 = d[k1]
if k2 in dk1:
dk1k2 = dk1[k2]
if k3 in dk1k2:
dk1k2[k3] += inc
else:
dk1k2[k3] = inc
else:
dk1[k2] = {k3:inc}
else:
d[k1] = {k2:{k3:inc}}
def second(xs):
"""second() returns the second element in a sequence.
This is mainly usefule as the value of the key argument
to sort and sorted."""
return xs[1]
def count_elements(xs, dct=None):
"""Given a sequence xs of elements, return a dictionary dct of
mapping elements to the number of times they appear in items. If
dct is not None, use dct as this dictionary."""
if dct==None:
dct = {}
for item in xs:
incr(dct, item)
return dct
# Finding all files that meet a condition
def findfiles(topdir, file_re):
"""Returns a list of filenames below dir whose names match filenameregex."""
filenames = []
for root, dirs, files in os.walk(topdir):
for file in files:
if file_re.match(file):
filenames.append(os.path.join(root, file))
return filenames
def writecsvfile(filename, data, header=None):
"""writecsvfile writes data to a file in a format that can be
easily imported to a spreadsheet. Specifically, it writes data to
csvfilename.csv, with header at the top. If Header != None, it
also checks that each tuple in data has same length as header.
CSV stands for Comma Separated Values, and CSV files are generally
readable by spreadsheet programs like Excel."""
outf = csv.writer(file(filename+".csv", "wb"))
if header != None:
outf.writerow(header)
for row in data:
outf.writerow(row)
if header != None and len(header) != len(row):
print "## Error in zipf:writecsv(): header = %s, row = %s" % (header,row)
| 33 | 86 | 0.591515 |
794985b2dbb77e4d7691753432c53ddf3ad31377
| 2,818 |
py
|
Python
|
tensorflow/python/ops/dequantize_op_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 848 |
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/python/ops/dequantize_op_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 656 |
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/python/ops/dequantize_op_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 506 |
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Dequantize Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class DequantizeOpTest(test.TestCase):
def __init__(self, method_name="runTest"):
super(DequantizeOpTest, self).__init__(method_name)
def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
with self.cached_session():
input_op = constant_op.constant(inputs, shape=[len(inputs)], dtype=dtype)
dequantized = array_ops.dequantize(input_op, min_range, max_range)
tf_ans = self.evaluate(dequantized)
# TODO(vrv): Add support for DT_QINT32 quantization if needed.
type_dict = {
dtypes.quint8: np.uint8,
dtypes.qint8: np.int8,
dtypes.quint16: np.uint16,
dtypes.qint16: np.int16
}
self.assertTrue(dtype in type_dict.keys())
v_max = np.iinfo(type_dict[dtype]).max
v_min = np.iinfo(type_dict[dtype]).min
self.assertTrue(min_range >= v_min)
self.assertTrue(max_range <= v_max)
type_range = v_max - v_min
if v_min < 0:
half_range = (type_range + 1) / 2
else:
half_range = 0.0
np_ans = ((inputs.astype(np.float32) + half_range) *
(max_range - min_range) / type_range) + min_range
self.assertAllClose(tf_ans, np_ans, rtol=1e-5, atol=1e-5)
def testBasicQuint8(self):
self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 6.0, dtypes.quint8)
self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 123.456, dtypes.quint8)
self._testDequantizeOp(
np.array([0, 4, 42, 108, 243]), 5.0, 200.2, dtypes.quint8)
def testBasicQint8(self):
self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8)
self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8)
self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8)
if __name__ == "__main__":
test.main()
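# Editor's note (not part of the original test): the reference formula above
# works out, e.g., for quint8 with [min_range, max_range] = [0.0, 6.0]:
# half_range = 0 and type_range = 255, so input 128 -> 128 * 6.0 / 255 ~= 3.012.
# For qint8 with [-1.0, 2.0]: half_range = 128 and type_range = 255, so
# input -128 -> (-128 + 128) * 3.0 / 255 - 1.0 = -1.0 and
# input  127 -> ( 127 + 128) * 3.0 / 255 - 1.0 =  2.0.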
| 37.078947 | 80 | 0.688786 |
79498608b6eaa2acb8693d94bddd6b4c52d36e60
| 1,894 |
py
|
Python
|
redrugs/tests/functional/test_root.py
|
dlmcguinness/redrugs
|
d49857b1f291c0b3bdd0b25417ba42068ca73c62
|
[
"MIT"
] | 5 |
2015-01-23T15:28:59.000Z
|
2017-10-23T12:01:36.000Z
|
redrugs/tests/functional/test_root.py
|
dlmcguinness/redrugs
|
d49857b1f291c0b3bdd0b25417ba42068ca73c62
|
[
"MIT"
] | 14 |
2015-02-03T21:20:58.000Z
|
2015-10-13T02:32:18.000Z
|
redrugs/tests/functional/test_root.py
|
dlmcguinness/redrugs
|
d49857b1f291c0b3bdd0b25417ba42068ca73c62
|
[
"MIT"
] | 5 |
2015-01-28T17:55:44.000Z
|
2018-12-03T02:47:05.000Z
|
# -*- coding: utf-8 -*-
"""
Functional test suite for the root controller.
This is an example of how functional tests can be written for controllers.
As opposed to a unit-test, which test a small unit of functionality,
functional tests exercise the whole application and its WSGI stack.
Please read http://pythonpaste.org/webtest/ for more information.
"""
from nose.tools import assert_true
from redrugs.tests import TestController
class TestRootController(TestController):
"""Tests for the method in the root controller."""
def test_index(self):
"""The front page is working properly"""
response = self.app.get('/')
msg = 'TurboGears 2 is rapid web application development toolkit '\
'designed to make your life easier.'
# You can look for specific strings:
assert_true(msg in response)
# You can also access a BeautifulSoup'ed response in your tests
# (First run $ easy_install BeautifulSoup
# and then uncomment the next two lines)
#links = response.html.findAll('a')
#print links
#assert_true(links, "Mummy, there are no links here!")
def test_environ(self):
"""Displaying the wsgi environ works"""
response = self.app.get('/environ.html')
assert_true('The keys in the environment are: ' in response)
def test_data(self):
"""The data display demo works with HTML"""
response = self.app.get('/data.html?a=1&b=2')
expected1 = """<td>a</td>
<td>1</td>"""
expected2 = """<td>b</td>
<td>2</td>"""
assert expected1 in response, response
assert expected2 in response, response
def test_data_json(self):
"""The data display demo works with JSON"""
resp = self.app.get('/data.json?a=1&b=2')
assert '"a": "1", "b": "2"' in resp, resp
| 32.655172 | 75 | 0.635164 |
794986f1ffa50f50977fe6ad7b6480235b0936a2
| 1,036 |
py
|
Python
|
fairness/fairness_check.py
|
Tomcli/ffdl-knative
|
b68edaaa1717ac34c946e25d24198590012b0e20
|
[
"Apache-2.0"
] | 2 |
2019-01-18T16:10:50.000Z
|
2019-10-24T11:42:31.000Z
|
fairness/fairness_check.py
|
Tomcli/ffdl-knative
|
b68edaaa1717ac34c946e25d24198590012b0e20
|
[
"Apache-2.0"
] | null | null | null |
fairness/fairness_check.py
|
Tomcli/ffdl-knative
|
b68edaaa1717ac34c946e25d24198590012b0e20
|
[
"Apache-2.0"
] | null | null | null |
import json
import argparse
from app import fairness_check
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--s3_url', type=str, help='Object storage endpoint')
parser.add_argument('--bucket_name', type=str, help='Object storage bucket name')
parser.add_argument('--s3_username', type=str, help='Object storage access key id')
parser.add_argument('--s3_password', type=str, help='Object storage access key secret')
parser.add_argument('--model_id', type=str, help='Training model ID')
    parser.add_argument('--metric_path', type=str, help='Path for fairness check output')
args = parser.parse_args()
s3_url = args.s3_url
bucket_name = args.bucket_name
s3_username = args.s3_username
s3_password = args.s3_password
metric_path = args.metric_path
model_id = args.model_id
metrics = fairness_check(s3_url, bucket_name, s3_username, s3_password, model_id)
with open(metric_path, "w") as report:
report.write(json.dumps(metrics))
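# Editor's note (not part of the original script): a typical invocation looks
# like the following (all values hypothetical):
#
#   python fairness_check.py --s3_url http://minio:9000 --bucket_name models \
#       --s3_username <access-key> --s3_password <secret-key> \
#       --model_id training-abc123 --metric_path /tmp/fairness.json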
| 38.37037 | 91 | 0.724903 |
7949878648cf58b251ae02447bc8c7b4ec031103
| 12,239 |
py
|
Python
|
apero/recipes/nirps_ha/cal_preprocess_nirps_ha.py
|
njcuk9999/apero-drs
|
83b043e9f277a011b03e0227c77307961b200901
|
[
"MIT"
] | 1 |
2021-03-09T17:49:31.000Z
|
2021-03-09T17:49:31.000Z
|
apero/recipes/nirps_ha/cal_preprocess_nirps_ha.py
|
njcuk9999/apero-drs
|
83b043e9f277a011b03e0227c77307961b200901
|
[
"MIT"
] | 43 |
2020-10-06T18:42:24.000Z
|
2022-03-28T21:23:10.000Z
|
apero/recipes/nirps_ha/cal_preprocess_nirps_ha.py
|
njcuk9999/apero-drs
|
83b043e9f277a011b03e0227c77307961b200901
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE DESCRIPTION HERE
Created on 2019-03-05 16:38
@author: ncook
Version 0.0.1
"""
import numpy as np
import os
from apero import core
from apero import lang
from apero.core import constants
from apero.science import preprocessing as pp
from apero.io import drs_image
from apero.io import drs_fits
from apero.core.instruments.spirou import file_definitions
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'cal_preprocess_nirps_ha.py'
__INSTRUMENT__ = 'NIRPS_HA'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# Get Logging function
WLOG = core.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
# Raw prefix
RAW_PREFIX = file_definitions.raw_prefix
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(directory=None, files=None, **kwargs):
"""
    Main function for cal_preprocess_nirps_ha.py
:param directory: string, the night name sub-directory
:param files: list of strings or string, the list of files to process
:param kwargs: any additional keywords
:type directory: str
:type files: list[str]
:keyword debug: int, debug level (0 for None)
:returns: dictionary of the local space
:rtype: dict
"""
# assign function calls (must add positional)
fkwargs = dict(directory=directory, files=files, **kwargs)
# ----------------------------------------------------------------------
# deal with command line inputs / function call inputs
recipe, params = core.setup(__NAME__, __INSTRUMENT__, fkwargs)
# solid debug mode option
if kwargs.get('DEBUG0000', False):
return recipe, params
# ----------------------------------------------------------------------
# run main bulk of code (catching all errors)
llmain, success = core.run(__main__, recipe, params)
# ----------------------------------------------------------------------
# End Message
# ----------------------------------------------------------------------
return core.end_main(params, llmain, recipe, success, outputs='None')
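# Example call (editor's illustrative sketch; the night directory and raw file
# name below are hypothetical, not taken from this repository):
#
#   from apero.recipes.nirps_ha import cal_preprocess_nirps_ha
#   ll = cal_preprocess_nirps_ha.main(directory='2020-01-01',
#                                     files=['NIRPS_2020-01-01_raw.fits'])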
def __main__(recipe, params):
# ----------------------------------------------------------------------
# Main Code
# ----------------------------------------------------------------------
# Get hot pixels for corruption check
hotpixels = pp.get_hot_pixels(params)
    # get skip parameter
skip = params['SKIP_DONE_PP']
# ----------------------------------------------------------------------
# Loop around input files
# ----------------------------------------------------------------------
# get files
infiles = params['INPUTS']['FILES'][1]
# Number of files
num_files = len(params['INPUTS']['FILES'][1])
# storage for output files
output_names = []
# loop around number of files
for it in range(num_files):
# ------------------------------------------------------------------
# add level to recipe log
log1 = recipe.log.add_level(params, 'num', it)
# ------------------------------------------------------------------
# print file iteration progress
core.file_processing_update(params, it, num_files)
        # get this iteration's file
file_instance = infiles[it]
# ------------------------------------------------------------------
# Fix the nirps header
# ------------------------------------------------------------------
        # certain keys may not be in some raw nirps files
file_instance = drs_fits.fix_header(params, recipe, file_instance)
# ------------------------------------------------------------------
# identification of file drs type
# ------------------------------------------------------------------
# identify this iterations file type
cond, infile = pp.drs_infile_id(params, recipe, file_instance)
# ------------------------------------------------------------------
# if it wasn't found skip this file, if it was print a message
if cond:
eargs = [infile.name]
WLOG(params, 'info', TextEntry('40-010-00001', args=eargs))
else:
eargs = [infile.filename]
WLOG(params, 'info', TextEntry('40-010-00002', args=eargs))
continue
# get data from file instance
image = np.array(infile.data)
# ------------------------------------------------------------------
# Get out file and check skip
# ------------------------------------------------------------------
# get the output drs file
oargs = [params, recipe, infile, recipe.outputs['PP_FILE'], RAW_PREFIX]
found, outfile = pp.drs_outfile_id(*oargs)
# construct out filename
outfile.construct_filename(params, infile=infile)
# if we didn't find the output file we should log this error
if not found:
eargs = [outfile.name]
WLOG(params, 'error', TextEntry('00-010-00003', args=eargs))
if skip:
if os.path.exists(outfile.filename):
wargs = [infile.filename]
WLOG(params, 'info', TextEntry('40-010-00012', args=wargs))
continue
# ----------------------------------------------------------------------
# Check for pixel shift and/or corrupted files
# ----------------------------------------------------------------------
# storage
snr_hotpix, rms_list = [], []
shiftdy, shiftdx = 0.0, 0.0
        # do this iteratively; if there is a shift we need to re-work out the QC
for iteration in range(2):
# get pass condition
cout = pp.test_for_corrupt_files(params, image, hotpixels)
snr_hotpix, rms_list = cout[0], cout[1]
shiftdx, shiftdy = cout[2], cout[3]
# use dx/dy to shift the image back to where the engineering flat
# is located
if shiftdx != 0 or shiftdy != 0:
# log process
wmsg = TextEntry('40-010-00013', args=[shiftdx, shiftdy])
WLOG(params, '', wmsg)
# shift image
image = np.roll(image, [shiftdy], axis=0)
image = np.roll(image, [shiftdx], axis=1)
# work out QC here
qargs = [snr_hotpix, infile, rms_list]
qc_params, passed = pp.quality_control(params, *qargs, log=False)
# if passed break
if passed:
break
# ------------------------------------------------------------------
# Quality control to check for corrupt files
# ------------------------------------------------------------------
# re-calculate qc
qargs = [snr_hotpix, infile, rms_list]
qc_params, passed = pp.quality_control(params, *qargs, log=True)
# update recipe log
log1.add_qc(params, qc_params, passed)
if not passed:
# end log here
log1.end(params)
# go to next iteration
continue
# ------------------------------------------------------------------
# correct image
# ------------------------------------------------------------------
# correct for the top and bottom reference pixels
WLOG(params, '', TextEntry('40-010-00003'))
image = pp.correct_top_bottom(params, image)
# correct by a median filter from the dark amplifiers
WLOG(params, '', TextEntry('40-010-00016'))
image, pfile = pp.nirps_correction(params, image, header=infile.header)
# ------------------------------------------------------------------
# calculate mid observation time
# ------------------------------------------------------------------
mout = drs_fits.get_mid_obs_time(params, infile.header)
mid_obs_time, mid_obs_method = mout
# ------------------------------------------------------------------
# rotate image
# ------------------------------------------------------------------
# rotation to match HARPS orientation (expected by DRS)
image = drs_image.rotate_image(image, params['RAW_TO_PP_ROTATION'])
# ------------------------------------------------------------------
# Save rotated image
# ------------------------------------------------------------------
# define header keys for output file
# copy keys from input file
outfile.copy_original_keys(infile)
# add version
outfile.add_hkey('KW_PPVERSION', value=params['DRS_VERSION'])
# add dates
outfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
outfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
# add process id
outfile.add_hkey('KW_PID', value=params['PID'])
# add input filename
outfile.add_hkey_1d('KW_INFILE1', values=[infile.basename],
dim1name='infile')
# add qc parameters
outfile.add_qckeys(qc_params)
# add dprtype
outfile.add_hkey('KW_DPRTYPE', value=outfile.name)
# add the shift that was used to correct the image
outfile.add_hkey('KW_PPSHIFTX', value=shiftdx)
outfile.add_hkey('KW_PPSHIFTY', value=shiftdy)
outfile.add_hkey('KW_PPMSTR_FILE', value=os.path.basename(pfile))
# add mid observation time
outfile.add_hkey('KW_MID_OBS_TIME', value=mid_obs_time.mjd)
outfile.add_hkey('KW_MID_OBSTIME_METHOD', value=mid_obs_method)
# ------------------------------------------------------------------
# copy data
outfile.data = image
# ------------------------------------------------------------------
# log that we are saving rotated image
wargs = [outfile.filename]
WLOG(params, '', TextEntry('40-010-00009', args=wargs))
# ------------------------------------------------------------------
        # write fits image to file
outfile.write_file()
# add to output files (for indexing)
recipe.add_output_file(outfile)
# index this file
core.end_main(params, None, recipe, success=True, outputs='pp',
end=False)
# ------------------------------------------------------------------
# append to output storage in p
# ------------------------------------------------------------------
output_names.append(outfile.filename)
# ------------------------------------------------------------------
# update recipe log file
# ------------------------------------------------------------------
log1.end(params)
# ----------------------------------------------------------------------
# End of main code
# ----------------------------------------------------------------------
return core.return_locals(params, dict(locals()))
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
# run main with no arguments (get from command line - sys.argv)
ll = main()
# =============================================================================
# End of code
# =============================================================================
| 42.644599 | 80 | 0.448158 |
7949881235fb13f4f1c15113ac4d8fe095de4222
| 3,521 |
py
|
Python
|
trisicell/tl/partition_function/_clt_sampler.py
|
faridrashidi/trisicell
|
4db89edd44c03ccb6c7d3477beff0079c3ff8035
|
[
"BSD-3-Clause"
] | 2 |
2021-07-02T13:53:15.000Z
|
2021-11-16T03:14:36.000Z
|
trisicell/tl/partition_function/_clt_sampler.py
|
faridrashidi/trisicell
|
4db89edd44c03ccb6c7d3477beff0079c3ff8035
|
[
"BSD-3-Clause"
] | 58 |
2021-06-14T17:14:39.000Z
|
2022-03-11T19:32:54.000Z
|
trisicell/tl/partition_function/_clt_sampler.py
|
faridrashidi/trisicell
|
4db89edd44c03ccb6c7d3477beff0079c3ff8035
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
from decimal import Decimal
import numpy as np
import numpy.linalg as la
from scipy.special import softmax
from sklearn.metrics.pairwise import pairwise_distances
def draw_sample_clt(P, greedy, c=1, coef=2):
r"""
Draw sample clt.
:param P:
:param greedy:
:param c: gaussian kernel parameter
:param coef:
:return: edges, subtrees, prior_prob
prior_prob in the latex: Prob_{T\sim E}[T]
"""
edges, prior_prob = clt_sample_rec(P, greedy, c, coef=coef)
n_cells = P.shape[0]
n_nodes = 2 * n_cells - 1
edges_map = {a: (b, d) for a, b, d in edges}
subtrees = []
for i in range(n_nodes):
if i not in edges_map: # leaf
row = np.zeros(n_cells, dtype=np.int8)
row[i] = 1
else:
pair = edges_map[i]
row = subtrees[pair[0]] + subtrees[pair[1]] # logical_or
subtrees.append(row)
return edges, subtrees, prior_prob
def clt_sample_rec(
P,
greedy,
c,
names=None,
namecount=None,
coef=2,
prior_prob=None,
join_prob_matrix=None,
):
"""
Clt sample recursion.
    Runs in O(n^2 m^2):
    n: depth of the recursion (two edges are joined at each call)
    n m^2: runtime of each call
can be improved to
:param P: probability matrix
:param greedy: sample or max
:param c: gaussian kernel parameter
:param names: for rec
:param namecount: for rec
:param coef: coef between dist and common mut
:param prior_prob: for rec
:return: edges, prior_prob
"""
# TODO make this faster by not recalculating
if prior_prob is None:
prior_prob = Decimal(1.0)
if P.shape[0] == 1:
return [], prior_prob
if names is None:
names = list(range(P.shape[0]))
namecount = P.shape[0]
if join_prob_matrix is None:
def join_neg_priority(a, b): # smaller values should be joined first
return la.norm(a - b) - row_leafness_score(a, b) * coef
dist = pairwise_distances(P, metric=join_neg_priority) # O(n m^2)
dist = dist.astype(np.float128)
np.fill_diagonal(dist, np.inf)
        # This block adjusts c if dist/2c is too big for softmax.
c_rec = c
for _ in range(10):
sim = softmax(-dist / (2 * c_rec))
if not np.any(np.isnan(sim)):
break
c_rec *= 2.0
else:
# add new row to join_prob_matrix to get sim
pass
prob = sim
if greedy:
pair = np.unravel_index(np.argmax(prob), prob.shape)
else:
flat_probs = np.float64(prob.flat)
ind = np.random.choice(len(flat_probs), p=flat_probs)
pair = np.unravel_index(ind, prob.shape)
# conversion from numpy.float128 to Decimal is not supported
prior_prob = prior_prob * Decimal(np.float64(prob[pair]))
P_new = np.delete(P, pair, axis=0) # remove two rows
P_new = np.append(
P_new, np.minimum(P[pair[0]], P[pair[1]]).reshape(1, -1), axis=0
) # add one row that has only common ones
new_edge = [namecount, names[pair[0]], names[pair[1]]]
new_names = copy.copy(names)
del new_names[np.max(pair)]
del new_names[np.min(pair)]
new_names.append(namecount)
newnamecount = namecount + 1
edges, prior_prob = clt_sample_rec(
P_new, greedy, c, new_names, newnamecount, coef, prior_prob
)
edges.append(new_edge)
return edges, prior_prob
def row_leafness_score(row_a, row_b):
return np.sum(np.minimum(row_a, row_b))
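# Illustrative sketch (editor's addition): sample a single clonal tree from a
# small random genotype-probability matrix. The 4x5 matrix is hypothetical and
# the call assumes the platform provides numpy.float128, which the module above
# already requires.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    P_demo = rng.rand(4, 5)
    demo_edges, demo_subtrees, demo_prob = draw_sample_clt(P_demo, greedy=True)
    print(len(demo_edges), demo_prob)  # 3 joins and the Decimal prior of the tree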
| 28.168 | 77 | 0.618574 |
7949898777f7cb5036a036ca7691a820932c4476
| 455 |
py
|
Python
|
manoria_project/apps/manoria/utils.py
|
jtauber/team566
|
2884e6bbfd4f8ae0f5e0f09fcee645a6552f1d7a
|
[
"MIT"
] | 1 |
2019-06-13T16:18:47.000Z
|
2019-06-13T16:18:47.000Z
|
manoria_project/apps/manoria/utils.py
|
jtauber/team566
|
2884e6bbfd4f8ae0f5e0f09fcee645a6552f1d7a
|
[
"MIT"
] | null | null | null |
manoria_project/apps/manoria/utils.py
|
jtauber/team566
|
2884e6bbfd4f8ae0f5e0f09fcee645a6552f1d7a
|
[
"MIT"
] | null | null | null |
import random
def weighted_choices(weighted_population, k):
s = []
for choice, weight in weighted_population:
for i in range(weight):
s.append(choice)
results = []
while s:
r = random.choice(s)
results.append(r)
if len(results) == k:
break
s = [item for item in s if item != r]
d = dict(weighted_population)
return sorted(results, key=lambda x: d[x], reverse=True)
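# Illustrative usage (editor's addition; the weighted population below is
# hypothetical): draw two distinct choices, favouring heavier weights, and get
# them back sorted by weight in descending order.
if __name__ == "__main__":
    population = [("wood", 5), ("stone", 3), ("gold", 1)]
    print(weighted_choices(population, 2))  # e.g. ['wood', 'stone']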
| 25.277778 | 60 | 0.582418 |
7949898f8298628db2fce6d26a61a665a90bc92d
| 569 |
py
|
Python
|
corpora/test.py
|
mobarski/sandbox
|
64ac79143750d5dcbd4d0f3abdab6efeb9bdf50c
|
[
"MIT"
] | null | null | null |
corpora/test.py
|
mobarski/sandbox
|
64ac79143750d5dcbd4d0f3abdab6efeb9bdf50c
|
[
"MIT"
] | null | null | null |
corpora/test.py
|
mobarski/sandbox
|
64ac79143750d5dcbd4d0f3abdab6efeb9bdf50c
|
[
"MIT"
] | null | null | null |
import sqlite3
db=sqlite3.connect('freq.sqlite')
sql = "select count(*) from freq where key=='SC1'"
sql = "select * from freq where key=='SC2' limit 90"
sql = "select sum(freq) from freq"
sql = "create table agg as select * from (select token,sum(freq) as freq group by token) order by freq desc, token asc"
#db.execute('drop table if exists agg')
sql = "create table agg as select token,sum(freq) as freq from freq group by token order by 2 desc, 1 asc"
sql = "select rowid,* from agg where token like 'd%' limit 10"
for x in db.execute(sql):
print(x)
db.commit()
| 35.5625 | 119 | 0.706503 |
79498a5898b17bec205cfd67c896efc2f998e8a3
| 2,236 |
py
|
Python
|
gui/constants.py
|
ErlendHer/AlgoView
|
946c2bb38e2ab3af011281c9672af4fcca84ae87
|
[
"Apache-2.0"
] | 1 |
2020-11-26T09:37:28.000Z
|
2020-11-26T09:37:28.000Z
|
gui/constants.py
|
ErlendHer/AlgoView
|
946c2bb38e2ab3af011281c9672af4fcca84ae87
|
[
"Apache-2.0"
] | null | null | null |
gui/constants.py
|
ErlendHer/AlgoView
|
946c2bb38e2ab3af011281c9672af4fcca84ae87
|
[
"Apache-2.0"
] | 1 |
2020-12-09T17:18:12.000Z
|
2020-12-09T17:18:12.000Z
|
import os
import sys
import yaml
from yaml.scanner import ScannerError
WIDTH = 0  # Width of our _maze in pixels
HEIGHT = 0  # Height of our _maze in pixels
SCREEN_WIDTH = 0 # Application width in pixels
SCREEN_HEIGHT = 0 # Application height in pixels
MAZE_LOC = (0, 0) # Location of the first pixel of the _maze
FONT = "Courier New"
running = True # Controls the game loop, terminates application when False
TICK = None # Number of updates performed per second
PADX = None  # Global padding in the x direction
PADY = None  # Global padding in the y direction
BOX_SIZE = None # Size of each individual box representing the _maze
BORDER_SIZE = None # Thickness of the application borders
default_config = {
"tick": 60,
"pad_x": 4,
"pad_y": 4,
"box_size": 20,
"border_size": 2
}
cfg_path = "config.yml"
def load_config():
"""
Loads the config.yml file. If the file does not exist, a default configuration file is created.
"""
# All constants are in the global scope
global TICK, PADX, PADY, BOX_SIZE, BORDER_SIZE
if not os.path.exists(cfg_path):
_create_config(cfg_path)
config = None
fail_safe = 0 # Implement fail_safe to prevent infinite loops
while not config:
if fail_safe > 4:
print("Fatal error reading config.yml, terminating application")
sys.exit(-1)
try:
fail_safe += 1
with open(cfg_path) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
except ScannerError:
_create_config(cfg_path)
continue
if not default_config.keys() == config.keys():
_create_config()
config = None
for key, value in config.items():
if key == "tick":
TICK = value
elif key == "pad_x":
PADX = value
elif key == "pad_y":
PADY = value
elif key == "box_size":
BOX_SIZE = value
elif key == "border_size":
BORDER_SIZE = value
def _create_config(path):
"""
Creates default config file.
"""
with open(path, "w+") as f:
yaml.dump(default_config, f)
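# Illustrative usage sketch (editor's addition): calling load_config() fills in
# the module-level constants from config.yml, creating a default file on the
# first run.
if __name__ == "__main__":
    load_config()
    print(TICK, PADX, PADY, BOX_SIZE, BORDER_SIZE)  # e.g. 60 4 4 20 2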
| 26.939759 | 99 | 0.605098 |
79498ac5dee8c52121bb990e1b193bcffdda1e34
| 16,115 |
py
|
Python
|
omnizart/patch_cnn/app.py
|
nicolasanjoran/omnizart
|
b0e74af39b2e3a312ef32dbf0837626b2e043cb6
|
[
"MIT"
] | 1,145 |
2020-11-13T10:07:47.000Z
|
2022-03-29T17:35:36.000Z
|
omnizart/patch_cnn/app.py
|
nicolasanjoran/omnizart
|
b0e74af39b2e3a312ef32dbf0837626b2e043cb6
|
[
"MIT"
] | 44 |
2020-12-29T04:51:16.000Z
|
2022-03-15T06:52:04.000Z
|
omnizart/patch_cnn/app.py
|
nicolasanjoran/omnizart
|
b0e74af39b2e3a312ef32dbf0837626b2e043cb6
|
[
"MIT"
] | 61 |
2020-12-19T09:09:42.000Z
|
2022-03-23T01:26:22.000Z
|
import os
from os.path import join as jpath
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
from mir_eval import sonify
from mir_eval.util import midi_to_hz
from scipy.io.wavfile import write as wavwrite
from omnizart.io import write_yaml, write_agg_f0_results
from omnizart.utils import get_logger, parallel_generator, get_filename, ensure_path_exists, aggregate_f0_info
from omnizart.base import BaseTranscription, BaseDatasetLoader
from omnizart.constants import datasets as d_struct
from omnizart.feature.cfp import extract_patch_cfp
from omnizart.setting_loaders import PatchCNNSettings
from omnizart.models.patch_cnn import patch_cnn_model
from omnizart.patch_cnn.inference import inference
from omnizart.train import get_train_val_feat_file_list
logger = get_logger("Patch CNN Transcription")
class PatchCNNTranscription(BaseTranscription):
"""Application class of PatchCNN module."""
def __init__(self, conf_path=None):
super().__init__(PatchCNNSettings, conf_path=conf_path)
def transcribe(self, input_audio, model_path=None, output="./"):
"""Transcribe frame-level fundamental frequency of vocal from the given audio.
Parameters
----------
input_audio: Path
Path to the wav audio file.
model_path: Path
Path to the trained model or the transcription mode. If given a path, should be
the folder that contains `arch.yaml`, `weights.h5`, and `configuration.yaml`.
output: Path (optional)
Path for writing out the extracted vocal f0. Default to current path.
Returns
-------
agg_f0: list[dict]
List of aggregated F0 information, with each entry containing the onset, offset,
            and frequency (Hz).
See Also
--------
        omnizart.cli.patch_cnn.transcribe: The corresponding command line entry.
"""
if not os.path.isfile(input_audio):
raise FileNotFoundError(f"The given audio path does not exist. Path: {input_audio}")
logger.info("Loading model...")
model, model_settings = self._load_model(model_path)
logger.info("Extracting patch CFP feature...")
feat, mapping, zzz, cenf = extract_patch_cfp(
input_audio,
patch_size=model_settings.feature.patch_size,
threshold=model_settings.feature.peak_threshold,
down_fs=model_settings.feature.sampling_rate,
hop=model_settings.feature.hop_size,
win_size=model_settings.feature.window_size,
fr=model_settings.feature.frequency_resolution,
fc=model_settings.feature.frequency_center,
tc=model_settings.feature.time_center,
g=model_settings.feature.gamma,
bin_per_octave=model_settings.feature.bins_per_octave,
)
logger.info("Predicting...")
feat = np.expand_dims(feat, axis=-1)
pred = model.predict(feat)
logger.info("Inferring contour...")
contour = inference(
pred=pred,
mapping=mapping,
zzz=zzz,
cenf=cenf,
threshold=model_settings.inference.threshold,
max_method=model_settings.inference.max_method
)
agg_f0 = aggregate_f0_info(contour, t_unit=model_settings.feature.hop_size)
output = self._output_midi(output, input_audio, verbose=False)
if output is not None:
# Output contour information
write_agg_f0_results(agg_f0, output_path=f"{output}_f0.csv")
# Synthesize audio
timestamp = np.arange(len(contour)) * model_settings.feature.hop_size
wav = sonify.pitch_contour(
timestamp, contour, model_settings.feature.sampling_rate, amplitudes=0.5 * np.ones(len(contour))
)
wavwrite(f"{output}_trans.wav", model_settings.feature.sampling_rate, wav)
logger.info("Text and Wav files have been written to %s", os.path.abspath(os.path.dirname(output)))
return agg_f0
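    # Illustrative usage sketch (editor's addition; the audio file and model
    # checkpoint paths below are hypothetical):
    #
    #   app = PatchCNNTranscription()
    #   agg_f0 = app.transcribe("vocal.wav", model_path="./patch_cnn_model",
    #                           output="./")
    #   # agg_f0: list of dicts holding onset, offset and frequency (Hz)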
def generate_feature(self, dataset_path, patch_cnn_settings=None, num_threads=4):
"""Extract the feature from the given dataset.
To train the model, the first step is to pre-process the data into feature
representations. After downloading the dataset, use this function to generate
the feature by giving the path of the stored dataset.
To specify the output path, modify the attribute
``patch_cnn_settings.dataset.feature_save_path``.
It defaults to the folder of the stored dataset, and creates
two folders: ``train_feature`` and ``test_feature``.
Parameters
----------
dataset_path: Path
Path to the downloaded dataset.
patch_cnn_settings: PatchCNNSettings
The configuration instance that holds all relative settings for
the life-cycle of building a model.
num_threads:
Number of threads for parallel extraction of the feature.
See Also
--------
omnizart.constants.datasets:
The supported datasets and the corresponding training/testing splits.
"""
settings = self._validate_and_get_settings(patch_cnn_settings)
struct = d_struct.MIR1KStructure
## Below are examples of dealing with multiple supported datasets. # noqa: E266
# dataset_type = resolve_dataset_type(
# dataset_path,
# keywords={"maps": "maps", "musicnet": "musicnet", "maestro": "maestro", "rhythm": "pop", "pop": "pop"}
# )
# if dataset_type is None:
# logger.warning(
# "The given path %s does not match any built-in processable dataset. Do nothing...",
# dataset_path
# )
# return
# logger.info("Inferred dataset type: %s", dataset_type)
# # Build instance mapping
# struct = {
# "maps": d_struct.MapsStructure,
# "musicnet": d_struct.MusicNetStructure,
# "maestro": d_struct.MaestroStructure,
# "pop": d_struct.PopStructure
# }[dataset_type]
# label_extractor = {
# "maps": MapsLabelExtraction,
# "musicnet": MusicNetLabelExtraction,
# "maestro": MaestroLabelExtraction,
# "pop": PopLabelExtraction
# }[dataset_type]
# Fetching wav files
train_data_pair = struct.get_train_data_pair(dataset_path=dataset_path)
test_data_pair = struct.get_test_data_pair(dataset_path=dataset_path)
logger.info("Number of total training wavs: %d", len(train_data_pair))
logger.info("Number of total testing wavs: %d", len(test_data_pair))
# Resolve feature output path
train_feat_out_path, test_feat_out_path = self._resolve_feature_output_path(dataset_path, settings)
logger.info("Output training feature to %s", train_feat_out_path)
logger.info("Output testing feature to %s", test_feat_out_path)
# Feature extraction
logger.info(
"Start extracting the training feature. "
"This may take time to finish and affect the computer's performance"
)
_parallel_feature_extraction(
train_data_pair, out_path=train_feat_out_path, feat_settings=settings.feature, num_threads=num_threads
)
logger.info(
"Start extracting the testing feature. "
"This may take time to finish and affect the computer's performance"
)
_parallel_feature_extraction(
test_data_pair, out_path=test_feat_out_path, feat_settings=settings.feature, num_threads=num_threads
)
# Writing out the settings
write_yaml(settings.to_json(), jpath(train_feat_out_path, ".success.yaml"))
write_yaml(settings.to_json(), jpath(test_feat_out_path, ".success.yaml"))
logger.info("All done")
def train(self, feature_folder, model_name=None, input_model_path=None, patch_cnn_settings=None):
"""Model training.
Train the model from scratch or continue training given a model checkpoint.
Parameters
----------
feature_folder: Path
Path to the generated feature.
model_name: str
The name of the trained model. If not given, will default to the
current timestamp.
input_model_path: Path
Specify the path to the model checkpoint in order to fine-tune
the model.
        patch_cnn_settings: PatchCNNSettings
The configuration that holds all relative settings for
the life-cycle of model building.
"""
settings = self._validate_and_get_settings(patch_cnn_settings)
if input_model_path is not None:
logger.info("Continue to train on model: %s", input_model_path)
model, prev_set = self._load_model(input_model_path, custom_objects=self.custom_objects)
settings.feature.patch_size = prev_set.feature.patch_size
logger.info("Constructing dataset instance")
split = settings.training.steps / (settings.training.steps + settings.training.val_steps)
train_feat_files, val_feat_files = get_train_val_feat_file_list(feature_folder, split=split)
output_types = (tf.float32, tf.float32)
output_shapes = ((settings.feature.patch_size, settings.feature.patch_size, 1), (2))
train_dataset = PatchCNNDatasetLoader(
feature_files=train_feat_files,
num_samples=settings.training.epoch * settings.training.batch_size * settings.training.steps
) \
.get_dataset(settings.training.batch_size, output_types=output_types, output_shapes=output_shapes)
val_dataset = PatchCNNDatasetLoader(
feature_files=val_feat_files,
num_samples=settings.training.epoch * settings.training.val_batch_size * settings.training.val_steps
) \
.get_dataset(settings.training.val_batch_size, output_types=output_types, output_shapes=output_shapes)
if input_model_path is None:
logger.info("Constructing new model")
model = patch_cnn_model(patch_size=settings.feature.patch_size)
logger.info("Compiling model")
optimizer = tf.keras.optimizers.Adam(learning_rate=settings.training.init_learning_rate)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
logger.info("Resolving model output path")
if model_name is None:
model_name = str(datetime.now()).replace(" ", "_")
if not model_name.startswith(settings.model.save_prefix):
model_name = settings.model.save_prefix + "_" + model_name
model_save_path = jpath(settings.model.save_path, model_name)
ensure_path_exists(model_save_path)
write_yaml(settings.to_json(), jpath(model_save_path, "configurations.yaml"))
write_yaml(model.to_yaml(), jpath(model_save_path, "arch.yaml"), dump=False)
logger.info("Model output to: %s", model_save_path)
logger.info("Constrcuting callbacks")
callbacks = [
tf.keras.callbacks.EarlyStopping(patience=settings.training.early_stop),
tf.keras.callbacks.ModelCheckpoint(jpath(model_save_path, "weights.h5"), save_weights_only=True)
]
logger.info("Callback list: %s", callbacks)
logger.info("Start training")
history = model.fit(
train_dataset,
validation_data=val_dataset,
epochs=settings.training.epoch,
steps_per_epoch=settings.training.steps,
validation_steps=settings.training.val_steps,
callbacks=callbacks,
use_multiprocessing=True,
workers=8
)
return model_save_path, history
def extract_label(label_path, label_loader, mapping, cenf, t_unit):
"""Label extraction function of PatchCNN module.
Extracts the label representation required by PatchCNN module.
    The output dimensions are: patch_length x 2. The second dimension indicates whether
    or not there is an active vocal pitch in that patch.
    Small probabilities are assigned to patches whose pitch is slightly shifted,
    to augment the sparse label. The probabilities are computed according to the distance
    of that pitch index to the ground-truth index: 1 / (dist + 1).
Parameters
----------
label_path: Path
Path to the ground-truth file.
label_loader:
Label loader that contains ``load_label`` function for parsing the ground-truth
file into list :class:`Label` representation.
mapping: 2D numpy array
The original frequency and time index of patches.
See ``omnizart.feature.cfp.extract_patch_cfp`` for more details.
cenf: list[float]
Center frequencies in Hz of each frequency index.
t_unit: float
Time unit of each frame in seconds.
Returns
-------
gt_roll: 2D numpy array
A sequence of binary classes, represents whether the patch contains the pitch
of vocal.
"""
labels = label_loader.load_label(label_path)
total_len = len(mapping)
cenf = np.array(cenf)
gt_roll = np.zeros((total_len, 2))
for label in labels:
start_tidx = int(round(label.start_time / t_unit))
end_tidx = int(round(label.end_time / t_unit))
frm_start = np.argmin(np.abs(mapping[:, 1] - start_tidx))
frm_end = total_len - np.argmin(np.abs(mapping[::-1, 1] - end_tidx))
cur_hz = midi_to_hz(label.note)
pitch_idx = np.argmin(np.abs(cenf - cur_hz))
for idx in range(frm_start, frm_end):
dist = abs(mapping[idx, 0] - pitch_idx)
prob = 1 / (1 + dist)
gt_roll[idx, 1] = prob
gt_roll[:, 0] = 1 - gt_roll[:, 1]
return gt_roll
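# Worked example of the weighting above (editor's illustrative note): a patch
# whose pitch index matches the ground truth gets probability 1 / (0 + 1) = 1.0,
# one bin away gets 1 / (1 + 1) = 0.5, two bins away 1 / (2 + 1) ~ 0.33, so the
# "vocal" column decays smoothly around the annotated pitch and column 0 always
# holds the complement.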
def _all_in_one_extract(data_pair, **feat_params):
feat, mapping, zzz, cenf = extract_patch_cfp(data_pair[0], **feat_params)
label = extract_label(data_pair[1], d_struct.MIR1KStructure, mapping=mapping, cenf=cenf, t_unit=feat_params["hop"])
return feat, mapping, zzz, label
def _parallel_feature_extraction(data_pair_list, out_path, feat_settings, num_threads=4):
feat_params = {
"patch_size": feat_settings.patch_size,
"threshold": feat_settings.peak_threshold,
"down_fs": feat_settings.sampling_rate,
"hop": feat_settings.hop_size,
"win_size": feat_settings.window_size,
"fr": feat_settings.frequency_resolution,
"fc": feat_settings.frequency_center,
"tc": feat_settings.time_center,
"g": feat_settings.gamma,
"bin_per_octave": feat_settings.bins_per_octave,
}
iters = enumerate(
parallel_generator(
_all_in_one_extract,
data_pair_list,
max_workers=num_threads,
use_thread=True,
chunk_size=num_threads,
**feat_params
)
)
for idx, ((feat, mapping, zzz, label), audio_idx) in iters:
audio = data_pair_list[audio_idx][0]
# logger.info("Progress: %s/%s - %s", idx+1, len(data_pair_list), audio)
print(f"Progress: {idx + 1}/{len(data_pair_list)} - {audio}", end="\r")
filename = get_filename(audio)
out_hdf = jpath(out_path, filename + ".hdf")
with h5py.File(out_hdf, "w") as out_f:
out_f.create_dataset("feature", data=feat)
out_f.create_dataset("mapping", data=mapping)
out_f.create_dataset("Z", data=zzz)
out_f.create_dataset("label", data=label)
print("")
class PatchCNNDatasetLoader(BaseDatasetLoader):
"""Dataset loader for PatchCNN module."""
def _get_feature(self, hdf_name, slice_start):
feat = self.hdf_refs[hdf_name][self.feat_col_name][slice_start:slice_start + self.slice_hop].squeeze()
return np.expand_dims(feat, axis=-1)
| 42.407895 | 119 | 0.662985 |
79498ba74276fc1ebed3f588a5106bfd97b96987
| 4,835 |
py
|
Python
|
setup.py
|
yuttie/pylti
|
18a608282e0d5bc941beb2eaaeea3b7ad484b399
|
[
"BSD-2-Clause"
] | 55 |
2015-03-04T02:08:51.000Z
|
2022-01-21T20:57:38.000Z
|
setup.py
|
yuttie/pylti
|
18a608282e0d5bc941beb2eaaeea3b7ad484b399
|
[
"BSD-2-Clause"
] | 69 |
2015-01-07T18:38:37.000Z
|
2021-08-31T23:35:48.000Z
|
setup.py
|
yuttie/pylti
|
18a608282e0d5bc941beb2eaaeea3b7ad484b399
|
[
"BSD-2-Clause"
] | 44 |
2015-03-28T01:18:15.000Z
|
2021-12-28T16:45:09.000Z
|
#!/usr/bin/env python
# Copyright 2009-2014 MIT ODL Engineering
#
# This file is part of PyLTI.
#
from __future__ import print_function
import os
import sys
if sys.version_info < (2, 7):
error = "ERROR: PyLTI requires Python 2.7+ ... exiting."
print(error, file=sys.stderr)
sys.exit(1)
try:
from setuptools import setup, find_packages
from setuptools.command.test import test as testcommand
class PyTest(testcommand):
user_options = testcommand.user_options[:]
user_options += [
('coverage', 'C', 'Produce a coverage report for PyLTI'),
('pep8', 'P', 'Produce a pep8 report for PyLTI'),
('flakes', 'F', 'Produce a flakes report for PyLTI'),
]
coverage = None
pep8 = None
flakes = None
test_suite = False
test_args = []
def initialize_options(self):
testcommand.initialize_options(self)
def finalize_options(self):
testcommand.finalize_options(self)
self.test_suite = True
self.test_args = []
if self.coverage:
self.test_args.append('--cov')
self.test_args.append('pylti')
if self.pep8:
self.test_args.append('--pep8')
if self.flakes:
self.test_args.append('--flakes')
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
extra = dict(test_suite="pylti.tests",
tests_require=["pytest-cov>=2.3.0", "pytest-pep8>=1.0.6",
"pytest-flakes>=1.0.1", "pytest>=2.9.2",
"httpretty>=0.8.3", "flask>=0.10.1",
"oauthlib>=0.6.3", "semantic_version>=2.3.1",
"mock==1.0.1"],
cmdclass={"test": PyTest},
install_requires=["oauth2>=1.9.0.post1", "httplib2>=0.9", "six>=1.10.0"],
include_package_data=True,
zip_safe=False)
except ImportError as err:
import string
from distutils.core import setup
def convert_path(pathname):
"""
Local copy of setuptools.convert_path used by find_packages (only used
with distutils which is missing the find_packages feature)
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return os.path.join(*paths)
def find_packages(where='.', exclude=()):
"""
Local copy of setuptools.find_packages (only used with distutils which
is missing the find_packages feature)
"""
out = []
stack = [(convert_path(where), '')]
while stack:
where, prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
isdir = os.path.isdir(fn)
has_init = os.path.isfile(os.path.join(fn, '__init__.py'))
if '.' not in name and isdir and has_init:
out.append(prefix + name)
stack.append((fn, prefix + name + '.'))
for pat in list(exclude) + ['ez_setup', 'distribute_setup']:
from fnmatch import fnmatchcase
out = [item for item in out if not fnmatchcase(item, pat)]
return out
print("Non-Fatal Error:", err, "\n")
print("Setup encountered an error while importing setuptools (see above).")
print("Proceeding anyway with manual replacements for setuptools.find_packages.")
print("Try installing setuptools if you continue to have problems.\n\n")
extra = dict()
VERSION = __import__('pylti').__version__
README = open('README.rst').read()
setup(
name='PyLTI',
version=VERSION,
packages=find_packages(),
package_data={'pylti.templates': ['web/*.*', 'web/css/*', 'web/js/*']},
license='BSD',
author='MIT ODL Engineering',
author_email='odl-engineering@mit.edu',
url="http://github.com/mitodl/pylti",
description="PyLTI provides Python Implementation of IMS"
" LTI interface that works with edX",
long_description=README,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
],
**extra
)
| 34.049296 | 90 | 0.562358 |
79498c9430f1e2aa1df0702c9f5ca1ae38c6a19e
| 241 |
py
|
Python
|
setup.py
|
mayabenowitz/neuralcraft
|
00622818e6a078d906399d2be6d641acf040186c
|
[
"MIT"
] | 1 |
2020-09-28T21:24:25.000Z
|
2020-09-28T21:24:25.000Z
|
setup.py
|
mayabenowitz/neuralcraft
|
00622818e6a078d906399d2be6d641acf040186c
|
[
"MIT"
] | null | null | null |
setup.py
|
mayabenowitz/neuralcraft
|
00622818e6a078d906399d2be6d641acf040186c
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='A Free Massively Multiplayer Online Learning Platform',
author='maya benowitz',
license='MIT',
)
| 21.909091 | 72 | 0.692946 |
79498c94f4977fcdf537034c99c52026d5146aae
| 151 |
py
|
Python
|
src/index.py
|
oprogramador/facebook-graph-analyser
|
59c80378d95da10a6ec912b5f0fc1cb9500b03a1
|
[
"MIT"
] | null | null | null |
src/index.py
|
oprogramador/facebook-graph-analyser
|
59c80378d95da10a6ec912b5f0fc1cb9500b03a1
|
[
"MIT"
] | null | null | null |
src/index.py
|
oprogramador/facebook-graph-analyser
|
59c80378d95da10a6ec912b5f0fc1cb9500b03a1
|
[
"MIT"
] | null | null | null |
import GraphAnalyser
import FacebookConnector
import sys
print GraphAnalyser.findShortestPath(FacebookConnector.getFriends, sys.argv[1], sys.argv[2])
| 25.166667 | 92 | 0.847682 |
79498e62d8821d2dcf0b7b4ec4ce4f28d37cefcf
| 8,429 |
py
|
Python
|
ppci/utils/codepage.py
|
dcoles/ppci-mirror
|
51181db69d933adbb3c5b5ada2bd427d140b1385
|
[
"BSD-2-Clause"
] | null | null | null |
ppci/utils/codepage.py
|
dcoles/ppci-mirror
|
51181db69d933adbb3c5b5ada2bd427d140b1385
|
[
"BSD-2-Clause"
] | null | null | null |
ppci/utils/codepage.py
|
dcoles/ppci-mirror
|
51181db69d933adbb3c5b5ada2bd427d140b1385
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Cool idea to load actual object code into memory and execute it from python
using ctypes
Credits for idea: Luke Campagnola
"""
import inspect
import sys
import mmap
import struct
import logging
import ctypes
from ..arch import get_current_arch
from .. import ir
from ..binutils import debuginfo, layout
from ..binutils.linker import link
def get_ctypes_type(debug_type):
mapping = {
"int": ctypes.c_int,
"char": ctypes.c_int, # TODO: how to handle this?
"long": ctypes.c_long,
"void": ctypes.c_int, # TODO: what to do?
"double": ctypes.c_double,
"float": ctypes.c_float,
"bool": ctypes.c_int,
"byte": ctypes.c_int,
}
if isinstance(debug_type, debuginfo.DebugBaseType):
return mapping[debug_type.name]
elif isinstance(debug_type, debuginfo.DebugPointerType):
if isinstance(debug_type.pointed_type, debuginfo.DebugStructType):
# TODO: treat struct pointers as void pointers for now.
# TODO: fix this?
return ctypes.c_voidp
else:
return ctypes.POINTER(get_ctypes_type(debug_type.pointed_type))
elif debug_type is None:
return
elif isinstance(debug_type, type):
mapping = {int: ctypes.c_int, float: ctypes.c_double}
return mapping[debug_type]
elif isinstance(debug_type, ir.BasicTyp):
mapping = {
ir.f32: ctypes.c_float,
ir.f64: ctypes.c_double,
ir.i32: ctypes.c_int32,
ir.i64: ctypes.c_int64, # TODO: which one of 32 and 64 is int?
}
return mapping[debug_type]
else: # pragma: no cover
raise NotImplementedError(str(debug_type) + str(type(debug_type)))
uintt = ctypes.c_uint64 if struct.calcsize("P") == 8 else ctypes.c_uint32
class WinPage:
""" Nice windows hack to emulate mmap.
Copied from:
https://github.com/campagnola/pycca/blob/master/pycca/asm/codepage.py
"""
def __init__(self, size):
kern = ctypes.windll.kernel32
valloc = kern.VirtualAlloc
valloc.argtypes = (uintt,) * 4
valloc.restype = uintt
self.addr = valloc(0, size, 0x1000 | 0x2000, 0x40)
self.ptr = 0
self.size = size
self.mem = (ctypes.c_char * size).from_address(self.addr)
def write(self, data):
self.mem[self.ptr : self.ptr + len(data)] = data
self.ptr += len(data)
def seek(self, pos):
self.ptr = pos
    def read(self, size=None):
        # Accept mmap-style optional size so MemoryPage.read can forward it.
        end = self.size if size is None else min(self.ptr + size, self.size)
        return bytes(self.mem[self.ptr:end])
def __len__(self):
return self.size
def __del__(self):
kern = ctypes.windll.kernel32
vfree = kern.VirtualFree
vfree.argtypes = (uintt,) * 3
vfree(self.addr, self.size, 0x8000)
logger = logging.getLogger("codepage")
class MemoryPage:
""" Allocate a memory slab in the current process. """
def __init__(self, size):
self.size = size
if size > 0:
if sys.platform == "win32":
self._page = WinPage(size)
self.addr = self._page.addr
else:
self._page = mmap.mmap(-1, size, prot=1 | 2 | 4)
buf = (ctypes.c_char * size).from_buffer(self._page)
self.addr = ctypes.addressof(buf)
logger.debug("Allocated %s bytes at 0x%x", size, self.addr)
else:
self._page = None
self.addr = 0
def write(self, data):
""" Fill page with the given data """
if data:
assert self._page
self._page.write(data)
def seek(self, pos):
if self._page:
self._page.seek(pos)
def read(self, size=None):
if self._page:
return self._page.read(size)
else:
return bytes()
class Mod:
""" Container for machine code """
def __init__(self, obj, imports=None):
size = obj.byte_size
if not obj.debug_info:
raise ValueError(
'Unable to load "{}"'
" because it does not contain debug info.".format(obj)
)
# Create a code page into memory:
self._code_page = MemoryPage(size)
self._data_page = MemoryPage(size)
# Create callback pointers if any:
imports = imports or {}
self._import_symbols = []
extra_symbols = {}
for name, function in imports.items():
signature = inspect.signature(function)
if signature.return_annotation is inspect._empty:
raise ValueError(
'"{}" requires return type annotations'.format(name)
)
return_type = signature.return_annotation
argument_types = [
p.annotation for p in signature.parameters.values()
]
restype = get_ctypes_type(return_type)
argtypes = [get_ctypes_type(a) for a in argument_types]
ftype = ctypes.CFUNCTYPE(restype, *argtypes)
callback = ftype(function)
logger.debug("Import name %s", name)
self._import_symbols.append((name, callback, ftype))
extra_symbols[name] = ctypes.cast(callback, ctypes.c_void_p).value
# Link to e.g. apply offset to global literals
layout2 = layout.Layout()
layout_code_mem = layout.Memory("codepage")
layout_code_mem.location = self._code_page.addr
layout_code_mem.size = size
layout_code_mem.add_input(layout.Section("code"))
layout2.add_memory(layout_code_mem)
layout_data_mem = layout.Memory("datapage")
layout_data_mem.location = self._data_page.addr
layout_data_mem.size = size
layout_data_mem.add_input(layout.Section("data"))
layout2.add_memory(layout_data_mem)
# Link the object into memory:
obj = link(
[obj], layout=layout2, debug=True, extra_symbols=extra_symbols
)
assert obj.byte_size == size
# Load the code into the page:
code = bytes(obj.get_section("code").data)
self._code_page.write(code)
data = bytes(obj.get_section("data").data)
self._data_page.write(data)
# TODO: we might have more sections!
# Get a function pointer
for function in obj.debug_info.functions:
function_name = function.name
# Determine the function type:
restype = get_ctypes_type(function.return_type)
argtypes = [get_ctypes_type(a.typ) for a in function.arguments]
logger.debug("function sig %s %s", restype, argtypes)
ftype = ctypes.CFUNCTYPE(restype, *argtypes)
# Create a function pointer:
vaddress = obj.get_symbol_id_value(function.begin.symbol_id)
fpointer = ftype(vaddress)
# Set the attribute:
setattr(self, function_name, fpointer)
# Get a variable pointers
for variable in obj.debug_info.variables:
variable_name = variable.name
assert isinstance(variable, debuginfo.DebugVariable)
assert isinstance(variable.address, debuginfo.DebugAddress)
vaddress = obj.get_symbol_id_value(variable.address.symbol_id)
var_ctyp = ctypes.POINTER(get_ctypes_type(variable.typ))
vpointer = ctypes.cast(vaddress, var_ctyp)
# Set the attribute:
setattr(self, variable_name, vpointer)
# Store object for later usage:
self._obj = obj
def get_symbol_offset(self, name):
""" Get the memory address of a symbol """
return self._obj.get_symbol(name).value
def load_code_as_module(source_file, reporter=None):
""" Load c3 code as a module """
from ..api import c3c
# Compile a simple function
march = get_current_arch()
if march is None:
raise NotImplementedError(sys.platform)
obj = c3c(
[source_file], [], march, debug=True, opt_level=2, reporter=reporter
)
# Convert obj to executable module
m = Mod(obj)
return m
def load_obj(obj, imports=None):
""" Load an object into memory.
Args:
obj: the code object to load.
imports: A dictionary of functions to attach.
Optionally a dictionary of functions that must be imported can
be provided.
"""
return Mod(obj, imports=imports)
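# Illustrative usage sketch (editor's addition; 'add.c3' is a hypothetical C3
# source file defining 'function int add(int a, int b)'):
#
#   m = load_code_as_module('add.c3')
#   print(m.add(2, 3))  # -> 5, executed from the freshly mapped code page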
| 31.569288 | 78 | 0.609206 |
79498f88d2c3e79ed25b04394bb3ec7d080f237a
| 3,212 |
py
|
Python
|
automox_console_sdk/models/api_keys_id_body.py
|
AutomoxCommunity/automox-console-sdk-python
|
9e921b138d63f90750e071d0a40e1d7edfa06733
|
[
"MIT"
] | 1 |
2021-10-05T22:09:10.000Z
|
2021-10-05T22:09:10.000Z
|
automox_console_sdk/models/api_keys_id_body.py
|
AutomoxCommunity/automox-console-sdk-python
|
9e921b138d63f90750e071d0a40e1d7edfa06733
|
[
"MIT"
] | 1 |
2021-09-16T06:00:51.000Z
|
2021-09-16T06:00:51.000Z
|
automox_console_sdk/models/api_keys_id_body.py
|
AutomoxCommunity/automox-console-sdk-python
|
9e921b138d63f90750e071d0a40e1d7edfa06733
|
[
"MIT"
] | 4 |
2021-09-16T02:35:32.000Z
|
2022-02-16T01:09:57.000Z
|
# coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-11-16
Contact: support@automox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiKeysIdBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'is_enabled': 'bool'
}
attribute_map = {
'is_enabled': 'is_enabled'
}
def __init__(self, is_enabled=None): # noqa: E501
"""ApiKeysIdBody - a model defined in Swagger""" # noqa: E501
self._is_enabled = None
self.discriminator = None
self.is_enabled = is_enabled
@property
def is_enabled(self):
"""Gets the is_enabled of this ApiKeysIdBody. # noqa: E501
:return: The is_enabled of this ApiKeysIdBody. # noqa: E501
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""Sets the is_enabled of this ApiKeysIdBody.
:param is_enabled: The is_enabled of this ApiKeysIdBody. # noqa: E501
:type: bool
"""
if is_enabled is None:
raise ValueError("Invalid value for `is_enabled`, must not be `None`") # noqa: E501
self._is_enabled = is_enabled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiKeysIdBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiKeysIdBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.678571 | 96 | 0.565691 |
794990d322bf21f2201943c7b77f810cd74a33a2
| 941 |
py
|
Python
|
angr/procedures/libc/strcmp.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | 2 |
2018-05-02T17:41:36.000Z
|
2020-05-18T02:49:16.000Z
|
angr/procedures/libc/strcmp.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/libc/strcmp.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | 1 |
2022-02-10T02:29:38.000Z
|
2022-02-10T02:29:38.000Z
|
import angr
from angr.sim_type import SimTypeString, SimTypeInt
import logging
l = logging.getLogger("angr.procedures.libc.strcmp")
class strcmp(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, a_addr, b_addr, wchar=False, ignore_case=False):
self.argument_types = {0: self.ty_ptr(SimTypeString()),
1: self.ty_ptr(SimTypeString())}
self.return_type = SimTypeInt(32, True)
strlen = angr.SIM_PROCEDURES['libc']['strlen']
a_strlen = self.inline_call(strlen, a_addr, wchar=wchar)
b_strlen = self.inline_call(strlen, b_addr, wchar=wchar)
maxlen = self.state.se.BVV(max(a_strlen.max_null_index, b_strlen.max_null_index), self.state.arch.bits)
strncmp = self.inline_call(angr.SIM_PROCEDURES['libc']['strncmp'], a_addr, b_addr, maxlen, a_len=a_strlen, b_len=b_strlen, wchar=wchar, ignore_case=ignore_case)
return strncmp.ret_expr
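# Hypothetical usage sketch (editor's addition): hook a target binary's strcmp
# with this SimProcedure so symbolic execution summarises it instead of stepping
# through libc. 'a.out' is a placeholder path, not a file from this repository.
#
#   import angr
#   proj = angr.Project('a.out', auto_load_libs=False)
#   proj.hook_symbol('strcmp', strcmp())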
| 40.913043 | 168 | 0.706695 |
794991186df01f8dcad2311738a088ba3bd27824
| 4,899 |
py
|
Python
|
kitchen/kitchen2/tests/test_text_utf8.py
|
crylearner/RobotFrameworkAppiumIntegration
|
895b96025dbc04c152b820faf62ba8525dd7af78
|
[
"Apache-2.0"
] | 2 |
2017-08-10T16:14:15.000Z
|
2021-11-28T10:47:46.000Z
|
kitchen/kitchen2/tests/test_text_utf8.py
|
crylearner/RobotFrameworkAppiumIntegration
|
895b96025dbc04c152b820faf62ba8525dd7af78
|
[
"Apache-2.0"
] | null | null | null |
kitchen/kitchen2/tests/test_text_utf8.py
|
crylearner/RobotFrameworkAppiumIntegration
|
895b96025dbc04c152b820faf62ba8525dd7af78
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
import unittest
from nose import tools
import warnings
from kitchen.text import utf8
import base_classes
class TestUTF8(base_classes.UnicodeTestData, unittest.TestCase):
def setUp(self):
# All of the utf8* functions are deprecated
warnings.simplefilter('ignore', DeprecationWarning)
def tearDown(self):
warnings.simplefilter('default', DeprecationWarning)
def test_utf8_width(self):
'''Test that we find the proper number of spaces that a utf8 string will consume'''
tools.ok_(utf8.utf8_width(self.utf8_japanese) == 31)
tools.ok_(utf8.utf8_width(self.utf8_spanish) == 50)
tools.ok_(utf8.utf8_width(self.utf8_mixed) == 23)
def test_utf8_width_non_utf8(self):
'''Test that we handle non-utf8 bytes in utf8_width without backtracing'''
# utf8_width() treats non-utf8 byte sequences as undecodable so you
# end up with less characters than normal. In this string:
# Python-2.7+ replaces problematic characters in a different manner
# than older pythons.
# Python >= 2.7:
# El veloz murci�lago salt� sobre el perro perezoso.
# Python < 2.7:
# El veloz murci�go salt�bre el perro perezoso.
if len(unicode(u'\xe9la'.encode('latin1'), 'utf8', 'replace')) == 1:
# Python < 2.7
tools.ok_(utf8.utf8_width(self.latin1_spanish) == 45)
else:
# Python >= 2.7
tools.ok_(utf8.utf8_width(self.latin1_spanish) == 50)
def test_utf8_width_chop(self):
'''utf8_width_chop with byte strings'''
tools.ok_(utf8.utf8_width_chop(self.utf8_mixed) == (23, self.utf8_mixed))
tools.ok_(utf8.utf8_width_chop(self.utf8_mixed, 23) == (23, self.utf8_mixed))
tools.ok_(utf8.utf8_width_chop(self.utf8_mixed, 22) == (22, self.utf8_mixed[:-1]))
tools.ok_(utf8.utf8_width_chop(self.utf8_mixed, 19) == (18, self.u_mixed[:-4].encode('utf8')))
tools.ok_(utf8.utf8_width_chop(self.utf8_mixed, 2) == (2, self.u_mixed[0].encode('utf8')))
tools.ok_(utf8.utf8_width_chop(self.utf8_mixed, 1) == (0, ''))
def test_utf8_width_chop_unicode(self):
'''utf8_width_chop with unicode input'''
tools.ok_(utf8.utf8_width_chop(self.u_mixed) == (23, self.u_mixed))
tools.ok_(utf8.utf8_width_chop(self.u_mixed, 23) == (23, self.u_mixed))
tools.ok_(utf8.utf8_width_chop(self.u_mixed, 22) == (22, self.u_mixed[:-1]))
tools.ok_(utf8.utf8_width_chop(self.u_mixed, 19) == (18, self.u_mixed[:-4]))
tools.ok_(utf8.utf8_width_chop(self.u_mixed, 2) == (2, self.u_mixed[0]))
tools.ok_(utf8.utf8_width_chop(self.u_mixed, 1) == (0, u''))
def test_utf8_width_fill(self):
'''Pad a utf8 string'''
tools.ok_(utf8.utf8_width_fill(self.utf8_mixed, 1) == self.utf8_mixed)
tools.ok_(utf8.utf8_width_fill(self.utf8_mixed, 25) == self.utf8_mixed + ' ')
tools.ok_(utf8.utf8_width_fill(self.utf8_mixed, 25, left=False) == ' ' + self.utf8_mixed)
tools.ok_(utf8.utf8_width_fill(self.utf8_mixed, 25, chop=18) == self.u_mixed[:-4].encode('utf8') + ' ')
tools.ok_(utf8.utf8_width_fill(self.utf8_mixed, 25, chop=18, prefix=self.utf8_spanish, suffix=self.utf8_spanish) == self.utf8_spanish + self.u_mixed[:-4].encode('utf8') + self.utf8_spanish + ' ')
tools.ok_(utf8.utf8_width_fill(self.utf8_mixed, 25, chop=18) == self.u_mixed[:-4].encode('utf8') + ' ')
tools.ok_(utf8.utf8_width_fill(self.u_mixed, 25, chop=18, prefix=self.u_spanish, suffix=self.utf8_spanish) == self.u_spanish.encode('utf8') + self.u_mixed[:-4].encode('utf8') + self.u_spanish.encode('utf8') + ' ')
pass
def test_utf8_valid(self):
'''Test that a utf8 byte sequence is validated'''
warnings.simplefilter('ignore', DeprecationWarning)
tools.ok_(utf8.utf8_valid(self.utf8_japanese) == True)
tools.ok_(utf8.utf8_valid(self.utf8_spanish) == True)
warnings.simplefilter('default', DeprecationWarning)
def test_utf8_invalid(self):
'''Test that we return False with non-utf8 chars'''
warnings.simplefilter('ignore', DeprecationWarning)
tools.ok_(utf8.utf8_valid('\xff') == False)
tools.ok_(utf8.utf8_valid(self.latin1_spanish) == False)
warnings.simplefilter('default', DeprecationWarning)
def test_utf8_text_wrap(self):
tools.ok_(utf8.utf8_text_wrap(self.utf8_mixed) == [self.utf8_mixed])
tools.ok_(utf8.utf8_text_wrap(self.utf8_paragraph) == self.utf8_paragraph_out)
tools.ok_(utf8.utf8_text_wrap(self.utf8_mixed_para) == self.utf8_mixed_para_out)
tools.ok_(utf8.utf8_text_wrap(self.utf8_mixed_para, width=57,
initial_indent=' ', subsequent_indent='----') ==
self.utf8_mixed_para_57_initial_subsequent_out)
| 52.677419 | 227 | 0.664013 |
7949911a29de6bbc08f9ad875797e5a2e7b9e706
| 43,349 |
py
|
Python
|
app/baremetal_service/repository/serializers.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
app/baremetal_service/repository/serializers.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
app/baremetal_service/repository/serializers.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from rest_framework import serializers
from baremetal_service.repository import service_model
from drf_yasg import openapi
class NoneMeta(models.Model):
class Meta:
managed = False
db_table = 'NoneMeta'
CookiesParameter = [
openapi.Parameter(
name='COOKIE', in_=openapi.IN_HEADER,
type=openapi.TYPE_STRING,
description="COOKIES",
required=True,
default=""
),
openapi.Parameter(
name='cookies', in_=openapi.IN_HEADER,
type=openapi.TYPE_STRING,
description="cookies",
required=False,
default=""
),
]
TokenParameter = [
openapi.Parameter(
name='token', in_=openapi.IN_HEADER,
type=openapi.TYPE_STRING,
description="token",
required=True,
default=""
)
]
BucketUploadOjbectSerializer = [
# openapi.Parameter(
# name='container_name', in_=openapi.IN_PATH,
# type=openapi.TYPE_STRING,
# description="container_name",
# required=True,
# default=""
# ),
# openapi.Parameter(
# name='object_name', in_=openapi.IN_PATH,
# type=openapi.TYPE_STRING,
# description="object_name",
# required=True,
# default=""
# ),
openapi.Parameter(
name='file', in_=openapi.IN_FORM,
type=openapi.TYPE_FILE,
description="file",
required=True,
)
]
class ObjectFileUploadSerializer(serializers.ModelSerializer):
object_name = serializers.CharField(required=True)
container_name = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['container_name', 'object_name']
class GetConfInfoSerializer(serializers.ModelSerializer):
key_value = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ["key_value"]
class ServiceInstanceFeedbackSerializer(serializers.ModelSerializer):
uuid = serializers.CharField(required=True, label="实例所对应的机器", help_text="示例: c4130c54-bc4b-4249-928d-c014827653db")
status = serializers.CharField(required=True, label="当前实例状态", help_text="示例:active/DELETED")
task = serializers.CharField(required=True, label="实例更新结果", help_text="示例:success/instance_build")
class Meta:
model = NoneMeta
fields = ["uuid", "status", "task"]
class ServiceMachineFeedbackSerializer(serializers.ModelSerializer):
machine_id = serializers.CharField(required=True, label="machine id", help_text="5d5f9d0c419049990ddd19d2")
uuid = serializers.CharField(required=True, label="对应flavor id", help_text="c4130c54-bc4b-4249-928d-c014827653db")
job_model = serializers.JSONField(required=False, label="job model")
class Meta:
model = NoneMeta
fields = ["machine_id", "uuid", "job_model"]
class ServiceOrderWithIdSerializer(serializers.ModelSerializer):
class Meta:
model = service_model.BmServiceOrder
exclude = ["deleted", "create_at", "update_at", "delivery_status", "project"]
class ServiceOrderDeliveryUpdateSerializer(serializers.ModelSerializer):
order_id = serializers.CharField(required=True, label="订单id", help_text="示例:BMS201908231116166874034")
delivery_status = serializers.CharField(required=True, label="订单状态", help_text="示例:delivered")
class Meta:
model = NoneMeta
fields = ["order_id", "delivery_status"]
class DiskInfoSerializer(serializers.ModelSerializer):
volume_type = serializers.CharField(required=True, label="云硬盘类型", help_text="inspure_iscsi")
size = serializers.IntegerField(required=True, label="云硬盘大小", help_text="1")
volume_order_price = serializers.IntegerField(required=True, label="所购买云硬盘价格", help_text="30")
count = serializers.IntegerField(required=True, label="所购买云硬盘数量", help_text="2")
class Meta:
model = NoneMeta
fields = ["volume_type", "size", "volume_order_price", "count"]
class FloatingIPInfoSerializer(serializers.ModelSerializer):
# floating_ip_allocation = serializers.BooleanField(required=True)
shared_qos_policy_type = serializers.BooleanField(required=False, default=False, label="带宽类型")
qos_policy_id = serializers.CharField(required=False, label="共享带宽id")
qos_policy_name = serializers.CharField(required=True, label="带宽名称")
external_line_type = serializers.CharField(required=True, label="对外网络类型")
firewall_id = serializers.CharField(required=True, label="防火墙id")
firewall_name = serializers.CharField(required=True, label="防火墙名称")
floating_ip_order_price = serializers.IntegerField(required=True, label="所购买弹性公网IP价格")
class Meta:
model = NoneMeta
fields = ["qos_policy_name", "external_line_type", "firewall_id", "firewall_name", "floating_ip_order_price",
"shared_qos_policy_type", "qos_policy_id"]
class ServiceMachineCreateSerializer(serializers.ModelSerializer):
disk_info = DiskInfoSerializer(many=True)
floating_ip_info = FloatingIPInfoSerializer(many=False)
class Meta:
model = service_model.BmServiceMachine
exclude = ['id', "deleted", "create_at", "update_at", "order", "uuid", "status", "firewall_id",
"firewall_name", "floating_ip_line", "floating_ip_bandwidth"]
class ServiceOrderCreateSerializer(serializers.ModelSerializer):
class Meta:
model = service_model.BmServiceOrder
exclude = ['id', "deleted", "create_at", "update_at", "delivery_status", "project"]
class ServiceOrderAlterationSerializer(serializers.ModelSerializer):
order_type = serializers.CharField(required=True, label="订单类型", help_text="alteration")
order_price = serializers.FloatField(required=True, label="订单价格", help_text="-4000")
product_type = serializers.CharField(required=True, label="产品类型", help_text="带宽")
product_info = serializers.CharField(required=True, label="产品信息")
class Meta:
model = NoneMeta
fields = ['order_type', "order_price", "product_type", "product_info"]
class ServiceCreateListSerializer(serializers.ModelSerializer):
order_info = ServiceOrderCreateSerializer(required=True)
service_info = ServiceMachineCreateSerializer(required=True)
class Meta:
model = NoneMeta
fields = ["order_info", "service_info"]
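# Illustrative sketch (not part of the original module): the nested order/service
# serializers above are driven like any other DRF serializer; the helper name and
# payload shape here are placeholders for documentation only.
def _example_validate_create_payload(payload):
    """Validate a machine-creation payload shaped like
    {"order_info": {...}, "service_info": {...}} and return the cleaned data."""
    serializer = ServiceCreateListSerializer(data=payload)
    serializer.is_valid(raise_exception=True)  # raises ValidationError on bad input
    return serializer.validated_data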
class MonitoringInstanceSerializer(serializers.ModelSerializer):
project_id = serializers.CharField(required=True)
region = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['project_id', "region"]
class ServiceInstanceSerializer(serializers.ModelSerializer):
class Meta:
model = service_model.BmServiceInstance
exclude = ['id', "deleted", "update_at"]
class ServiceOrderQuerybyAccountSerializer(serializers.ModelSerializer):
account_id = serializers.CharField(required=True, help_text="示例:3f4cb35aeec544d3af33150f38b55286")
class Meta:
model = NoneMeta
fields = ["account_id"]
class ServiceMachineIdSerializer(serializers.ModelSerializer):
machine_id = serializers.CharField(required=True, label="Machine id", help_text="示例:5d5f9d0c419049990ddd19d2")
class Meta:
model = NoneMeta
fields = ["machine_id"]
class ServiceRetrySerializer(serializers.ModelSerializer):
order_id = serializers.CharField(required=True, label="Order id", help_text="示例:BMS201911251135127208056")
machine_id = serializers.CharField(required=False, label="Machine id", help_text="示例:5d5f9d0c419049990ddd19d2")
class Meta:
model = NoneMeta
fields = ["order_id", "machine_id"]
class ServiceInstanceUuidSerializer(serializers.ModelSerializer):
instance_uuid = serializers.CharField(required=True, label="instance uuid ",
help_text="示例:c63a51b9-ccbf-412a-b250-661791473a6b")
class Meta:
model = NoneMeta
fields = ["instance_uuid"]
class ServiceBmServiceFloatingIp(serializers.ModelSerializer):
class Meta:
model = service_model.BmServiceFloatingIp
exclude = ['id', "order_id", "account_id", "project_id", "contract_number",
"create_at", "update_at", "status",
"is_measure_end", "floating_ip", "floating_ip_id", "attached",
"instance_uuid", "instance_name", "fixed_address",
"external_name", "external_name_id", "first_create_at", "attached_type"]
class ServiceCreateFloatingIpSerializer(serializers.ModelSerializer):
order_info = ServiceOrderCreateSerializer(required=True)
floating_ip_info = ServiceBmServiceFloatingIp(required=True)
firewall_id = serializers.CharField(required=True, label="防火墙id 示例:5d68dbc679580c3d577bca3e")
class Meta:
model = NoneMeta
fields = ["order_info", "floating_ip_info", "firewall_id"]
class ServiceCreateFloatingIpWithOrderSerializer(serializers.ModelSerializer):
order_info = ServiceOrderWithIdSerializer(required=True)
floating_ip_info = ServiceBmServiceFloatingIp(required=True)
firewall_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ["order_info", "floating_ip_info", "firewall_id"]
class ServiceFloatingIpQuotasSetSerializer(serializers.ModelSerializer):
# qos_policy_id = serializers.CharField(required=True)
order_info = ServiceOrderAlterationSerializer(required=True)
floating_ip_id = serializers.CharField(required=True)
qos_policy_name = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['order_info', 'floating_ip_id', 'qos_policy_name']
class ServiceDeleteFloatingIpSerializer(serializers.ModelSerializer):
floating_ip_id_list = serializers.ListField(required=True, label="",
help_text="示例:[‘41deaaeb-b007-41c2-8150-f953cb9765e9’]")
class Meta:
model = NoneMeta
fields = ['floating_ip_id_list']
class ServiceAddFloatingIptoServerSerializer(serializers.ModelSerializer):
instance_uuid = serializers.CharField(required=True, label="实例 id",
help_text="51c7094c-f039-41b5-865d-d37faa148c96")
instance_name = serializers.CharField(required=True, label="实例名称", help_text="node_8_32")
floating_ip_id = serializers.CharField(required=True, label="弹性公网IP id",
help_text="172d6c81-595f-4aa6-b2f7-1ec99e01716c")
floating_ip_address = serializers.CharField(required=False, label="弹性公网IP", help_text="10.100.2.172")
fixed_address = serializers.CharField(required=True, label="所绑定的端口", help_text="172.24.0.24")
class Meta:
model = NoneMeta
fields = ['instance_uuid', "instance_name", 'floating_ip_id', "floating_ip_address", "fixed_address"]
class ServiceAddFloatingIptoLoadbalanceSerializer(serializers.Serializer):
loadbalance_name = serializers.CharField(required=True)
loadbalance_id = serializers.CharField(required=True)
floating_ip_id = serializers.CharField(required=True)
class ServiceDetachFloatingIptoLoadbalanceSerializer(serializers.Serializer):
loadbalance_id = serializers.CharField(required=True)
floating_ip_id = serializers.CharField(required=True)
class LoadbalanceSerializer(serializers.Serializer):
loadbalancer_id = serializers.CharField(required=True)
listener_protocol = serializers.CharField(required=True)
class ServiceDetachFloatingIptoServerSerializer(serializers.ModelSerializer):
instance_uuid = serializers.CharField(required=True, label="实例 id",
help_text="51c7094c-f039-41b5-865d-d37faa148c96")
floating_ip_id = serializers.CharField(required=True, label="弹性公网IP id",
help_text="172d6c81-595f-4aa6-b2f7-1ec99e01716c")
class Meta:
model = NoneMeta
fields = ['instance_uuid', 'floating_ip_id']
class ServiceFloatingIpAssociateFirewall(serializers.ModelSerializer):
floating_ip_id = serializers.CharField(required=True, label="弹性公网IP id",
help_text="1458e4d9-c0f0-46ae-96f6-3eac0cd34d33")
floating_ip = serializers.CharField(required=True, label="弹性公网IP", help_text="10.200.204.107")
firewall_id = serializers.CharField(required=True, label="防火墙id", help_text="5d5f59d7af7b14da70048a6f")
class Meta:
model = NoneMeta
fields = ['floating_ip_id', 'floating_ip', 'firewall_id']
class ServiceBmServiceVolume(serializers.ModelSerializer):
class Meta:
model = service_model.BmServiceVolume
exclude = ['id', 'order_id', 'account_id', 'project_id',
'create_at', 'update_at', 'is_measure_end',
'region', 'attached_type', 'instance_uuid', 'instance_name',
'volume_id', 'contract_number']
class VolumeCreateSerializer(serializers.ModelSerializer):
order_info = ServiceOrderCreateSerializer(required=True)
volume_info = ServiceBmServiceVolume(required=True)
class Meta:
model = NoneMeta
fields = ['order_info', 'volume_info']
class VolumeCreateWithOrderSerializer(serializers.ModelSerializer):
order_info = ServiceOrderWithIdSerializer(required=True)
volume_info = ServiceBmServiceVolume(required=True)
class Meta:
model = NoneMeta
fields = ['order_info', 'volume_info']
class VolumeUpdateSerializer(serializers.ModelSerializer):
order_info = ServiceOrderAlterationSerializer(required=True)
volume_id = serializers.CharField(required=True)
name = serializers.CharField(required=False)
size = serializers.CharField(required=False)
class Meta:
model = NoneMeta
fields = ['order_info', 'volume_id', 'name', 'size']
class VolumeDeleteSerailizer(serializers.ModelSerializer):
volume_ids = serializers.ListField(required=True)
class Meta:
model = NoneMeta
fields = ['volume_ids']
class VolumeRollBackSerailizer(serializers.ModelSerializer):
volume_id_list = serializers.ListField(required=True)
class Meta:
model = NoneMeta
fields = ['volume_id_list']
class VolumeBackCreateSerializer(serializers.ModelSerializer):
order_info = ServiceOrderCreateSerializer(required=True, label="订单信息", help_text="示例:")
volume_id = serializers.CharField(required=True, label="云硬盘id", help_text="示例:bdbdd6f9-6c5f-4b33-bf19-563389565d3f")
name = serializers.CharField(required=False, label="备份名称", help_text="示例:ding_test")
volume_name = serializers.CharField(required=True, label="云硬盘名称", help_text="示例:ds2334445555555555555555566666666")
description = serializers.CharField(required=False, label="描述信息", help_text="示例:")
class Meta:
model = NoneMeta
fields = ['volume_id', 'name', 'description', 'order_info', "volume_name"]
class VolumeBackupDeleteSerializer(serializers.ModelSerializer):
backup_ids = serializers.ListField(required=True, label="所删除的云硬盘备份id列表",
help_text="[6d4dafbf-cb34-47f6-953e-f45e084cf8fa]")
class Meta:
model = NoneMeta
fields = ['backup_ids']
class VolumeBackupRestoreSerializer(serializers.ModelSerializer):
order_info = ServiceOrderCreateSerializer(required=True)
backup_id = serializers.CharField(required=True)
volume_name = serializers.CharField(required=False)
class Meta:
model = NoneMeta
fields = ['backup_id', 'volume_name', 'order_info']
class VolumeAttachSerializer(serializers.ModelSerializer):
server_id = serializers.CharField(required=True, label="绑定的示例ID",
help_text="示例:776c046b-4675-4ccb-a16a-30db03e44caa")
volume_id = serializers.CharField(required=True, label="云硬盘ID", help_text="示例:b264e220-8d1c-4695-9941-e1c19a99a246")
server_name = serializers.CharField(required=False, label="实例名称", help_text="示例:cen7")
class Meta:
model = NoneMeta
fields = ['server_id', 'volume_id', 'server_name']
class BucketCreateSerializer(serializers.ModelSerializer):
order_info = ServiceOrderCreateSerializer(required=True, label="订单信息")
container_name = serializers.CharField(required=True, label="桶名称 示例:ding")
is_public = serializers.BooleanField(required=False, default=False, label="权限是否公有 示例:True/False")
class Meta:
model = NoneMeta
fields = ['container_name', 'is_public', 'order_info']
class BucketDeleteSerializer(serializers.ModelSerializer):
buckets = serializers.ListField(required=True, label="删除桶列表", help_text="示例:[ding]")
class Meta:
model = NoneMeta
fields = ['buckets']
class MaterialSerializer(serializers.ModelSerializer):
contract_number = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['contract_number']
class MaterialIPSerializer(serializers.ModelSerializer):
floating_ip_id = serializers.CharField(required=True, label="弹性公网IP ",
help_text="示例:eb345bb0-9bf0-461d-b5e1-ca1896d50997")
material_ip_date = serializers.DateField(required=True, label="查询时间", help_text="示例:2019-08-20")
class Meta:
model = NoneMeta
exclude = ["id"]
class MaterialIPSeasonSerializer(serializers.ModelSerializer):
# contract_number = serializers.CharField(required=True)
floating_ip_id = serializers.CharField(required=True, label="",
help_text="示例:eb345bb0-9bf0-461d-b5e1-ca1896d50997")
start_date = serializers.DateField(required=True, label="", help_text="示例:2019-08-20")
end_date = serializers.DateField(required=True, label="", help_text="示例:2019-08-20")
class Meta:
model = NoneMeta
exclude = ["id"]
class ContarctIPSeasonSerializer(serializers.ModelSerializer):
contract_number = serializers.CharField(required=True, label="合同编码", help_text="示例:huo123456")
start_date = serializers.DateField(required=True, label="开始日期", help_text="示例:2019-09-26")
end_date = serializers.DateField(required=True, label="结束日期", help_text="示例:2019-09-26")
class Meta:
model = NoneMeta
exclude = ["id"]
class ContarctVolumeSerializer(serializers.ModelSerializer):
contract_number = serializers.CharField(required=True, label="合同编码", help_text="示例:ding")
class Meta:
model = NoneMeta
exclude = ["id"]
class ContarctVolumeSeasonalSerializer(serializers.ModelSerializer):
# contract_number = serializers.CharField(required=True, label="合同编码", help_text="示例:ding")
start_date = serializers.DateField(required=True, label="开始日期", help_text="示例:2019-09-26")
end_date = serializers.DateField(required=True, label="结束日期", help_text="示例:2019-09-26")
class Meta:
model = NoneMeta
exclude = ["id"]
class MaterialIpContract(serializers.ModelSerializer):
contract_number = serializers.CharField(required=True, label="合同编码", help_text="示例:ding")
contract_ip_date = serializers.DateField(required=True, label="所查询的日期", help_text="示例:2019-10-14")
class Meta:
model = NoneMeta
exclude = ["id"]
class FloatingIPCalculaterInRangeParamDictSerializer(serializers.ModelSerializer):
contract_number = serializers.CharField(default=None)
project_id = serializers.CharField(required=False)
account_id = serializers.CharField(required=False)
class Meta:
model = NoneMeta
fields = ["contract_number", "project_id", "account_id"]
class FloatingIPCalculaterInRangeSerializer(serializers.ModelSerializer):
start_date = serializers.DateField(required=True, label="开始日期", help_text="示例:2019-09-26")
end_date = serializers.DateField(required=True, label="结束日期", help_text="示例:2019-09-26")
contract_number = serializers.CharField(required=False, label="合同编号", help_text="示例:w20190924")
project_id = serializers.CharField(required=False, label="项目id",
help_text="示例:7ae5a60714014778baddea703b85cd93")
account_id = serializers.CharField(required=False, label="用户id",
help_text="示例:c46c3de5d6984241ad21e8fe45761059")
class Meta:
model = NoneMeta
fields = ["start_date", "end_date", "contract_number", "project_id", "account_id"]
class BucketUpdateSeraializer(serializers.ModelSerializer):
container_name = serializers.CharField(required=True, label="桶名称", help_text="示例:ding")
is_public = serializers.BooleanField(required=False, default=False, label="是否公有", help_text="示例:True/False")
class Meta:
model = NoneMeta
fields = ['container_name', 'is_public']
class BucketShowSerializer(serializers.ModelSerializer):
container_name = serializers.CharField(required=True, label="桶名称", help_text="示例:container_jacky_test")
path = serializers.CharField(required=False, label="路径", help_text="示例:container_jacky_test")
class Meta:
model = NoneMeta
fields = ['container_name', 'path']
class BucketCreateDirSerializer(serializers.ModelSerializer):
container_name = serializers.CharField(required=True)
floder_name = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['container_name', 'floder_name']
class BucketShowObjectSerializer(serializers.ModelSerializer):
container_name = serializers.CharField(required=True, help_text="示例:韩总Q1_-")
object_name = serializers.CharField(required=True, help_text="object_name为目录时末尾加斜杠/")
class Meta:
model = NoneMeta
fields = ['container_name', 'object_name']
class BucketDeleteObjectSerializer(serializers.ModelSerializer):
container_name = serializers.CharField(required=True)
object_names = serializers.ListField(required=True,
help_text="""object_name为目录时末尾加斜杠/, ['ccc/ddd/, 'ccc/eee/d']""")
class Meta:
model = NoneMeta
fields = ['container_name', 'object_names']
class BucketUploadParamOjbectSerializer(serializers.ModelSerializer):
object_name = serializers.CharField(required=True)
# file = serializers.FileField(required=True)
container_name = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['container_name', 'object_name']
class BucketCopyObjectSerializer(serializers.ModelSerializer):
src_container_name = serializers.CharField(required=True)
src_object_name = serializers.CharField(required=True,
help_text="""object_name为目录时末尾加斜杠/, ['ccc/ddd/, 'ccc/eee/d']""")
dst_container_name = serializers.CharField(required=True)
dst_object_name = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['src_container_name', 'src_object_name',
'dst_container_name', 'dst_object_name']
class CreateLoadBalancersSerializer(serializers.Serializer):
floating_ip_order_info = ServiceOrderCreateSerializer(required=False)
loadbalance_order_info = ServiceOrderCreateSerializer(required=False)
floating_ip_info = ServiceBmServiceFloatingIp(required=False)
firewall_id = serializers.CharField(required=False, label="防火墙id 示例:5d68dbc679580c3d577bca3e")
name = serializers.CharField(required=True)
flavor_id = serializers.CharField(required=False)
vip_network_id = serializers.CharField(required=True)
location = serializers.CharField(required=False)
is_public = serializers.BooleanField(required=False)
network_name = serializers.CharField(required=False)
public_ip_id = serializers.CharField(required=False)
vpc_name = serializers.CharField(required=False)
vpc_id = serializers.CharField(required=False)
listener_count = serializers.CharField(required=False)
pool_count = serializers.CharField(required=False)
is_new_ip = serializers.BooleanField(required=False)
class CreateListenerSerializer(serializers.Serializer):
whether_insert_headers = serializers.CharField(required=False, help_text="判断监听器是否需要获取客户端真实IP的头字典")
listener_name = serializers.CharField(required=True, help_text="监听器名称")
loadbalancer_id = serializers.CharField(required=True, help_text="负载均衡器ID")
protocol = serializers.CharField(required=True, help_text="监听器协议")
protocol_port = serializers.IntegerField(required=True, help_text="监听器协议端口")
timeout_member_connect = serializers.IntegerField(required=True, help_text="监听器成员连接超时时限(秒)")
class CreateSNIListenerSerializer(serializers.Serializer):
default_tls_container_ref = serializers.CharField(required=False, help_text="监听器SNI证书管理处的引用")
listener_name = serializers.CharField(required=True, help_text="监听器名称")
loadbalancer_id = serializers.CharField(required=True, help_text="负载均衡器ID")
protocol = serializers.CharField(required=True, help_text="监听器协议")
protocol_port = serializers.IntegerField(required=True, help_text="监听器协议端口")
sni_container_refs = serializers.ListField(required=False, help_text="监听器SNI证书的引用(PKCS12格式)")
timeout_member_connect = serializers.IntegerField(required=False, help_text="监听器成员连接超时时限(秒)")
class UpdateListenerSerializer(serializers.Serializer):
whether_insert_headers = serializers.CharField(required=False, help_text="判断监听器是否需要获取客户端真实IP的头字典")
loadbalancer_id = serializers.CharField(required=True, help_text="负载均衡器ID")
protocol = serializers.CharField(required=True, help_text="监听器协议")
protocol_port = serializers.IntegerField(required=True, help_text="监听器协议端口")
listener_name = serializers.CharField(required=False, help_text="监听器名称")
timeout_member_connect = serializers.IntegerField(required=False, help_text="监听器成员连接超时时限(秒)")
default_pool_id = serializers.CharField(required=True, help_text="监听器所绑定的资源池")
class DeleteCertificatesSerializer(serializers.Serializer):
crt_name = serializers.CharField(required=False, help_text="SNI证书名称")
crt_domain = serializers.IntegerField(required=False, help_text="SNI证书对应域名")
class UpdateCertificatesSerializer(serializers.Serializer):
crt_name = serializers.CharField(required=False, help_text="SNI证书名称")
crt_domain = serializers.IntegerField(required=False, help_text="SNI证书对应域名")
class CreateCertificatesSerializer(serializers.Serializer):
crt_name = serializers.CharField(required=True, help_text="Secret名称")
crt_type = serializers.CharField(required=True, help_text="SNI证书类型")
crt_contents = serializers.CharField(required=True, help_text="Secret内容")
crt_secret_key = serializers.CharField(required=False, help_text="SNI证书私钥")
crt_domain = serializers.CharField(required=False, help_text="当前SNI证书对应域名")
class CreateSecretsSerializer(serializers.Serializer):
crt_name = serializers.CharField(required=True, help_text="Secret名称")
# crt_type = serializers.CharField(required=True, help_text="SNI证书类型")
crt_contents = serializers.CharField(required=True, help_text="Secret内容")
# crt_secret_key = serializers.CharField(required=False, help_text="SNI证书私钥")
# crt_domain = serializers.CharField(required=False, help_text="当前SNI证书对应域名")
class CreatePoliciesSerializer(serializers.Serializer):
# admin_state_up = serializers.BooleanField(required=False)
name = serializers.CharField(required=False)
# position = serializers.IntegerField(required=False)
# redirect_http_code = serializers.IntegerField(required=False)
redirect_pool_id = serializers.CharField(required=False)
# redirect_prefix = serializers.CharField(required=False)
# redirect_url = serializers.CharField(required=False)
# tags = serializers.ListField(required=False)
domain_compare_type = serializers.CharField(required=False, default="EQUAL_TO")
domain_name = serializers.CharField(required=False)
url_compare_type = serializers.CharField(required=False, default="STARTS_WITH")
url = serializers.CharField(required=True, )
class CreateSessionSerializer(serializers.ModelSerializer):
type = serializers.CharField(required=True)
persistence_timeout = serializers.CharField(required=False)
cookie_name = serializers.CharField(required=False)
class Meta:
model = NoneMeta
fields = ['type', 'persistence_timeout', 'cookie_name']
class CreatePoolsSerializer(serializers.Serializer):
lb_algorithm = serializers.CharField(required=True)
loadbalancer_id = serializers.CharField(required=False)
listener_id = serializers.CharField(required=False)
name = serializers.CharField(required=False)
protocol = serializers.CharField(required=True)
session_persistence = CreateSessionSerializer(required=False)
is_keep_session = serializers.BooleanField(required=False)
class CreateHealthMonitorSerializer(serializers.Serializer):
delay = serializers.IntegerField(required=True, help_text="健康检查器检查间隔(秒)")
max_retries = serializers.IntegerField(required=True, help_text="转为ONLINE状态前健康检查器最大重试次数(次)")
timeout = serializers.IntegerField(required=True, help_text="健康检查器超时时间(秒)")
pool_id = serializers.CharField(required=True, help_text="健康检查器将要绑定的资源池ID")
type = serializers.CharField(required=True,
help_text="健康检查器协议类型(One of HTTP, HTTPS, PING, TCP, TLS-HELLO, or UDP-CONNECT)")
url_path = serializers.CharField(required=False, help_text="健康检查器的请求的HTTP URL路径;必须以'/'开头,default:/")
# expected_codes = serializers.CharField(required=False,
# help_text="后端成员的预期HTTP状态码;单个状态码'200',或列表[200, 202],或状态码范围200-204,default:200")
class CreatePoliciesRuleSerializer(serializers.Serializer):
compare_type = serializers.CharField(required=False)
type = serializers.CharField(required=False)
value = serializers.CharField(required=False)
class AttachPoolMemberSerializer(serializers.ModelSerializer):
name = serializers.CharField(required=False, help_text="成员名称")
private_address = serializers.CharField(required=True, help_text="当前成员private IP地址")
public_address = serializers.CharField(required=False, help_text="当前成员public IP地址")
protocol_port = serializers.IntegerField(required=True, help_text="当前成员端口号")
weight = serializers.IntegerField(required=True, help_text="当前成员权重")
instance_id = serializers.CharField(required=True, help_text="添加的裸金属实例id")
loadbancer_id = serializers.CharField(required=True, help_text="当前资源池所对应的负载均衡名称")
class Meta:
model = NoneMeta
fields = ["name", "private_address", "public_address", "protocol_port", "weight", "instance_id",
"loadbancer_id"]
class AttachPoolMemberListSerializer(serializers.ModelSerializer):
member_obj_list = AttachPoolMemberSerializer(many=True)
class Meta:
model = NoneMeta
fields = ["member_obj_list"]
class CreatePoolMemberSerializer(serializers.Serializer):
address = serializers.CharField(required=True, help_text="当前成员IP地址")
protocol_port = serializers.IntegerField(required=True, help_text="当前成员端口号")
weight = serializers.IntegerField(required=True, help_text="当前成员权重")
class UpdateLoadBalancersSerializer(serializers.Serializer):
name = serializers.CharField(required=False)
admin_state_up = serializers.BooleanField(required=False)
class UpdatePoliciesSerializer(serializers.Serializer):
redirect_pool_id = serializers.CharField(required=False)
name = serializers.CharField(required=False)
class UpdatePoolSerializer(serializers.Serializer):
lb_algorithm = serializers.CharField(required=True)
name = serializers.CharField(required=False)
session_persistence = CreateSessionSerializer(required=False)
is_keep_session = serializers.BooleanField(required=False)
class UpdateHealthMonitorSerializer(serializers.Serializer):
delay = serializers.IntegerField(required=True, help_text="健康检查器检查间隔(秒)")
max_retries = serializers.IntegerField(required=True, help_text="转为ONLINE状态前健康检查器最大重试次数(次)")
timeout = serializers.IntegerField(required=True, help_text="健康检查器超时时间(秒)")
url_path = serializers.CharField(required=False, help_text="健康检查器的请求的HTTP URL路径;必须以'/'开头,default:/")
class UpdatePoolsSerializer(serializers.ModelSerializer):
admin_state_up = serializers.BooleanField(required=False)
ca_tls_container_ref = serializers.CharField(required=False)
crl_container_ref = serializers.CharField(required=False)
description = serializers.CharField(required=False)
lb_algorithm = serializers.CharField(required=False)
name = serializers.CharField(required=False)
session_persistence = serializers.CharField(required=False)
tags = serializers.ListField(required=False)
tls_enabled = serializers.BooleanField(required=False)
tls_container_ref = serializers.CharField(required=False)
class Meta:
model = NoneMeta
fields = ['admin_state_up', 'ca_tls_container_ref', 'crl_container_ref', 'description',
'lb_algorithm', 'listener_id', 'loadbalancer_id', 'name',
'session_persistence', 'tags', "tls_enabled", "tls_container_ref"]
class UpdatePoliciesRuleSerializer(serializers.ModelSerializer):
admin_state_up = serializers.BooleanField(required=False)
compare_type = serializers.CharField(required=False)
invert = serializers.BooleanField(required=False)
key = serializers.CharField(required=False)
tags = serializers.ListField(required=False)
type = serializers.CharField(required=False)
value = serializers.CharField(required=False)
class Meta:
model = NoneMeta
fields = ['admin_state_up', 'compare_type', 'invert', 'key',
'tags', "type", "value"]
class UpdatePoolMemberSerializer(serializers.Serializer):
# protocol_port = serializers.IntegerField(required=True, help_text="当前成员监控端口")
weight = serializers.IntegerField(required=True, help_text="当前成员权重")
class ShowLoadBalancersSerializer(serializers.Serializer):
# loadbalancer_id = serializers.UUIDField(required=True)
fields = serializers.CharField(required=False, help_text="可提供参数值,按需返回;如果该参数为空,返回默认策略设置的所有属性")
class ListenerDeatilSerializer(serializers.ModelSerializer):
listener_id = serializers.UUIDField(required=True)
class Meta:
model = NoneMeta
fields = ['listener_id']
class GetBalancersStatisticsSerializer(serializers.ModelSerializer):
loadbalancer_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['loadbalancer_id']
class InstanceInterfaceAttachmentSerializer(serializers.ModelSerializer):
server_id = serializers.CharField(required=True)
net_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['server_id', 'net_id']
class InstanceInterfaceDetSerializer(serializers.ModelSerializer):
server_id = serializers.CharField(required=True)
port_ids = serializers.ListField(required=True)
class Meta:
model = NoneMeta
fields = ['server_id', 'port_ids']
class InstancePostSerializer(serializers.ModelSerializer):
name = serializers.CharField(required=True)
image = serializers.CharField(required=True)
flavor = serializers.CharField(required=True)
network_dict = serializers.DictField(required=True)
class Meta:
model = NoneMeta
fields = ['name', 'image', 'flavor', 'network_dict']
class InstStopSerializer(serializers.ModelSerializer):
server_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['server_id']
class InstRestartSerializer(serializers.ModelSerializer):
server_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['server_id']
class InstStartSerializer(serializers.ModelSerializer):
server_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['server_id']
class InstanceDelSerializer(serializers.ModelSerializer):
server_id_list = serializers.ListField(required=True)
class Meta:
model = NoneMeta
fields = ['server_id_list']
class InstanceIdListSerializer(serializers.ModelSerializer):
server_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['server_id']
class CreateVpcsSerializer(serializers.ModelSerializer):
vpc_name = serializers.CharField(required=True)
region = serializers.CharField(required=True)
net_name = serializers.CharField(required=True)
vpc_type = serializers.CharField(required=True)
cidr = serializers.CharField(required=True)
enable_dhcp = serializers.BooleanField(required=False)
gateway_ip = serializers.CharField(required=False)
dns = serializers.ListField(required=False)
class Meta:
model = NoneMeta
fields = ['vpc_name', 'region', 'net_name', 'cidr',
'enable_dhcp', 'gateway_ip', 'dns', 'vpc_type']
class UpdateVpcsSerializer(serializers.ModelSerializer):
vpc_id = serializers.IntegerField(required=True)
name = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['vpc_id', 'name']
class DleleteVpcsSerializer(serializers.ModelSerializer):
vpc_ids = serializers.ListField(required=True)
class Meta:
model = NoneMeta
fields = ['vpc_ids']
class ListVpcNetworks(serializers.ModelSerializer):
vpc_id = serializers.IntegerField(required=True)
class Meta:
model = NoneMeta
fields = ['vpc_id']
class PostVpcNetwork(serializers.ModelSerializer):
vpc_id = serializers.IntegerField(required=True)
net_name = serializers.CharField(required=True)
cidr = serializers.CharField(required=True)
enable_dhcp = serializers.BooleanField(required=False)
gateway_ip = serializers.CharField(required=False)
dns = serializers.ListField(required=False)
class Meta:
model = NoneMeta
fields = ['vpc_id', 'net_name', 'cidr',
'enable_dhcp', 'gateway_ip', 'dns']
class PutVpcNetwork(serializers.ModelSerializer):
vpc_id = serializers.IntegerField(required=True)
net_id = serializers.CharField(required=True)
net_name = serializers.CharField(required=False)
enable_dhcp = serializers.BooleanField(required=False)
dns = serializers.ListField(required=False)
class Meta:
model = NoneMeta
fields = ['vpc_id', 'net_id', 'net_name',
'enable_dhcp', 'dns']
class DelVpcNetwork(serializers.ModelSerializer):
vpc_id = serializers.IntegerField(required=True)
net_id = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['vpc_id', 'net_id']
class NatGateWayCreate(serializers.ModelSerializer):
order_info = ServiceOrderCreateSerializer(required=True, label="订单信息", help_text="示例:")
name = serializers.CharField(required=True)
vpc_id = serializers.IntegerField(required=True)
description = serializers.CharField(required=False)
class Meta:
model = NoneMeta
fields = ['order_info', 'name', 'vpc_id', 'description']
class DnatRuleCreate(serializers.ModelSerializer):
scenes = serializers.CharField(required=False, label="使用场景", default="VPC")
floatingip_id = serializers.CharField(required=True, label="floating ip id")
floating_ip_address = serializers.CharField(required=True, label='floating ip 地址')
external_port = serializers.IntegerField(required=True, label="公网端口")
protocol = serializers.ChoiceField(choices=['tcp', 'udp'])
internal_ip_address = serializers.CharField(required=True, label="内网ip地址")
internal_port_id = serializers.CharField(required=True, label="内网port id")
internal_port = serializers.IntegerField(required=True, label="内网端口号")
description = serializers.CharField(required=False, label="描述")
class Meta:
model = NoneMeta
fields = ['scenes', 'floatingip_id', 'floating_ip_address', 'external_port', 'protocol',
'internal_ip_address', 'internal_port_id', 'internal_port', 'description']
class DnatRuleUpdate(serializers.ModelSerializer):
external_port = serializers.IntegerField(required=True, label="公网端口")
protocol = serializers.ChoiceField(choices=['tcp', 'udp'])
internal_ip_address = serializers.CharField(required=True, label="内网ip地址")
internal_port_id = serializers.CharField(required=True, label="内网port id")
internal_port = serializers.IntegerField(required=True, label="内网端口号")
description = serializers.CharField(required=False, label="描述")
class Meta:
model = NoneMeta
fields = ['external_port', 'protocol',
'internal_ip_address', 'internal_port_id', 'internal_port',
'description']
class NatGateWayUpdate(serializers.ModelSerializer):
name = serializers.CharField(required=True)
description = serializers.CharField(required=True)
class Meta:
model = NoneMeta
fields = ['name', 'description']
class DnatRuleDelete(serializers.ModelSerializer):
nat_rules = serializers.ListField(required=True, label='nat规则列表')
class Meta:
model = NoneMeta
fields = ['nat_rules']
class ServiceBandWidth(serializers.ModelSerializer):
class Meta:
model = service_model.BmShareBandWidth
exclude = ['id', 'order_id', 'account_id', 'project_id',
'create_at', 'update_at', 'is_measure_end',
'contract_number', 'first_create_at',
'shared_bandwidth_id', "contract_id",
'status', 'deleted_at']
class BandWidthCreateSerializer(serializers.Serializer):
order_info = ServiceOrderCreateSerializer(required=True)
bandwidth_info = ServiceBandWidth(required=True)
contract_id = serializers.CharField(required=True)
class BandWidthUpdateNameSerializer(serializers.Serializer):
bandwidth_id = serializers.CharField(required=True)
name = serializers.CharField(required=False)
class BandWidthUpdateSerializer(serializers.Serializer):
order_info = ServiceOrderAlterationSerializer(required=False)
bandwidth_id = serializers.CharField(required=True)
name = serializers.CharField(required=False)
max_kbps = serializers.IntegerField(required=False)
class BandWidthDeleteSerializer(serializers.Serializer):
bandwidth_ids = serializers.ListField(required=True)
class BandWidthDetailsSerializer(serializers.Serializer):
fields_key = serializers.CharField(required=False, help_text="取值:name, fip")
fields_value = serializers.CharField(required=False)
class SharedBWFipAttachSerializer(serializers.Serializer):
floating_ip_ids = serializers.ListField(required=True)
class SharedBWFipDetachSerializer(serializers.Serializer):
order_info = ServiceOrderAlterationSerializer(required=False)
floating_ip_ids = serializers.ListField(required=True)
qos_policy_name = serializers.CharField(required=True, help_text="150M")
service_count = serializers.IntegerField(required=True, help_text="1")
| 39.44404 | 123 | 0.727122 |
794992c4f0c79775e61c16ae7330e2f96a83c284
| 2,633 |
py
|
Python
|
projects/cats/gui_files/leaderboard_integrity.py
|
jjllzhang/CS61A
|
57b68c7c06999210d96499f6d84e4ec99085d396
|
[
"MIT"
] | 8 |
2020-07-28T11:10:49.000Z
|
2021-05-29T15:27:17.000Z
|
11-Project-CATS/cats/cats/gui_files/leaderboard_integrity.py
|
ericchen12377/CS61A_LearningDoc
|
31f23962b0e2834795bf61eeb0f4884cc5da1809
|
[
"MIT"
] | null | null | null |
11-Project-CATS/cats/cats/gui_files/leaderboard_integrity.py
|
ericchen12377/CS61A_LearningDoc
|
31f23962b0e2834795bf61eeb0f4884cc5da1809
|
[
"MIT"
] | 1 |
2022-02-13T08:28:27.000Z
|
2022-02-13T08:28:27.000Z
|
import base64
import json
import os
import random
import time
from functools import wraps
from queue import Queue
from threading import Thread
import cats
fernet = None
COMMON_WORDS_SET = set(cats.lines_from_file('data/common_words.txt'))
CAPTCHA_QUEUE_LEN = 200
CAPTCHA_LENGTH = 10
CAPTCHA_WORD_LEN = 6
captcha_queue = Queue()
def require_fernet(f):
@wraps(f)
def wrapped(*args, **kwargs):
global fernet
if not fernet:
from cryptography.fernet import Fernet
fernet = Fernet(os.environ.get("FERNET_KEY", Fernet.generate_key()))
return f(*args, **kwargs)
return wrapped
def token_writer(f):
@wraps(f)
@require_fernet
def wrapped(*args, **kwargs):
data = f(*args, **kwargs)
decoded = json.dumps(data).encode("utf-8")
return fernet.encrypt(decoded).decode("utf-8")
return wrapped
def token_reader(fail):
def decorator(f):
@wraps(f)
@require_fernet
def wrapped(*, token, **kwargs):
from cryptography.fernet import InvalidToken
if not token:
return fail
try:
return f(token=json.loads(fernet.decrypt(token.encode("utf-8"))), **kwargs)
except (TypeError, InvalidToken):
return fail
return wrapped
return decorator
@token_writer
def create_wpm_authorization(user, wpm):
return {
"user": user,
"wpm": wpm,
}
@token_reader(fail=0)
def get_authorized_limit(user, token):
if token["user"] != user:
return 0
return token["wpm"]
@token_writer
def encode_challenge(user, words):
return {
"user": user,
"words": words,
"startTime": time.time(),
}
@token_reader(fail=(False, False))
def decode_challenge(token):
return token["user"], token["words"], token["startTime"]
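# Illustrative sketch (not part of the original module): the token writer/reader
# pair above round-trips an encrypted claim; the user name and WPM value here are
# placeholders.
def _example_wpm_token_round_trip():
    token = create_wpm_authorization("demo_user", 120)  # Fernet-encrypted JSON string
    assert get_authorized_limit(user="demo_user", token=token) == 120
    assert get_authorized_limit(user="someone_else", token=token) == 0   # wrong user -> fail value
    assert get_authorized_limit(user="demo_user", token="not-a-token") == 0  # InvalidToken -> fail value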
def populate_captcha_queue():
while captcha_queue.qsize() < CAPTCHA_QUEUE_LEN:
captcha_queue.put(generate_captcha())
def generate_captcha():
from claptcha import Claptcha
word = random.choice([x for x in COMMON_WORDS_SET if len(x) < CAPTCHA_LENGTH])
c = Claptcha(word, "gui_files/FreeMono.ttf", margin=(20, 10))
image_b64 = base64.b64encode(c.bytes[1].getvalue()).decode("utf-8")
return "data:image/png;base64," + image_b64, word
def get_captcha_urls(num_words=CAPTCHA_LENGTH):
Thread(target=populate_captcha_queue).start()
images, words = [], []
for _ in range(num_words):
image, word = captcha_queue.get()
images.append(image)
words.append(word)
return images, words
| 23.508929 | 91 | 0.643752 |
7949944cb5c8f3d960ba0835c71b50de9030b414
| 2,809 |
py
|
Python
|
src/pymor/__init__.py
|
pdiercks/pymor
|
e94f05714d666a929113543c49e88f8f494d64e1
|
[
"Unlicense"
] | null | null | null |
src/pymor/__init__.py
|
pdiercks/pymor
|
e94f05714d666a929113543c49e88f8f494d64e1
|
[
"Unlicense"
] | 4 |
2022-03-17T10:07:38.000Z
|
2022-03-30T12:41:06.000Z
|
src/pymor/__init__.py
|
pdiercks/pymor
|
e94f05714d666a929113543c49e88f8f494d64e1
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import os
def _init_mpi():
"""provides a way to manually set the thread init mode for MPI if necessary.
Needs to happen as early as possible, otherwise mpi4py might auto-init somewhere else.
"""
try:
import mpi4py
except ImportError:
return
# only change finalize setting if unset
finalize = (mpi4py.rc.finalize is None) or mpi4py.rc.finalize
mpi4py.rc(initialize=False, finalize=finalize)
from mpi4py import MPI
if not MPI.Is_initialized():
required_level = int(os.environ.get('PYMOR_MPI_INIT_THREAD', MPI.THREAD_MULTIPLE))
supported_lvl = MPI.Init_thread(required_level)
if supported_lvl < required_level:
            print(f'MPI does not support threading level {required_level}, running with {supported_lvl} instead', flush=True)
try:
# this solves sporadic mpi calls happening after finalize
import petsc4py
petsc4py.init()
except ImportError:
return
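# Usage note (added comment, not in the original file): the thread level can be
# pinned before pyMOR is imported by exporting PYMOR_MPI_INIT_THREAD with an
# mpi4py threading constant, e.g. (shell sketch, exact value depends on the MPI build):
#   export PYMOR_MPI_INIT_THREAD=$(python -c "from mpi4py import MPI; print(int(MPI.THREAD_SERIALIZED))")
#   mpirun -n 4 python your_script.py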
_init_mpi()
from pymor.core.config import config
from pymor.core.defaults import load_defaults_from_file
import pymor.version as _version
revstring = _version.get_versions()['version']
__version__ = str(revstring)
if 'PYMOR_DEFAULTS' in os.environ:
filename = os.environ['PYMOR_DEFAULTS']
if filename in ('', 'NONE'):
print('Not loading any pyMOR defaults from config file')
else:
for fn in filename.split(':'):
if not os.path.exists(fn):
raise IOError('Cannot load pyMOR defaults from file ' + fn)
print('Loading pyMOR defaults from file ' + fn + ' (set by PYMOR_DEFAULTS)')
load_defaults_from_file(fn)
else:
filename = os.path.join(os.getcwd(), 'pymor_defaults.py')
if os.path.exists(filename):
if os.stat(filename).st_uid != os.getuid():
raise IOError('Cannot load pyMOR defaults from config file ' + filename
+ ': not owned by user running Python interpreter')
print('Loading pyMOR defaults from file ' + filename)
load_defaults_from_file(filename)
from pymor.core.logger import set_log_levels, set_log_format
set_log_levels()
set_log_format()
from pymor.tools import mpi
if mpi.parallel and mpi.event_loop_settings()['auto_launch']:
if mpi.rank0:
import atexit
@atexit.register
def quit_event_loop():
if not mpi.finished:
mpi.quit()
else:
print(f'Rank {mpi.rank}: MPI parallel run detected. Launching event loop ...')
mpi.event_loop()
import sys
sys.exit(0)
| 35.556962 | 121 | 0.673549 |
794994c4e2b44e4ea842caa941d2387d73afffc5
| 5,402 |
py
|
Python
|
waldur_core/cost_tracking/tests/test_price_list_item.py
|
opennode/nodeconductor
|
d6c17a9592bb6c49c33567542eef8d099605a46a
|
[
"MIT"
] | 23 |
2015-01-15T13:29:53.000Z
|
2017-05-04T05:12:24.000Z
|
waldur_core/cost_tracking/tests/test_price_list_item.py
|
opennode/nodeconductor
|
d6c17a9592bb6c49c33567542eef8d099605a46a
|
[
"MIT"
] | null | null | null |
waldur_core/cost_tracking/tests/test_price_list_item.py
|
opennode/nodeconductor
|
d6c17a9592bb6c49c33567542eef8d099605a46a
|
[
"MIT"
] | 8 |
2015-01-11T18:51:47.000Z
|
2017-06-29T18:53:12.000Z
|
from ddt import ddt, data
from rest_framework import status
from waldur_core.structure.tests import factories as structure_factories
from . import factories
from .base_test import BaseCostTrackingTest
from .. import models
@ddt
class PriceListItemListTest(BaseCostTrackingTest):
def setUp(self):
super(PriceListItemListTest, self).setUp()
self.price_list_item = factories.PriceListItemFactory(service=self.service)
@data('staff', 'owner', 'manager')
def test_user_with_access_to_service_can_see_services_price_list(self, user):
self.client.force_authenticate(self.users[user])
response = self.client.get(factories.PriceListItemFactory.get_list_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(self.price_list_item.uuid.hex, [el['uuid'] for el in response.data])
@data('administrator')
def test_user_without_access_to_service_cannot_see_services_price_list(self, user):
self.service_project_link.delete()
self.client.force_authenticate(self.users[user])
response = self.client.get(factories.PriceListItemFactory.get_list_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn(self.price_list_item.uuid.hex, [el['uuid'] for el in response.data])
def test_price_list_can_be_filtered_by_service(self):
other_price_list_item = factories.PriceListItemFactory()
self.client.force_authenticate(self.users['staff'])
response = self.client.get(
factories.PriceListItemFactory.get_list_url(),
data={'service': structure_factories.TestServiceFactory.get_url(self.service)}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(self.price_list_item.uuid.hex, [el['uuid'] for el in response.data])
self.assertNotIn(other_price_list_item.uuid.hex, [el['uuid'] for el in response.data])
@ddt
class PriceListItemCreateTest(BaseCostTrackingTest):
def setUp(self):
super(PriceListItemCreateTest, self).setUp()
self.default_price_list_item = factories.DefaultPriceListItemFactory()
self.valid_data = {
'service': structure_factories.TestServiceFactory.get_url(self.service),
'default_price_list_item': factories.DefaultPriceListItemFactory.get_url(self.default_price_list_item),
'value': 100,
'units': 'UAH'
}
@data('staff', 'owner')
def test_user_with_permissions_can_create_price_list_item(self, user):
self.client.force_authenticate(self.users[user])
response = self.client.post(factories.PriceListItemFactory.get_list_url(), data=self.valid_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.PriceListItem.objects.filter(
service=self.service,
value=self.valid_data['value'],
default_price_list_item=self.default_price_list_item).exists())
@data('manager', 'administrator')
def test_user_without_permissions_cannot_create_price_list_item(self, user):
self.client.force_authenticate(self.users[user])
response = self.client.post(factories.PriceListItemFactory.get_list_url(), data=self.valid_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, str(response.data) + " " + user)
self.assertFalse(models.PriceListItem.objects.filter(
service=self.service,
value=self.valid_data['value'],
default_price_list_item=self.default_price_list_item).exists())
def test_if_price_list_item_already_exists_validation_error_is_raised(self):
self.client.force_authenticate(self.users['staff'])
response = self.client.post(factories.PriceListItemFactory.get_list_url(), data=self.valid_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(factories.PriceListItemFactory.get_list_url(), data=self.valid_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@ddt
class PriceListItemUpdateTest(BaseCostTrackingTest):
def setUp(self):
super(PriceListItemUpdateTest, self).setUp()
self.price_list_item = factories.PriceListItemFactory(service=self.service)
@data('staff', 'owner')
def test_user_with_permissions_can_update_price_list_item(self, user):
self.client.force_authenticate(self.users[user])
data = {'value': 200}
response = self.client.patch(factories.PriceListItemFactory.get_url(self.price_list_item), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
reread_price_list_item = models.PriceListItem.objects.get(id=self.price_list_item.id)
self.assertEqual(reread_price_list_item.value, data['value'])
# We do not execute this test for administrator, because he does not see price estimates at all
@data('manager')
def test_user_without_permissions_cannot_update_price_list_item(self, user):
self.client.force_authenticate(self.users[user])
data = {'items': [{'name': 'cpu', 'value': 1000, 'units': 'USD per CPU'}]}
response = self.client.patch(factories.PriceListItemFactory.get_url(self.price_list_item), data=data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 45.779661 | 115 | 0.735468 |
7949980584e76fae0bc7a8d70b5c5d9ea227d39a
| 18,477 |
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20181001/security_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31 |
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20181001/security_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231 |
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20181001/security_rule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4 |
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SecurityRule']
class SecurityRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'SecurityRuleAccess']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
destination_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
direction: Optional[pulumi.Input[Union[str, 'SecurityRuleDirection']]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_security_group_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[Union[str, 'SecurityRuleProtocol']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Network security rule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'SecurityRuleAccess']] access: The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] destination_application_security_groups: The application security group specified as destination.
:param pulumi.Input[str] destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
:param pulumi.Input[Union[str, 'SecurityRuleDirection']] direction: The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] network_security_group_name: The name of the network security group.
:param pulumi.Input[int] priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param pulumi.Input[Union[str, 'SecurityRuleProtocol']] protocol: Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
:param pulumi.Input[str] provisioning_state: The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] security_rule_name: The name of the security rule.
:param pulumi.Input[str] source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: The CIDR or source IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] source_application_security_groups: The application security group specified as source.
:param pulumi.Input[str] source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if access is None and not opts.urn:
raise TypeError("Missing required property 'access'")
__props__['access'] = access
__props__['description'] = description
__props__['destination_address_prefix'] = destination_address_prefix
__props__['destination_address_prefixes'] = destination_address_prefixes
__props__['destination_application_security_groups'] = destination_application_security_groups
__props__['destination_port_range'] = destination_port_range
__props__['destination_port_ranges'] = destination_port_ranges
if direction is None and not opts.urn:
raise TypeError("Missing required property 'direction'")
__props__['direction'] = direction
__props__['etag'] = etag
__props__['id'] = id
__props__['name'] = name
if network_security_group_name is None and not opts.urn:
raise TypeError("Missing required property 'network_security_group_name'")
__props__['network_security_group_name'] = network_security_group_name
__props__['priority'] = priority
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['security_rule_name'] = security_rule_name
__props__['source_address_prefix'] = source_address_prefix
__props__['source_address_prefixes'] = source_address_prefixes
__props__['source_application_security_groups'] = source_application_security_groups
__props__['source_port_range'] = source_port_range
__props__['source_port_ranges'] = source_port_ranges
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/latest:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150615:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160330:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20161201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200501:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200801:SecurityRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityRule, __self__).__init__(
'azure-nextgen:network/v20181001:SecurityRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityRule':
"""
Get an existing SecurityRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return SecurityRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def access(self) -> pulumi.Output[str]:
"""
The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@property
@pulumi.getter(name="destinationApplicationSecurityGroups")
def destination_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as destination.
"""
return pulumi.get(self, "destination_application_security_groups")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@property
@pulumi.getter
def direction(self) -> pulumi.Output[str]:
"""
The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[int]]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
        The provisioning state of the security rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@property
@pulumi.getter(name="sourceApplicationSecurityGroups")
def source_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as source.
"""
return pulumi.get(self, "source_application_security_groups")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
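# Illustrative usage sketch (added for clarity; not part of the generated SDK).
# The import path and all resource/group names below are assumptions, not taken
# from this file:
#
#     import pulumi_azure_nextgen.network.v20181001 as network  # assumed module path
#
#     rule = network.SecurityRule(
#         "allow-https",
#         resource_group_name="example-rg",               # hypothetical
#         network_security_group_name="example-nsg",      # hypothetical
#         security_rule_name="allow-https",
#         access="Allow",
#         direction="Inbound",
#         protocol="Tcp",
#         priority=310,
#         source_address_prefix="*",
#         source_port_range="*",
#         destination_address_prefix="*",
#         destination_port_range="443")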
| 61.795987 | 2,495 | 0.697083 |
79499a6ef6b66fd7630e53f9e9779b674794daae
| 1,477 |
py
|
Python
|
examples/simple_identification.py
|
murad9001/bluesquall.recurvise_least_squares
|
83510c575a446ae4be4d031fb16cc42a5724c12f
|
[
"MIT"
] | 8 |
2015-05-27T08:00:27.000Z
|
2021-06-02T04:07:43.000Z
|
examples/simple_identification.py
|
murad9001/bluesquall.recurvise_least_squares
|
83510c575a446ae4be4d031fb16cc42a5724c12f
|
[
"MIT"
] | null | null | null |
examples/simple_identification.py
|
murad9001/bluesquall.recurvise_least_squares
|
83510c575a446ae4be4d031fb16cc42a5724c12f
|
[
"MIT"
] | 4 |
2017-07-19T10:59:55.000Z
|
2021-11-24T20:47:08.000Z
|
#!/usr/bin/env python
"""
Simple RLS identification example
=================================
"""
import numpy as np
import rlspy
def generate_random_truth_data(order = 3, sigma = 1):
return np.random.normal(0, sigma, [order, 1])
def generate_random_coupling_matrix(shape = [4, 3]):
return np.random.normal(0, 1, shape)
def generate_noisy_measurements(A, x, sigma):
return np.dot(A, x) + np.random.normal(0, sigma)
def example(order = 3, morder = 4, N = 20):
x = generate_random_truth_data(order, 1)
    A = [generate_random_coupling_matrix([morder, order]) for i in range(N)]
sm = 1e-2 * np.ones(morder).reshape(-1, 1)
V = np.diag(sm.ravel()**2)
b = [generate_noisy_measurements(Ai, x, sm) for Ai in A]
x0 = np.ones(order).reshape(-1, 1)
P0 = np.identity(order)
rlsi = rlspy.data_matrix.Estimator(x0, P0)
# preallocate some arrays to track the evolution of the estimate
xest = np.empty([order, N + 1])
Pest = np.empty([order, order, N + 1])
xest[:,0] = x0.ravel()
Pest[:,:,0] = P0
for i, (Ai, bi) in enumerate(zip(A, b)):
rlsi.update(Ai, bi, V)
xest[:, i + 1] = rlsi.x.ravel()
Pest[:, :, i + 1] = rlsi.P
xerr = x - xest
return xest, Pest, xerr
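# Note added for clarity: rlspy's Estimator.update(A_i, b_i, V) is assumed to
# implement the standard recursive-least-squares step
#     K   = P A_i^T (A_i P A_i^T + V)^{-1}
#     x  <- x + K (b_i - A_i x)
#     P  <- (I - K A_i) P
# so the estimation error returned by example() should shrink as measurements
# accumulate.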
if __name__ == '__main__':
import matplotlib.pyplot as plt
x, P, r = example()
plt.semilogy(np.abs(r.T))
plt.grid(True)
plt.ylabel('abs(estimation error)')
plt.xlabel('iteration')
plt.show()
| 25.465517 | 77 | 0.604604 |
79499b28541ca712e51db51139ab1bb60cc2c643
| 109,645 |
py
|
Python
|
django/db/models/sql/query.py
|
Manny27nyc/django
|
28f66b2783e52d5be51887c31cd2be907b820e11
|
[
"BSD-3-Clause",
"0BSD"
] | 1 |
2021-11-07T12:42:43.000Z
|
2021-11-07T12:42:43.000Z
|
django/db/models/sql/query.py
|
Manny27nyc/django
|
28f66b2783e52d5be51887c31cd2be907b820e11
|
[
"BSD-3-Clause",
"0BSD"
] | 1 |
2016-02-24T19:56:50.000Z
|
2016-04-03T11:29:06.000Z
|
django/db/models/sql/query.py
|
Manny27nyc/django
|
28f66b2783e52d5be51887c31cd2be907b820e11
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import (
BaseTable, Empty, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function')
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
ExplainInfo = namedtuple('ExplainInfo', ('format', 'options'))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = 'T'
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = WhereNode()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_for_no_key_update = False
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
self.annotations = {} # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
self._filtered_relations = {}
self.explain_info = None
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, 'target', None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@property
def has_select_fields(self):
return bool(self.select or self.annotation_select_mask or self.extra_select_mask)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using, elide_empty)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple([
query.clone() for query in self.combined_queries
])
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property
try:
del obj.base_table
except AttributeError:
pass
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
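        # Note (added for clarity): the loop below replaces column/aggregate
        # subexpressions with Ref(...) objects; any expression not already
        # selected in the subquery is added under a '__colN' alias to this
        # query's annotations and annotation mask.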
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
else:
# Reuse aliases of expressions already selected in subquery.
for col_alias, selected_annotation in self.annotation_select.items():
if selected_annotation is expr:
new_expr = Ref(col_alias, expr)
break
else:
                    # An expression that is not selected in the subquery.
if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference column or another aggregate. Select it
# under a non-conflicting alias.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_expr = Ref(col_alias, expr)
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = [
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
]
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
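        # (Illustrative, added for clarity: Model.objects[0:3].count() must
        # count at most three rows, so the slice has to stay inside an inner
        # subquery instead of being applied after the aggregation.)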
if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
self.distinct or self.combinator):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
# Queries with distinct_fields need ordering and when a limit is
# applied we must take the slice from the ordered query. Otherwise
# no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
has_existing_aggregate_annotations = any(
annotation for annotation in existing_annotations
if getattr(annotation, 'contains_aggregate', True)
)
if inner_query.default_cols and has_existing_aggregate_annotations:
inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.alias_map}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
annotation_select_mask = inner_query.annotation_select_mask
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
return obj.get_aggregation(using, ['__count'])['__count']
def has_filters(self):
return self.where
def exists(self, using, limit=True):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == 'union':
limit_combined = connections[using].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_extra({'a': 1}, None, None, None, None, None)
q.set_extra_mask(['a'])
return q
def has_results(self, using):
q = self.exists(using)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return '\n'.join(compiler.explain_query())
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
        current query). 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
if self.model != rhs.model:
raise TypeError('Cannot combine queries on two different base models.')
if self.is_sliced:
raise TypeError('Cannot combine queries once a slice has been taken.')
if self.distinct != rhs.distinct:
raise TypeError('Cannot combine a unique query with a non-unique query.')
if self.distinct_fields != rhs.distinct_fields:
raise TypeError('Cannot combine queries with different distinct fields.')
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
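        # (Illustrative, added for clarity: for a reverse relation "revrel",
        # Q(revrel__col=1) & Q(revrel__col=2) needs two separate joins so that
        # two different related rows can satisfy the condition, while
        # Q(revrel__col=1) | Q(revrel__col=2) can share a single join.)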
reuse = set() if conjunction else set(self.alias_map)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
        # Combine subquery aliases to ensure alias relabelling properly
        # handles subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
if name in self._filtered_relations:
name = self._filtered_relations[name].relation_name
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.local_fields:
if field not in values:
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
seen.setdefault(model, set())
for model, values in seen.items():
callback(target, model, values)
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = filtered_relation.alias if filtered_relation is not None else table_name
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Promote recursively the join type of given aliases and its children to
an outer join. If 'unconditional' is False, only promote the join if
it is nullable or the parent join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, outer_query):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
self.change_aliases({
alias: '%s%d' % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
})
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a
sql.datastructures.BaseTable or Join.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
reuse_aliases = [
a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j.equals(join)
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in the base chain with no parents;
            # assign the new options object and skip to the next base in
            # that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, 'external_aliases'):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
(isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or
(isinstance(table, BaseTable) and table.table_name != table.table_alias)
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def as_sql(self, compiler, connection):
# Some backends (e.g. Oracle) raise an error when a subquery contains
# unnecessary ORDER BY clause.
if (
self.subquery and
not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = '(%s)' % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(
self, reuse=can_reuse, allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, '_make'): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
                'Invalid lookup "%s" for model "%s".' %
(lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (isinstance(value, Query) and not value.has_select_fields and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if (
hasattr(expression, 'resolve_expression') and
not getattr(expression, 'filterable', True)
):
raise NotSupportedError(
expression.__class__.__name__ + ' is disallowed in the filter '
'clause.'
)
if hasattr(expression, 'get_source_expressions'):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ['exact']
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
if lhs.field.is_relation:
raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = 'exact'
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup('isnull')(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == 'exact' and
lookup.rhs == '' and
connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup('isnull')(lhs, True)
return lookup
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups())
if suggested_lookups:
suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups)
else:
suggestion = '.'
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, allow_joins=True, split_subq=True,
check_filterable=True):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
if hasattr(filter_expr, 'resolve_expression'):
if not getattr(filter_expr, 'conditional', False):
raise TypeError('Cannot filter against a non-conditional expression.')
condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
if not isinstance(condition, Lookup):
condition = self.build_lookup(['exact'], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None:
require_outer = True
if lookup_type != 'isnull':
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup('isnull')
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup('isnull')
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
        # (Consider the case where rel_a is LOUTER and rel_a__col=1 is added - if
        # rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True,
check_filterable=True):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, allow_joins=allow_joins,
split_subq=split_subq, check_filterable=check_filterable,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child, reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child, can_reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True, split_subq=False,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(filtered_relation.relation_name)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
filtered_relation = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts, opts, allow_many, fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted([
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
])
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info(filtered_relation)
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot], opts, allow_many, fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(transform, name=name, previous=final_transformer)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(
opts.db_table, alias, table_alias, INNER, join.join_field,
nullable, filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
        is the full list of join aliases. The 'path' contains the PathInfos
used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
@classmethod
def _gen_cols(cls, exprs, include_external=False):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(getattr(expr, 'get_external_cols', None)):
yield from expr.get_external_cols()
elif hasattr(expr, 'get_source_expressions'):
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
'Joined field references are not permitted in '
'this query'
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse)
targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if not allow_joins and len(join_list) > 1:
raise FieldError('Joined field references are not permitted in this query')
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = Query(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup('exact')
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += col,
self.values_select += name,
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use outer join.
join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted([
*get_field_names_from_opts(opts), *self.extra,
*self.annotation_select, *self._filtered_relations
])
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == '?':
continue
if item.startswith('-'):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
# names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, 'resolve_expression'):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force=False, clear_default=True):
"""
Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (self.is_sliced or self.distinct_fields or self.select_for_update):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update({
field.column
for field in model._meta.local_concrete_fields
})
seen_models.add(model)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
        are deferred (or remove them from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items()
if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map
if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
return field.null or (
field.empty_strings_allowed and
connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
def get_order_dir(field, default='ASC'):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
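# Added doctest-style sketch (not part of the original module), assuming the
# usual ORDER_DIR mapping {'ASC': ('ASC', 'DESC'), 'DESC': ('DESC', 'ASC')}:
#
#     >>> get_order_dir('-foo')
#     ('foo', 'DESC')
#     >>> get_order_dir('foo')
#     ('foo', 'ASC')
#     >>> get_order_dir('foo', default='DESC')
#     ('foo', 'DESC')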
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
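# Added doctest-style sketch (not part of the original module):
#
#     >>> data = {}
#     >>> add_to_dict(data, 'key', 1)
#     >>> add_to_dict(data, 'key', 2)
#     >>> data
#     {'key': {1, 2}}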
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results, i.e. is null (for example
# reverse foreign key or null value in direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
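# Added illustrative sketch (comment only, not part of Django): how the voting
# above plays out for an OR filter touching two different relations, e.g.
# Q(rel_a__col=1) | Q(rel_b__col=2). The alias names are hypothetical.
#
#     promoter = JoinPromoter(OR, num_children=2, negated=False)
#     promoter.add_votes(['rel_a_alias'])   # first child needs rel_a's join
#     promoter.add_votes(['rel_b_alias'])   # second child needs rel_b's join
#     promoter.update_join_types(query)
#     # Each alias got 1 vote < num_children == 2, so both aliases end up in
#     # to_promote and query.promote_joins() turns them into LOUTER joins.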
| 44.661914 | 119 | 0.608874 |
79499c0b454d3795080313931f4ce76b1c451758 | 9,830 | py | Python | kubernetes/client/models/v1_api_group.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | ["Apache-2.0"] | 1 | 2019-07-12T05:38:06.000Z | 2019-07-12T05:38:06.000Z | kubernetes/client/models/v1_api_group.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | ["Apache-2.0"] | null | null | null | kubernetes/client/models/v1_api_group.py | woqer/python | 3a6fe8231cefe1fa39a0a69d4b2f33044ab32745 | ["Apache-2.0"] | 1 | 2021-05-18T12:25:56.000Z | 2021-05-18T12:25:56.000Z |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1APIGroup(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str',
'preferred_version': 'V1GroupVersionForDiscovery',
'server_address_by_client_cid_rs': 'list[V1ServerAddressByClientCIDR]',
'versions': 'list[V1GroupVersionForDiscovery]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name',
'preferred_version': 'preferredVersion',
'server_address_by_client_cid_rs': 'serverAddressByClientCIDRs',
'versions': 'versions'
}
def __init__(self, api_version=None, kind=None, name=None, preferred_version=None, server_address_by_client_cid_rs=None, versions=None):
"""
V1APIGroup - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._name = None
self._preferred_version = None
self._server_address_by_client_cid_rs = None
self._versions = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
self.name = name
if preferred_version is not None:
self.preferred_version = preferred_version
if server_address_by_client_cid_rs is not None:
self.server_address_by_client_cid_rs = server_address_by_client_cid_rs
self.versions = versions
@property
def api_version(self):
"""
Gets the api_version of this V1APIGroup.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1APIGroup.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1APIGroup.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1APIGroup.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1APIGroup.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1APIGroup.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1APIGroup.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1APIGroup.
:type: str
"""
self._kind = kind
@property
def name(self):
"""
Gets the name of this V1APIGroup.
name is the name of the group.
:return: The name of this V1APIGroup.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1APIGroup.
name is the name of the group.
:param name: The name of this V1APIGroup.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def preferred_version(self):
"""
Gets the preferred_version of this V1APIGroup.
preferredVersion is the version preferred by the API server, which probably is the storage version.
:return: The preferred_version of this V1APIGroup.
:rtype: V1GroupVersionForDiscovery
"""
return self._preferred_version
@preferred_version.setter
def preferred_version(self, preferred_version):
"""
Sets the preferred_version of this V1APIGroup.
preferredVersion is the version preferred by the API server, which probably is the storage version.
:param preferred_version: The preferred_version of this V1APIGroup.
:type: V1GroupVersionForDiscovery
"""
self._preferred_version = preferred_version
@property
def server_address_by_client_cid_rs(self):
"""
Gets the server_address_by_client_cid_rs of this V1APIGroup.
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
:return: The server_address_by_client_cid_rs of this V1APIGroup.
:rtype: list[V1ServerAddressByClientCIDR]
"""
return self._server_address_by_client_cid_rs
@server_address_by_client_cid_rs.setter
def server_address_by_client_cid_rs(self, server_address_by_client_cid_rs):
"""
Sets the server_address_by_client_cid_rs of this V1APIGroup.
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
:param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIGroup.
:type: list[V1ServerAddressByClientCIDR]
"""
self._server_address_by_client_cid_rs = server_address_by_client_cid_rs
@property
def versions(self):
"""
Gets the versions of this V1APIGroup.
versions are the versions supported in this group.
:return: The versions of this V1APIGroup.
:rtype: list[V1GroupVersionForDiscovery]
"""
return self._versions
@versions.setter
def versions(self, versions):
"""
Sets the versions of this V1APIGroup.
versions are the versions supported in this group.
:param versions: The versions of this V1APIGroup.
:type: list[V1GroupVersionForDiscovery]
"""
if versions is None:
raise ValueError("Invalid value for `versions`, must not be `None`")
self._versions = versions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1APIGroup):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
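if __name__ == "__main__":
    # Added usage sketch (not part of the generated client): build a minimal
    # V1APIGroup and round-trip it through to_dict(). Plain dicts stand in for
    # V1GroupVersionForDiscovery instances purely for illustration.
    group = V1APIGroup(
        name="apps",
        versions=[{"groupVersion": "apps/v1", "version": "v1"}],
    )
    print(group.to_dict())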
| 36.542751 | 625 | 0.649339 |
79499cf1eb3e39dc24bb2fc222a931ba2e07eed1 | 941 | py | Python | tst/regression/scripts/tests/curvilinear/blast_cyl.py | luminoctum/athena-crm | 525ad5d1c442f9f6d66f2307eed88cd6fb723810 | ["BSD-3-Clause"] | null | null | null | tst/regression/scripts/tests/curvilinear/blast_cyl.py | luminoctum/athena-crm | 525ad5d1c442f9f6d66f2307eed88cd6fb723810 | ["BSD-3-Clause"] | null | null | null | tst/regression/scripts/tests/curvilinear/blast_cyl.py | luminoctum/athena-crm | 525ad5d1c442f9f6d66f2307eed88cd6fb723810 | ["BSD-3-Clause"] | null | null | null |
# Regression test to check whether blast wave remains spherical in cylindrical coords
# Modules
import numpy as np
import math
import sys
import scripts.utils.athena as athena
import scripts.utils.comparison as comparison
# Prepare Athena++
def prepare():
athena.configure(
prob='blast',
coord='cylindrical')
athena.make()
# Run Athena++
def run():
arguments = ['problem/compute_error=true']
athena.run('hydro/athinput.blast_cyl', arguments)
# Analyze output
def analyze():
# read data from error file
filename = 'bin/blastwave-shape.dat'
data = []
with open(filename, 'r') as f:
raw_data = f.readlines()
for line in raw_data:
if line.split()[0][0] == '#':
continue
data.append([float(val) for val in line.split()])
# check blast is spherical
if data[0][3] > 1.0:
print "Distortion of blast wave in cylindrical coords too large",data[0][3]
return False
return True
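# Added comment sketch (not part of the regression suite): the regression
# runner is expected to call these hooks in order, roughly as
#
#     prepare()            # configure and build Athena++ with cylindrical coords
#     run()                # run hydro/athinput.blast_cyl with compute_error=true
#     passed = analyze()   # parse bin/blastwave-shape.dat, pass if distortion <= 1.0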
| 23.525 | 85 | 0.684378 |
79499dd5422c38e64b12c582fa809e4353e9c9e8 | 4,199 | py | Python | clever_player.py | MarcusRainbow/TwoPlayerGame | a10c0a5f949bcb6e03bdf8197c513511b3c29f98 | ["MIT"] | null | null | null | clever_player.py | MarcusRainbow/TwoPlayerGame | a10c0a5f949bcb6e03bdf8197c513511b3c29f98 | ["MIT"] | null | null | null | clever_player.py | MarcusRainbow/TwoPlayerGame | a10c0a5f949bcb6e03bdf8197c513511b3c29f98 | ["MIT"] | null | null | null |
from player import Player
from board import Board
from game import Game
from random import choice
from typing import Tuple
class CleverPlayer(Player):
"""
A player that tries to win, assuming it is playing
against the same sort of player.
"""
def __init__(self, game: Game, player: int, depth: int, other = None):
self.game = game
self.player = player
self.depth = depth
tokens = game.tokens()
self.token = tokens[player]
other_player = 0 if player == 1 else 1
# While thinking about a move, we need a
# clever opponent
if other:
self.other = other
else:
self.other = CleverPlayer(game, other_player, depth - 1, self)
def next_move(self, board: Board) -> int:
"""
Finds the best possible move from this point, trying to
win, or at least force a draw.
"""
if not board.available_moves():
raise Exception("I cannot move")
return self.best_move(board, self.depth)[0]
def best_move(self, board: Board, depth: int) -> Tuple[int, int]:
"""
Finds the best available move for this player, and returns
a tuple of the best move, followed by the quality of the
        move (-1 = draw, 0 = player 0 win, 1 = player 1 win)
"""
possible_moves = board.available_moves()
if not possible_moves:
return -1, -1
# if we have descended too far into our thought processes
# without an answer, treat it as a draw and pick one
# choice at random
if depth <= 0:
# print("too deep. Returning random choice")
return choice(possible_moves), -1
draw = -1
for move in possible_moves:
# try out this move
test_board = board.clone()
test_board.apply_move(move, self.token)
# print(f"try {move} for player {self.player}")
winner = self.game.test_win(test_board)
if winner == self.player:
# Immediate win. Play this move
# print(f"Win: {move} for player {self.player}")
return move, winner
elif winner == self.other.player:
# immediate lose. Do not play this move
# print(f"Lose: {move} for player {self.player}")
continue
elif not test_board.available_moves():
# this move results in a stalemate. Only play if forced
draw = move
continue
else:
# if we get to here, we have available moves but do not
# yet know if they result in a win or lose. Ask our
# opponent to make their best move. If that results
# in a win for them, that is a lose for us
_, winner = self.other.best_move(test_board, depth - 1)
if winner == self.player:
# print(f"Win: other player lost so {self.player} should play {move}")
return move, winner
elif winner == self.other.player:
# print(f"Lose: other player won so {self.player} should not play {move}")
continue
else:
draw = move
continue
# if we get to here, there are no winning moves. Aim
# for a draw
if draw >= 0:
return draw, -1
# If there is nothing to do but lose, do so
return possible_moves[0], self.other.player
if __name__ == "__main__":
from play import play_game
from connect4 import Connect4
from human_player import HumanPlayer
grid = [
[-1, 0, 0, -1, -1, 1, 1],
[-1, 0, 0, -1, -1, -1, 1],
[ 0, 0, 0, 0, 1, 1, 1],
[ 0, 0, 0, 0, 0, 0, -1],
[ 0, 0, 0, 0, 0, 0, 1],
[ 0, 0, 0, 0, 0, 0, 1]]
game = Connect4(grid)
play_game(HumanPlayer(), CleverPlayer(game, 1, 4), game)
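    # Added variation sketch (commented out): the same game with two
    # CleverPlayer instances facing each other instead of a human, assuming
    # player indices 0 and 1 as above.
    # play_game(CleverPlayer(game, 0, 4), CleverPlayer(game, 1, 4), game)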
| 36.513043 | 95 | 0.522029 |
79499e9b40ba6bf1da815bb1ab4c657936c1467a | 1,481 | py | Python | src/ralph/lib/transitions/decorators.py | DoNnMyTh/ralph | 97b91639fa68965ad3fd9d0d2652a6545a2a5b72 | ["Apache-2.0"] | 1,668 | 2015-01-01T12:51:20.000Z | 2022-03-29T09:05:35.000Z | src/ralph/lib/transitions/decorators.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | ["Apache-2.0"] | 2,314 | 2015-01-02T13:26:26.000Z | 2022-03-29T04:06:03.000Z | src/ralph/lib/transitions/decorators.py | hq-git/ralph | e2448caf02d6e5abfd81da2cff92aefe0a534883 | ["Apache-2.0"] | 534 | 2015-01-05T12:40:28.000Z | 2022-03-29T21:10:12.000Z |
# -*- coding: utf-8 -*-
from functools import wraps
from ralph.lib.transitions.conf import TRANSITION_ATTR_TAG
def transition_action(method=None, **kwargs):
def decorator(func):
func.verbose_name = kwargs.get(
'verbose_name', func.__name__.replace('_', ' ').capitalize()
)
func.return_attachment = kwargs.get('return_attachment', False)
func.form_fields = kwargs.get('form_fields', {})
func.run_after = kwargs.get('run_after', [])
func.help_text = kwargs.get('help_text', '')
func.precondition = kwargs.get(
'precondition', lambda instances, **kwargs: {}
)
func.additional_validation = kwargs.get(
'additional_validation', lambda instances, data: {}
)
func.disable_save_object = kwargs.get('disable_save_object', False)
func.only_one_action = kwargs.get('only_one_action', False)
func.is_async = kwargs.get('is_async', False)
setattr(func, TRANSITION_ATTR_TAG, True)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
models = []
if 'model' in kwargs:
models = [kwargs['model']]
elif 'models' in kwargs:
models = kwargs['models']
for model in models:
setattr(model, func.__name__, classmethod(wrapper))
return wrapper
if callable(method):
return decorator(method)
return decorator
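# Added usage sketch (comment only, not part of Ralph): a typical application
# of the decorator. `Asset`, `forms` and the action body are hypothetical and
# shown only to illustrate the kwargs consumed above.
#
#     @transition_action(
#         model=Asset,
#         verbose_name='Change owner',
#         form_fields={'owner': {'field': forms.CharField()}},
#     )
#     def change_owner(cls, instances, **kwargs):
#         for instance in instances:
#             instance.owner = kwargs['owner']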
| 33.659091 | 75 | 0.610398 |
79499ef663e49ecb1a079713762890f2503c20ef | 19,529 | py | Python | plotters.py | chris142857/dad_final | e791cf85d4eea2da692c59f371518494caf0670d | ["MIT"] | null | null | null | plotters.py | chris142857/dad_final | e791cf85d4eea2da692c59f371518494caf0670d | ["MIT"] | null | null | null | plotters.py | chris142857/dad_final | e791cf85d4eea2da692c59f371518494caf0670d | ["MIT"] | null | null | null |
"""
# @Description:
Plot utilities for face finding applications
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import LineCollection as lc
from matplotlib.colors import colorConverter
from mpl_toolkits.mplot3d.art3d import Line3DCollection as lc3d
from scipy.interpolate import interp1d
import matplotlib.image as mpimg
import glob
def colored_line_segments(xs, ys, zs=None, color='k', mid_colors=False):
if isinstance(color, str):
color = colorConverter.to_rgba(color)[:-1]
color = np.array([color for i in range(len(xs))])
segs = []
seg_colors = []
lastColor = [color[0][0], color[0][1], color[0][2]]
start = [xs[0], ys[0]]
end = [xs[0], ys[0]]
if not zs is None:
start.append(zs[0])
end.append(zs[0])
else:
zs = [zs] * len(xs)
for x, y, z, c in zip(xs, ys, zs, color):
if mid_colors:
seg_colors.append([(chan + lastChan) * .5 for chan, lastChan in zip(c, lastColor)])
else:
seg_colors.append(c)
        lastColor = c  # keep all three RGB channels for the next midpoint blend
if not z is None:
start = [end[0], end[1], end[2]]
end = [x, y, z]
else:
start = [end[0], end[1]]
end = [x, y]
segs.append([start, end])
colors = [(*color, 1) for color in seg_colors]
return segs, colors
def segmented_resample(xs, ys, zs=None, color='k', n_resample=100, mid_colors=False):
n_points = len(xs)
if isinstance(color, str):
color = colorConverter.to_rgba(color)[:-1]
color = np.array([color for i in range(n_points)])
n_segs = (n_points - 1) * (n_resample - 1)
xsInterp = np.linspace(0, 1, n_resample)
segs = []
seg_colors = []
hiResXs = [xs[0]]
hiResYs = [ys[0]]
if not zs is None:
hiResZs = [zs[0]]
RGB = color.swapaxes(0, 1)
for i in range(n_points - 1):
fit_xHiRes = interp1d([0, 1], xs[i:i + 2])
fit_yHiRes = interp1d([0, 1], ys[i:i + 2])
xHiRes = fit_xHiRes(xsInterp)
yHiRes = fit_yHiRes(xsInterp)
hiResXs = hiResXs + list(xHiRes[1:])
hiResYs = hiResYs + list(yHiRes[1:])
R_HiRes = interp1d([0, 1], RGB[0][i:i + 2])(xsInterp)
G_HiRes = interp1d([0, 1], RGB[1][i:i + 2])(xsInterp)
B_HiRes = interp1d([0, 1], RGB[2][i:i + 2])(xsInterp)
lastColor = [R_HiRes[0], G_HiRes[0], B_HiRes[0]]
start = [xHiRes[0], yHiRes[0]]
end = [xHiRes[0], yHiRes[0]]
if not zs is None:
fit_zHiRes = interp1d([0, 1], zs[i:i + 2])
zHiRes = fit_zHiRes(xsInterp)
hiResZs = hiResZs + list(zHiRes[1:])
start.append(zHiRes[0])
end.append(zHiRes[0])
else:
zHiRes = [zs] * len(xHiRes)
if mid_colors: seg_colors.append([R_HiRes[0], G_HiRes[0], B_HiRes[0]])
for x, y, z, r, g, b in zip(xHiRes[1:], yHiRes[1:], zHiRes[1:], R_HiRes[1:], G_HiRes[1:], B_HiRes[1:]):
if mid_colors:
seg_colors.append([(chan + lastChan) * .5 for chan, lastChan in zip((r, g, b), lastColor)])
else:
seg_colors.append([r, g, b])
lastColor = [r, g, b]
if not z is None:
start = [end[0], end[1], end[2]]
end = [x, y, z]
else:
start = [end[0], end[1]]
end = [x, y]
segs.append([start, end])
colors = [(*color, 1) for color in seg_colors]
data = [hiResXs, hiResYs]
if not zs is None:
data = [hiResXs, hiResYs, hiResZs]
return segs, colors, data
def faded_segment_resample(xs, ys, zs=None, color='k', fade_len=20, n_resample=100, direction='Head'):
segs, colors, hiResData = segmented_resample(xs, ys, zs, color, n_resample)
n_segs = len(segs)
if fade_len > len(segs):
fade_len = n_segs
if direction == 'Head':
# Head fade
alphas = np.concatenate((np.zeros(n_segs - fade_len), np.linspace(0, 1, fade_len)))
else:
# Tail fade
alphas = np.concatenate((np.linspace(1, 0, fade_len), np.zeros(n_segs - fade_len)))
colors = [(*color[:-1], alpha) for color, alpha in zip(colors, alphas)]
return segs, colors, hiResData
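# Added usage sketch (comment only): a typical way to push the faded segments
# into a 2-D LineCollection, assuming xs and ys are equal-length sequences and
# `ax` is an existing matplotlib Axes (mirrors how test2d() below uses it):
#
#     segs, colors, _ = faded_segment_resample(xs, ys, color='b', fade_len=20)
#     ax.add_collection(lc(segs, colors=colors))
#     ax.autoscale()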
def test2d():
NPOINTS = 10
RESAMPLE = 10
N_FADE = int(RESAMPLE * NPOINTS * 0.5)
N_SEGS = (NPOINTS - 1) * (RESAMPLE - 1)
SHOW_POINTS_AXI_12 = True
SHOW_POINTS_AXI_34 = True
np.random.seed(11)
xs = np.random.rand(NPOINTS)
ys = np.random.rand(NPOINTS)
MARKER = '.'
CMAP = plt.get_cmap('hsv')
COLORS = np.array([CMAP(i)[:-1] for i in np.linspace(0, 1, NPOINTS)])
MARKER_COLOR = COLORS
N_SCATTER = (NPOINTS - 1) * (RESAMPLE - 1) + 1
COLORS_LONG = np.array([CMAP(i)[:-1] for i in np.linspace(1 / N_SCATTER, 1, N_SCATTER)])
fig = plt.figure(figsize=(12, 8), dpi=100)
ax1 = fig.add_subplot(221) # original data
segs, colors = colored_line_segments(xs, ys, color=COLORS, mid_colors=True)
if SHOW_POINTS_AXI_12: ax1.scatter(xs, ys, marker=MARKER, color=COLORS)
ax1.add_collection(lc(segs, colors=colors))
ax1.text(.05, 1.05, 'Original Data')
ax1.set_ylim(0, 1.2)
ax2 = fig.add_subplot(222, sharex=ax1, sharey=ax1) # resampled data
segs, colors, hiResData = segmented_resample(xs, ys, color=COLORS, n_resample=RESAMPLE)
if SHOW_POINTS_AXI_12: ax2.scatter(hiResData[0], hiResData[1], marker=MARKER, color=COLORS_LONG)
ax2.add_collection(lc(segs, colors=colors))
ax2.text(.05, 1.05, 'Original Data - Resampled')
ax2.set_ylim(0, 1.2)
ax3 = fig.add_subplot(223, sharex=ax1, sharey=ax1) # resampled with linear alpha fade start to finish
segs, colors, hiResData = faded_segment_resample(xs, ys, color=COLORS, fade_len=RESAMPLE * NPOINTS, n_resample=RESAMPLE, direction='Head')
if SHOW_POINTS_AXI_34: ax3.scatter(hiResData[0], hiResData[1], marker=MARKER, color=COLORS_LONG)
ax3.add_collection(lc(segs, colors=colors))
ax3.text(.05, 1.05, 'Resampled - w/Full length fade')
ax3.set_ylim(0, 1.2)
ax4 = fig.add_subplot(224, sharex=ax1, sharey=ax1) # resampled with linear alpha fade N_FADE long
segs, colors, hiResData = faded_segment_resample(xs, ys, color=COLORS, fade_len=N_FADE, n_resample=RESAMPLE, direction='Head')
if SHOW_POINTS_AXI_34: ax4.scatter(hiResData[0], hiResData[1], marker=MARKER, color=COLORS_LONG)
ax4.add_collection(lc(segs, colors=colors))
ax4.text(.05, 1.05, 'Resampled - w/{} point fade'.format(N_FADE))
ax4.set_ylim(0, 1.2)
fig.savefig('2d_fadeSegmentedColorLine.png')
plt.show()
def test3d():
def set_view(axi):
axi.set_xlim(-.65, .65)
axi.set_ylim(-.65, .75)
axi.set_zlim(-.65, .65)
axi.view_init(elev=45, azim=45)
NPOINTS = 40
RESAMPLE = 2
N_FADE = int(RESAMPLE * NPOINTS * 0.5)
N_FADE = 20
N_SEGS = (NPOINTS - 1) * (RESAMPLE - 1)
SHOW_POINTS_AXI_12 = True
SHOW_POINTS_AXI_34 = False
alpha = np.linspace(.5, 1.5, NPOINTS) * np.pi
theta = np.linspace(.25, 1.5, NPOINTS) * np.pi
rad = np.linspace(0, 1, NPOINTS)
xs = rad * np.sin(theta) * np.cos(alpha)
ys = rad * np.sin(theta) * np.sin(alpha)
zs = rad * np.cos(theta)
MARKER = '.'
CMAP = plt.get_cmap('hsv')
COLORS = np.array([CMAP(i)[:-1] for i in np.linspace(0, 1, NPOINTS)])
MARKER_COLOR = COLORS
N_SCATTER = (NPOINTS - 1) * (RESAMPLE - 1) + 1
COLORS_LONG = np.array([CMAP(i)[:-1] for i in np.linspace(1 / N_SCATTER, 1, N_SCATTER)])
fig = plt.figure(figsize=(12, 8), dpi=100)
ax1 = fig.add_subplot(221, projection='3d') # original data
segs, colors = colored_line_segments(xs, ys, zs, color=COLORS, mid_colors=True)
if SHOW_POINTS_AXI_12: ax1.scatter(xs, ys, zs, marker=MARKER, color=COLORS)
ax1.add_collection(lc3d(segs, colors=colors))
ax2 = fig.add_subplot(222, projection='3d', sharex=ax1, sharey=ax1) # resampled data
segs, colors, hiResData = segmented_resample(xs, ys, zs, color=COLORS, n_resample=RESAMPLE)
if SHOW_POINTS_AXI_12: ax2.scatter(hiResData[0], hiResData[1], hiResData[2], marker=MARKER, color=COLORS_LONG)
ax2.add_collection(lc3d(segs, colors=colors))
ax3 = fig.add_subplot(223, projection='3d', sharex=ax1, sharey=ax1) # resampled with linear alpha fade start to finish
segs, colors, hiResData = faded_segment_resample(xs, ys, zs, color=COLORS, fade_len=RESAMPLE * NPOINTS, n_resample=RESAMPLE, direction='Head')
if SHOW_POINTS_AXI_34: ax3.scatter(hiResData[0], hiResData[1], hiResData[2], marker=MARKER, color=COLORS_LONG)
ax3.add_collection(lc3d(segs, colors=colors))
ax4 = fig.add_subplot(224, projection='3d', sharex=ax1, sharey=ax1) # resampled with linear alpha fade N_FADE long
segs, colors, hiResData = faded_segment_resample(xs, ys, zs, color=COLORS, fade_len=N_FADE, n_resample=RESAMPLE, direction='Head')
if SHOW_POINTS_AXI_34: ax4.scatter(hiResData[0], hiResData[1], hiResData[2], marker=MARKER, color=COLORS_LONG)
ax4.add_collection(lc3d(segs, colors=colors))
labels = ('Original Data',
'Original Data - Resampled',
'Resampled - w/Full length fade',
'Resampled - w/{} point fade'.format(N_FADE))
for ax, label in zip((ax1, ax2, ax3, ax4), labels):
set_view(ax)
ax.text(.6, -.6, 1.55, label)
fig.savefig('3d_fadeSegmentedColorLine.png')
plt.show()
def plot_trace_2d(xs, ys, n_trace, true_theta):
NPOINTS = len(xs)
SHOW_POINTS_AXI_12 = True
MARKER = 'o'
CMAP = plt.get_cmap('gist_heat')
COLORS = np.array([CMAP(i)[:-1] for i in np.linspace(0, 1, NPOINTS)])
DotCMAP = plt.get_cmap('plasma')
DotCOLORS = np.array([DotCMAP(i)[:-1] for i in np.linspace(0, 1, NPOINTS)])
fig = plt.figure(figsize=(6, 4), dpi=100, constrained_layout=True)
ax1 = fig.add_subplot()
segs, colors = colored_line_segments(xs, ys, color=COLORS, mid_colors=True)
if SHOW_POINTS_AXI_12: ax1.scatter(xs, ys, marker=MARKER, color=DotCOLORS, alpha=1)
ax1.add_collection(lc(segs, linewidths=1.5, linestyles=':', colors=colors))
ax1.set_xlabel('x')
    ax1.set_ylabel('y')
# plot true theta
for theta in true_theta:
ax1.scatter(theta[0], theta[1], c='r', marker='D')
fig.savefig(f"res_dim_2/trace_{n_trace}.png")
plt.close()
def plot_trace_3d(xs, ys, zs, n_trace, true_theta):
NPOINTS = len(xs)
SHOW_POINTS_AXI_12 = True
MARKER = 'o'
CMAP = plt.get_cmap('copper')
COLORS = np.array([CMAP(i)[:-1] for i in np.linspace(0, 1, NPOINTS)])
DotCMAP = plt.get_cmap('plasma')
DotCOLORS = np.array([DotCMAP(i)[:-1] for i in np.linspace(0, 1, NPOINTS)])
fig = plt.figure(figsize=(6, 4), dpi=100, constrained_layout=True)
ax1 = fig.add_subplot(projection='3d') # original data
segs, colors = colored_line_segments(xs, ys, zs, color=COLORS, mid_colors=True)
if SHOW_POINTS_AXI_12: ax1.scatter(xs, ys, zs, marker=MARKER, color=DotCOLORS, alpha=1)
ax1.add_collection(lc3d(segs, linewidths=1.5, linestyles=':', colors=colors))
ax1.view_init(elev=20, azim=20)
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_zlabel('z')
# plot true theta
for theta in true_theta:
ax1.scatter(theta[0], theta[1], theta[2], c='r', marker='D')
fig.savefig(f"res_dim_3/trace_{n_trace}.png")
plt.close()
def plot_trace(trace_i, p_dim, T, run_df, true_theta, *args, **kwargs):
mk_plot_dir(f"res_dim_{p_dim}")
if kwargs.get("face_finding"):
xis = run_df[[f'xi_{i}' for i in range(p_dim)]]
dist = np.linalg.norm(np.subtract(xis, true_theta), axis=-1)
face_folder = kwargs.get("face_folder")
num_imgs = len(glob.glob1(face_folder, "target_0*"))
num_col = 5
remain = num_imgs % num_col
num_row = 1 + num_imgs // num_col if not remain else 2 + num_imgs // num_col
fig, axs = plt.subplots(nrows=num_row, ncols=num_col, gridspec_kw={'height_ratios': np.ones(num_row).tolist()})
# Plot recon images
img_idx = 0
for r in range(1, num_row):
for c in range(num_col):
if os.path.exists(os.path.join(face_folder, f'target_{trace_i}_recon_{img_idx}.jpg')):
img = mpimg.imread(os.path.join(face_folder, f'target_{trace_i}_recon_{img_idx}.jpg'))
axs[r, c].imshow(img)
img_idx += 5
axs[r, c].axes.xaxis.set_visible(False)
axs[r, c].axes.yaxis.set_visible(False)
else:
continue
# Plot target image at the last axis
img = mpimg.imread(os.path.join(face_folder, f'target_{trace_i}.jpg'))
axs[-1, -(num_col-remain+1)].imshow(img)
axs[-1, -(num_col-remain+1)].axes.xaxis.set_visible(False)
axs[-1, -(num_col-remain+1)].axes.yaxis.set_visible(False)
        # Remove redundant axes
for ax in axs[-1, -(num_col-remain):]:
ax.remove()
# Plot 1d distances at top
gs = axs[0, 0].get_gridspec()
# remove the underlying axes
for ax in axs[0, 0:]:
ax.remove()
axbig = fig.add_subplot(gs[0, :])
axbig.plot(run_df["order"], dist, 'ro--')
# Plot points of shown faces
face_order = []
face_points = []
for p in range(len(dist)):
if p % 5 == 0:
face_order.append(run_df["order"][p])
face_points.append(dist[p])
axbig.plot(face_order, face_points, 'bo', label='displayed faces')
axbig.set(xlabel='order', ylabel='distance', title=f'distance for p is {p_dim}')
axbig.axes.xaxis.set_visible(False)
axbig.grid()
axbig.legend()
# Save plot
plt.savefig(os.path.join(face_folder, f"trace_{trace_i}_combo.jpg"))
plt.close()
if kwargs.get("categorical_face_finding"):
xis = run_df[[f'xi_{i}' for i in range(p_dim)]]
dist = np.linalg.norm(np.subtract(xis, true_theta), axis=-1)
face_folder = kwargs.get("face_folder")
num_imgs = len(glob.glob1(face_folder, "target_0*"))
num_col = 5
remain = num_imgs % num_col
num_row = 1 + num_imgs // num_col if not remain else 2 + num_imgs // num_col
fig, axs = plt.subplots(nrows=num_row, ncols=num_col, gridspec_kw={'height_ratios': np.ones(num_row).tolist()})
# Plot recon images
img_idx = 0
for r in range(1, num_row):
for c in range(num_col):
if os.path.exists(os.path.join(face_folder, f'target_{trace_i}_recon_{img_idx}.jpg')):
img = mpimg.imread(os.path.join(face_folder, f'target_{trace_i}_recon_{img_idx}.jpg'))
axs[r, c].imshow(img)
img_idx += 5
axs[r, c].axes.xaxis.set_visible(False)
axs[r, c].axes.yaxis.set_visible(False)
else:
continue
# Plot target image at the last axis
img = mpimg.imread(os.path.join(face_folder, f'target_{trace_i}.jpg'))
axs[-1, -(num_col-remain+1)].imshow(img)
axs[-1, -(num_col-remain+1)].axes.xaxis.set_visible(False)
axs[-1, -(num_col-remain+1)].axes.yaxis.set_visible(False)
        # Remove redundant axes
for ax in axs[-1, -(num_col-remain):]:
ax.remove()
# Plot 1d distances at top
gs = axs[0, 0].get_gridspec()
# remove the underlying axes
for ax in axs[0, 0:]:
ax.remove()
axbig = fig.add_subplot(gs[0, :])
axbig.plot(run_df["order"], dist, 'ko--')
# Decorate points in 1d plot
face_order, face_points = [], []
green_order, green_points = [], []
red_order, red_points = [], []
amber_order, amber_points = [], []
for id in range(len(dist)):
if np.array_equal(run_df["observations"][id], [1,0,0]): # Green
green_order.append(run_df["order"][id])
green_points.append(dist[id])
elif np.array_equal(run_df["observations"][id], [0,1,0]): # Red
red_order.append(run_df["order"][id])
red_points.append(dist[id])
else: # Amber
amber_order.append(run_df["order"][id])
amber_points.append(dist[id])
if id % 5 == 0:
face_order.append(run_df["order"][id])
face_points.append(dist[id])
axbig.plot(face_order, face_points, 'kD', markersize=8)
axbig.plot(green_order, green_points, 'g.')
axbig.plot(red_order, red_points, 'r.')
axbig.plot(amber_order, amber_points, 'y.')
axbig.set(xlabel='order', ylabel='distance', title=f'distance for p is {p_dim}')
axbig.axes.xaxis.set_visible(False)
axbig.grid()
# plt.show()
# Save plot
plt.savefig(os.path.join(face_folder, f"trace_{trace_i}_combo.jpg"))
plt.close()
else:
if p_dim == 1:
fig, ax = plt.subplots()
ax.plot(run_df["order"], run_df[f"xi_0"], 'ro--')
ax.plot(T, true_theta, 'bo')
ax.set(xlabel='order', ylabel='location')
ax.grid()
plt.savefig(f"res_dim_1/trace_{trace_i}.png")
plt.close()
elif p_dim == 2:
plot_trace_2d(run_df["xi_0"], run_df["xi_1"], trace_i, true_theta)
elif p_dim == 3:
plot_trace_3d(run_df["xi_0"], run_df["xi_1"], run_df["xi_2"], trace_i, true_theta)
# Save data
run_df.to_csv(f'res_dim_3/trace_{trace_i}.csv') # save trace data
np.save(f'res_dim_3/target_{trace_i}.npy', true_theta) # save target data
elif p_dim > 3:
xis = run_df[[f'xi_{i}' for i in range(p_dim)]]
dist = np.linalg.norm(np.subtract(xis, true_theta), axis=-1)
fig, ax = plt.subplots()
ax.plot(run_df["order"], dist, 'ro--')
ax.set(xlabel='order', ylabel='distance', title=f'distance for p is {p_dim}')
ax.grid()
plt.savefig(f"trace_{trace_i}.png")
plt.close()
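# Illustrative note (not from the original code): plot_trace expects ``run_df`` to carry
# an "order" column plus design columns "xi_0" .. f"xi_{p_dim-1}" (and an "observations"
# column when categorical_face_finding is set); ``true_theta`` holds one target per row
# with p_dim coordinates. A hypothetical call:
#     plot_trace(0, 2, T=30, run_df=df, true_theta=np.array([[2., 2.]]))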
def mk_plot_dir(dir_name):
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
if __name__ == "__main__":
# # 2D plot demo
# NPOINTS = 30
# true_theta = np.array([[2, 2]])
# xs = np.random.rand(NPOINTS)
# ys = np.random.rand(NPOINTS)
# plot_trace_2d(xs, ys, 0, true_theta)
#
# # 3D plot demo
# NPOINTS = 30
# true_theta = np.array([[0, 0, 0]])
# alpha = np.linspace(.5, 1.5, NPOINTS) * np.pi
# theta = np.linspace(.25, 1.5, NPOINTS) * np.pi
# rad = np.linspace(0, 1, NPOINTS)
# xs = rad * np.sin(theta) * np.cos(alpha)
# ys = rad * np.sin(theta) * np.sin(alpha)
# zs = rad * np.cos(theta)
# plot_trace_3d(xs, ys, zs, 1, true_theta)
# <editor-fold desc="[+] Face plot">
# fig, axs = plt.subplots(nrows=4, ncols=5, figsize=(10,6))
fig, axs = plt.subplots(nrows=4, ncols=5, gridspec_kw={'height_ratios': [1,1,1,1]})
gs = axs[0, 0].get_gridspec()
# remove the underlying axes
for ax in axs[0, 1:]:
ax.remove()
axbig = fig.add_subplot(gs[0, :])
axbig.annotate('Big Axes \nGridSpec[1:, -1]', (0.1, 0.5),
xycoords='axes fraction', va='center')
fig.tight_layout()
plt.show()
# </editor-fold>
| 38.90239 | 146 | 0.598443 |
79499f319389ea63c9924ae6d6656d6fedfd53eb
| 53,434 |
py
|
Python
|
nio/responses.py
|
tkjenll/matrix-nio
|
8ac48ed0fda5da129c008e129305a512e8619cde
|
[
"Apache-2.0"
] | 1 |
2021-11-19T23:41:23.000Z
|
2021-11-19T23:41:23.000Z
|
nio/responses.py
|
METHLAB-LTD/matrix-nio
|
8ac48ed0fda5da129c008e129305a512e8619cde
|
[
"Apache-2.0"
] | null | null | null |
nio/responses.py
|
METHLAB-LTD/matrix-nio
|
8ac48ed0fda5da129c008e129305a512e8619cde
|
[
"Apache-2.0"
] | 1 |
2021-11-19T23:41:26.000Z
|
2021-11-19T23:41:26.000Z
|
# -*- coding: utf-8 -*-
# Copyright © 2018 Damir Jelić <poljar@termina.org.uk>
# Copyright © 2020 Famedly GmbH
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from builtins import str
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
from typing import Any, Dict, Generator, List, Optional, Set, Tuple, Union
from jsonschema.exceptions import SchemaError, ValidationError
from logbook import Logger
from .event_builders import ToDeviceMessage
from .events import (AccountDataEvent, BadEventType, Event, InviteEvent,
ToDeviceEvent, EphemeralEvent)
from .events.presence import PresenceEvent
from .http import TransportResponse
from .log import logger_group
from .schemas import Schemas, validate_json
logger = Logger("nio.responses")
logger_group.add_logger(logger)
__all__ = [
"ContentRepositoryConfigResponse",
"ContentRepositoryConfigError",
"FileResponse",
"DeleteDevicesAuthResponse",
"DeleteDevicesResponse",
"DeleteDevicesError",
"DeletePushRuleError",
"DeletePushRuleResponse",
"Device",
"DeviceList",
"DevicesResponse",
"DevicesError",
"DeviceOneTimeKeyCount",
"DiscoveryInfoError",
"DiscoveryInfoResponse",
"DownloadResponse",
"DownloadError",
"EnablePushRuleResponse",
"EnablePushRuleError",
"ErrorResponse",
"InviteInfo",
"JoinResponse",
"JoinError",
"JoinedMembersResponse",
"JoinedMembersError",
"JoinedRoomsResponse",
"JoinedRoomsError",
"KeysClaimResponse",
"KeysClaimError",
"KeysQueryResponse",
"KeysQueryError",
"KeysUploadResponse",
"KeysUploadError",
"RegisterResponse",
"LoginResponse",
"LoginError",
"LoginInfoResponse",
"LoginInfoError",
"LogoutResponse",
"LogoutError",
"Response",
"RoomBanResponse",
"RoomBanError",
"RoomCreateResponse",
"RoomCreateError",
"RoomDeleteAliasError",
"RoomDeleteAliasResponse",
"RoomInfo",
"RoomInviteResponse",
"RoomInviteError",
"RoomKickResponse",
"RoomKickError",
"RoomLeaveResponse",
"RoomLeaveError",
"RoomForgetResponse",
"RoomForgetError",
"RoomMember",
"RoomMessagesResponse",
"RoomMessagesError",
"RoomGetStateResponse",
"RoomGetStateError",
"RoomGetStateEventResponse",
"RoomGetStateEventError",
"RoomGetEventResponse",
"RoomGetEventError",
"RoomGetVisibilityResponse",
"RoomPutAliasResponse",
"RoomPutStateResponse",
"RoomPutStateError",
"RoomRedactResponse",
"RoomRedactError",
"RoomResolveAliasResponse",
"RoomResolveAliasError",
"RoomSendResponse",
"RoomSendError",
"RoomSummary",
"RoomUnbanResponse",
"RoomUnbanError",
"Rooms",
"SetPushRuleError",
"SetPushRuleResponse",
"SetPushRuleActionsError",
"SetPushRuleActionsResponse",
"ShareGroupSessionResponse",
"ShareGroupSessionError",
"SyncResponse",
"SyncError",
"Timeline",
"UpdateDeviceResponse",
"UpdateDeviceError",
"RoomTypingResponse",
"RoomTypingError",
"RoomReadMarkersResponse",
"RoomReadMarkersError",
"UploadResponse",
"UploadError",
"ProfileGetResponse",
"ProfileGetError",
"ProfileGetDisplayNameResponse",
"ProfileGetDisplayNameError",
"ProfileSetDisplayNameResponse",
"ProfileSetDisplayNameError",
"ProfileGetAvatarResponse",
"ProfileGetAvatarError",
"ProfileSetAvatarResponse",
"ProfileSetAvatarError",
"PresenceGetResponse",
"PresenceGetError",
"PresenceSetResponse",
"PresenceSetError",
"RoomKeyRequestResponse",
"RoomKeyRequestError",
"ThumbnailResponse",
"ThumbnailError",
"ToDeviceResponse",
"ToDeviceError",
"RoomContextResponse",
"RoomContextError",
"UploadFilterError",
"UploadFilterResponse",
"UpdateReceiptMarkerError",
"UpdateReceiptMarkerResponse",
]
def verify(schema, error_class, pass_arguments=True):
def decorator(f):
@wraps(f)
def wrapper(cls, parsed_dict, *args, **kwargs):
try:
logger.info("Validating response schema")
validate_json(parsed_dict, schema)
except (SchemaError, ValidationError) as e:
logger.warn("Error validating response: " + str(e.message))
if pass_arguments:
return error_class.from_dict(parsed_dict, *args, **kwargs)
else:
return error_class.from_dict(parsed_dict)
return f(cls, parsed_dict, *args, **kwargs)
return wrapper
return decorator
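# Illustrative sketch (not part of the module): ``verify`` wraps a ``from_dict``
# classmethod so that a schema mismatch is turned into the given error class
# instead of raising. A hypothetical response type would apply it like the real
# ones below:
#
#     @dataclass
#     class ExampleResponse(Response):
#         value: str = field()
#
#         @classmethod
#         @verify(Schemas.empty, ErrorResponse, pass_arguments=False)
#         def from_dict(cls, parsed_dict):
#             return cls(parsed_dict.get("value", ""))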
@dataclass
class Rooms:
invite: Dict[str, "InviteInfo"] = field()
join: Dict[str, "RoomInfo"] = field()
leave: Dict[str, "RoomInfo"] = field()
@dataclass
class DeviceOneTimeKeyCount:
curve25519: Optional[int] = field()
signed_curve25519: Optional[int] = field()
@dataclass
class DeviceList:
changed: List[str] = field()
left: List[str] = field()
@dataclass
class Timeline:
events: List = field()
limited: bool = field()
prev_batch: Optional[str] = field()
@dataclass
class InviteInfo:
invite_state: List = field()
@dataclass
class RoomSummary:
invited_member_count: Optional[int] = None
joined_member_count: Optional[int] = None
heroes: Optional[List[str]] = None
@dataclass
class UnreadNotifications:
notification_count: Optional[int] = None
highlight_count: Optional[int] = None
@dataclass
class RoomInfo:
timeline: Timeline = field()
state: List = field()
ephemeral: List = field()
account_data: List = field()
summary: Optional[RoomSummary] = None
unread_notifications: Optional[UnreadNotifications] = None
@staticmethod
def parse_account_data(event_dict):
"""Parse the account data dictionary and produce a list of events."""
events = []
for event in event_dict:
events.append(AccountDataEvent.parse_event(event))
return events
@dataclass
class RoomMember:
user_id: str = field()
display_name: str = field()
avatar_url: str = field()
@dataclass
class Device:
id: str = field()
display_name: str = field()
last_seen_ip: str = field()
last_seen_date: datetime = field()
@classmethod
def from_dict(cls, parsed_dict):
date = None
if parsed_dict["last_seen_ts"] is not None:
date = datetime.fromtimestamp(parsed_dict["last_seen_ts"] / 1000)
return cls(
parsed_dict["device_id"],
parsed_dict["display_name"],
parsed_dict["last_seen_ip"],
date
)
@dataclass
class Response:
uuid: str = field(default="", init=False)
start_time: Optional[float] = field(default=None, init=False)
end_time: Optional[float] = field(default=None, init=False)
timeout: int = field(default=0, init=False)
transport_response: Optional[TransportResponse] = field(
init=False, default=None,
)
@property
def elapsed(self):
if not self.start_time or not self.end_time:
return 0
elapsed = self.end_time - self.start_time
return max(0, elapsed - (self.timeout / 1000))
@dataclass
class FileResponse(Response):
"""A response representing a successful file content request.
Attributes:
body (bytes): The file's content in bytes.
content_type (str): The content MIME type of the file,
e.g. "image/png".
filename (str, optional): The file's name returned by the server.
"""
body: bytes = field()
content_type: str = field()
filename: Optional[str] = field()
def __str__(self):
return "{} bytes, content type: {}, filename: {}".format(
len(self.body),
self.content_type,
self.filename
)
@classmethod
def from_data(cls, data, content_type, filename=None):
"""Create a FileResponse from file content returned by the server.
Args:
data (bytes): The file's content in bytes.
content_type (str): The content MIME type of the file,
e.g. "image/png".
"""
raise NotImplementedError()
@dataclass
class ErrorResponse(Response):
message: str = field()
status_code: Optional[str] = None
retry_after_ms: Optional[int] = None
soft_logout: bool = False
def __str__(self) -> str:
if self.status_code and self.message:
e = "{} {}".format(self.status_code, self.message)
elif self.message:
e = self.message
elif self.status_code:
e = "{} unknown error".format(self.status_code)
else:
e = "unknown error"
if self.retry_after_ms:
e = "{} - retry after {}ms".format(e, self.retry_after_ms)
return "{}: {}".format(self.__class__.__name__, e)
@classmethod
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> ErrorResponse
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error")
return cls(
parsed_dict["error"],
parsed_dict["errcode"],
parsed_dict.get("retry_after_ms"),
parsed_dict.get("soft_logout", False),
)
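# Illustrative mapping (payload values invented): a standard Matrix error body such as
#     {"errcode": "M_LIMIT_EXCEEDED", "error": "Too many requests", "retry_after_ms": 2000}
# parses into ErrorResponse("Too many requests", "M_LIMIT_EXCEEDED", 2000, False) and
# renders as "ErrorResponse: M_LIMIT_EXCEEDED Too many requests - retry after 2000ms".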
@dataclass
class _ErrorWithRoomId(ErrorResponse):
room_id: str = ""
@classmethod
def from_dict(cls, parsed_dict, room_id):
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error")
return cls(
parsed_dict["error"],
parsed_dict["errcode"],
parsed_dict.get("retry_after_ms"),
parsed_dict.get("soft_logout", False),
room_id
)
class LoginError(ErrorResponse):
pass
class LogoutError(ErrorResponse):
pass
class SyncError(ErrorResponse):
pass
class RoomSendError(_ErrorWithRoomId):
pass
class RoomGetStateError(_ErrorWithRoomId):
"""A response representing an unsuccessful room state query."""
pass
class RoomGetStateEventError(_ErrorWithRoomId):
"""A response representing an unsuccessful room state query."""
pass
class RoomGetEventError(ErrorResponse):
"""A response representing an unsuccessful room get event request."""
pass
class RoomPutStateError(_ErrorWithRoomId):
"""A response representing an unsuccessful room state sending request."""
pass
class RoomRedactError(_ErrorWithRoomId):
pass
class RoomResolveAliasError(ErrorResponse):
"""A response representing an unsuccessful room alias query."""
pass
class RoomDeleteAliasError(ErrorResponse):
"""A response representing an unsuccessful room alias delete request."""
pass
class RoomPutAliasError(ErrorResponse):
"""A response representing an unsuccessful room alias put request."""
pass
class RoomGetVisibilityError(ErrorResponse):
"""A response representing an unsuccessful room get visibility request."""
pass
class RoomTypingError(_ErrorWithRoomId):
"""A response representing a unsuccessful room typing request."""
pass
class UpdateReceiptMarkerError(ErrorResponse):
pass
class RoomReadMarkersError(_ErrorWithRoomId):
"""A response representing a unsuccessful room read markers request."""
pass
class RoomKickError(ErrorResponse):
pass
class RoomBanError(ErrorResponse):
pass
class RoomUnbanError(ErrorResponse):
pass
class RoomInviteError(ErrorResponse):
pass
class RoomCreateError(ErrorResponse):
"""A response representing a unsuccessful create room request."""
pass
class JoinError(ErrorResponse):
pass
class RoomLeaveError(ErrorResponse):
pass
class RoomForgetError(_ErrorWithRoomId):
pass
class RoomMessagesError(_ErrorWithRoomId):
pass
class KeysUploadError(ErrorResponse):
pass
class KeysQueryError(ErrorResponse):
pass
class KeysClaimError(_ErrorWithRoomId):
pass
class ContentRepositoryConfigError(ErrorResponse):
"""A response for a unsuccessful content repository config request."""
class UploadError(ErrorResponse):
"""A response representing a unsuccessful upload request."""
class DownloadError(ErrorResponse):
"""A response representing a unsuccessful download request."""
class ThumbnailError(ErrorResponse):
"""A response representing a unsuccessful thumbnail request."""
@dataclass
class ShareGroupSessionError(_ErrorWithRoomId):
"""Response representing unsuccessful group sessions sharing request."""
users_shared_with: Set[Tuple[str, str]] = field(default_factory=set)
@classmethod
def from_dict(cls, parsed_dict, room_id, users_shared_with):
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error")
return cls(parsed_dict["error"], parsed_dict["errcode"], room_id,
users_shared_with)
class DevicesError(ErrorResponse):
pass
class DeleteDevicesError(ErrorResponse):
pass
class UpdateDeviceError(ErrorResponse):
pass
class JoinedMembersError(_ErrorWithRoomId):
pass
class JoinedRoomsError(ErrorResponse):
"""A response representing an unsuccessful joined rooms query."""
pass
class ProfileGetError(ErrorResponse):
pass
class ProfileGetDisplayNameError(ErrorResponse):
pass
class ProfileSetDisplayNameError(ErrorResponse):
pass
class ProfileGetAvatarError(ErrorResponse):
pass
class PresenceGetError(ErrorResponse):
"""Response representing a unsuccessful get presence request."""
pass
class PresenceSetError(ErrorResponse):
"""Response representing a unsuccessful set presence request."""
pass
class ProfileSetAvatarError(ErrorResponse):
pass
@dataclass
class DiscoveryInfoError(ErrorResponse):
pass
@dataclass
class DiscoveryInfoResponse(Response):
"""A response for a successful discovery info request.
Attributes:
homeserver_url (str): The base URL of the homeserver corresponding to
the requested domain.
identity_server_url (str, optional): The base URL of the identity
server corresponding to the requested domain, if any.
"""
homeserver_url: str = field()
identity_server_url: Optional[str] = None
@classmethod
@verify(Schemas.discovery_info, DiscoveryInfoError)
def from_dict(
cls, parsed_dict: Dict[str, Any],
) -> Union["DiscoveryInfoResponse", DiscoveryInfoError]:
homeserver_url = parsed_dict["m.homeserver"]["base_url"].rstrip("/")
identity_server_url = parsed_dict.get(
"m.identity_server", {},
).get("base_url", "").rstrip("/") or None
return cls(homeserver_url, identity_server_url)
@dataclass
class RegisterErrorResponse(ErrorResponse):
pass
@dataclass
class RegisterResponse(Response):
user_id: str = field()
device_id: str = field()
access_token: str = field()
def __str__(self) -> str:
return "Registered {}, device id {}.".format(
self.user_id, self.device_id,
)
@classmethod
@verify(Schemas.register, RegisterErrorResponse)
def from_dict(cls, parsed_dict):
return cls(
parsed_dict["user_id"],
parsed_dict["device_id"],
parsed_dict["access_token"],
)
@dataclass
class LoginInfoError(ErrorResponse):
pass
@dataclass
class LoginInfoResponse(Response):
flows: List[str] = field()
@classmethod
@verify(Schemas.login_info, LoginInfoError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[LoginInfoResponse, ErrorResponse]
flow_types = [flow["type"] for flow in parsed_dict["flows"]]
return cls(flow_types)
@dataclass
class LoginResponse(Response):
user_id: str = field()
device_id: str = field()
access_token: str = field()
def __str__(self) -> str:
return "Logged in as {}, device id: {}.".format(
self.user_id, self.device_id
)
@classmethod
@verify(Schemas.login, LoginError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[LoginResponse, ErrorResponse]
return cls(
parsed_dict["user_id"],
parsed_dict["device_id"],
parsed_dict["access_token"],
)
@dataclass
class LogoutResponse(Response):
def __str__(self) -> str:
return "Logged out"
@classmethod
@verify(Schemas.empty, LogoutError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[LogoutResponse, ErrorResponse]
"""Create a response for logout response from server."""
return cls()
@dataclass
class JoinedMembersResponse(Response):
members: List[RoomMember] = field()
room_id: str = field()
@classmethod
@verify(Schemas.joined_members, JoinedMembersError)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
room_id: str,
):
# type: (...) -> Union[JoinedMembersResponse, ErrorResponse]
members = []
for user_id, user_info in parsed_dict["joined"].items():
user = RoomMember(
user_id,
user_info.get("display_name", None),
user_info.get("avatar_url", None)
)
members.append(user)
return cls(members, room_id)
@dataclass
class JoinedRoomsResponse(Response):
"""A response containing a list of joined rooms.
Attributes:
rooms (List[str]): The rooms joined by the account.
"""
rooms: List[str] = field()
@classmethod
@verify(Schemas.joined_rooms, JoinedRoomsError)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
):
# type: (...) -> Union[JoinedRoomsResponse, ErrorResponse]
return cls(parsed_dict["joined_rooms"])
@dataclass
class ContentRepositoryConfigResponse(Response):
"""A response for a successful content repository config request.
Attributes:
upload_size (Optional[int]): The maximum file size in bytes for an
upload. If `None`, the limit is unknown.
"""
upload_size: Optional[int] = None
@classmethod
@verify(Schemas.content_repository_config, ContentRepositoryConfigError)
def from_dict(
cls,
parsed_dict: dict,
) -> Union["ContentRepositoryConfigResponse", ErrorResponse]:
return cls(parsed_dict.get("m.upload.size"))
@dataclass
class UploadResponse(Response):
"""A response representing a successful upload request."""
content_uri: str = field()
@classmethod
@verify(Schemas.upload, UploadError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[UploadResponse, ErrorResponse]
return cls(
parsed_dict["content_uri"],
)
@dataclass
class DownloadResponse(FileResponse):
"""A response representing a successful download request."""
@classmethod
def from_data(
cls,
data: bytes,
content_type: str,
filename: Optional[str] = None
):
# type: (...) -> Union[DownloadResponse, DownloadError]
if isinstance(data, bytes):
return cls(body=data, content_type=content_type, filename=filename)
if isinstance(data, dict):
return DownloadError.from_dict(data)
return DownloadError("invalid data")
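# Illustrative dispatch (values invented): DownloadResponse.from_data branches on the
# payload type handed back by the transport layer, e.g.
#     DownloadResponse.from_data(b"...", "image/png", "cat.png")        -> DownloadResponse
#     DownloadResponse.from_data({"errcode": "M_NOT_FOUND",
#                                 "error": "Not found"}, "text/plain")  -> DownloadError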
@dataclass
class ThumbnailResponse(FileResponse):
"""A response representing a successful thumbnail request."""
@classmethod
def from_data(
cls,
data: bytes,
content_type: str,
filename: Optional[str] = None
):
# type: (...) -> Union[ThumbnailResponse, ThumbnailError]
if not content_type.startswith("image/"):
return ThumbnailError(f"invalid content type: {content_type}")
if isinstance(data, bytes):
return cls(body=data, content_type=content_type, filename=filename)
if isinstance(data, dict):
return ThumbnailError.from_dict(data)
return ThumbnailError("invalid data")
@dataclass
class RoomEventIdResponse(Response):
event_id: str = field()
room_id: str = field()
@staticmethod
def create_error(parsed_dict, _room_id):
return ErrorResponse.from_dict(parsed_dict)
@classmethod
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
room_id: str,
):
# type: (...) -> Union[RoomEventIdResponse, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.room_event_id)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict, room_id)
return cls(parsed_dict["event_id"], room_id)
class RoomSendResponse(RoomEventIdResponse):
@staticmethod
def create_error(parsed_dict, room_id):
return RoomSendError.from_dict(parsed_dict, room_id)
@dataclass
class RoomGetStateResponse(Response):
"""A response containing the state of a room.
Attributes:
events (List): The events making up the room state.
room_id (str): The ID of the room.
"""
events: List = field()
room_id: str = field()
@staticmethod
def create_error(parsed_dict, room_id):
return RoomGetStateError.from_dict(parsed_dict, room_id)
@classmethod
def from_dict(
cls,
parsed_dict: List[Dict[Any, Any]],
room_id: str,
):
# type: (...) -> Union[RoomGetStateResponse, RoomGetStateError]
try:
validate_json(parsed_dict, Schemas.room_state)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict, room_id)
return cls(parsed_dict, room_id)
@dataclass
class RoomGetStateEventResponse(Response):
"""A response containing the content of a specific bit of room state.
Attributes:
content (Dict): The content of the state event.
event_type (str): The type of the state event.
state_key (str): The key of the state event.
room_id (str): The ID of the room that the state event comes from.
"""
content: Dict = field()
event_type: str = field()
state_key: str = field()
room_id: str = field()
@staticmethod
def create_error(parsed_dict, room_id):
return RoomGetStateEventError.from_dict(parsed_dict, room_id)
@classmethod
def from_dict(
cls,
parsed_dict: Dict[str, Any],
event_type: str,
state_key: str,
room_id: str,
) -> Union["RoomGetStateEventResponse", RoomGetStateEventError]:
return cls(parsed_dict, event_type, state_key, room_id)
class RoomGetEventResponse(Response):
"""A response indicating successful room get event request.
Attributes:
event (Event): The requested event.
"""
event: Event = field()
@classmethod
@verify(
Schemas.room_event,
RoomGetEventError,
pass_arguments=False,
)
def from_dict(
cls,
parsed_dict: Dict[str, Any]
) -> Union["RoomGetEventResponse", RoomGetEventError]:
event = Event.parse_event(parsed_dict)
resp = cls()
resp.event = event
return resp
class RoomPutStateResponse(RoomEventIdResponse):
"""A response indicating successful sending of room state."""
@staticmethod
def create_error(parsed_dict, room_id):
return RoomPutStateError.from_dict(parsed_dict, room_id)
class RoomRedactResponse(RoomEventIdResponse):
@staticmethod
def create_error(parsed_dict, room_id):
return RoomRedactError.from_dict(parsed_dict, room_id)
@dataclass
class RoomResolveAliasResponse(Response):
"""A response containing the result of resolving an alias.
Attributes:
room_alias (str): The alias of the room.
room_id (str): The resolved id of the room.
servers (List[str]): Servers participating in the room.
"""
room_alias: str = field()
room_id: str = field()
servers: List[str] = field()
@classmethod
@verify(
Schemas.room_resolve_alias,
RoomResolveAliasError,
pass_arguments=False,
)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
room_alias: str,
):
# type: (...) -> Union[RoomResolveAliasResponse, ErrorResponse]
room_id = parsed_dict["room_id"]
servers = parsed_dict["servers"]
return cls(room_alias, room_id, servers)
@dataclass
class RoomDeleteAliasResponse(Response):
"""A response containing the result of deleting an alias.
"""
room_alias: str = field()
@classmethod
def from_dict(cls, parsed_dict: Dict[Any, Any], room_alias: str):
# type: (...) -> Union[RoomDeleteAliasResponse, ErrorResponse]
return cls(room_alias)
@dataclass
class RoomPutAliasResponse(Response):
"""A response containing the result of adding an alias.
"""
room_alias: str = field()
room_id: str = field()
@classmethod
def from_dict(cls, parsed_dict: Dict[Any, Any], room_alias: str, room_id: str):
# type: (...) -> Union[RoomPutAliasResponse, ErrorResponse]
return cls(room_alias, room_id)
@dataclass
class RoomGetVisibilityResponse(Response):
"""A response containing the result of a get visibility request.
"""
room_id: str = field()
visibility: str = field()
@classmethod
@verify(
Schemas.room_get_visibility,
RoomGetVisibilityError,
pass_arguments=False,
)
def from_dict(cls, parsed_dict: Dict[Any, Any], room_id: str):
# type: (...) -> Union[RoomGetVisibilityResponse, ErrorResponse]
visibility = parsed_dict["visibility"]
return cls(room_id, visibility)
class EmptyResponse(Response):
@staticmethod
def create_error(parsed_dict):
return ErrorResponse.from_dict(parsed_dict)
@classmethod
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[Any, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.empty)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict)
return cls()
@dataclass
class _EmptyResponseWithRoomId(Response):
room_id: str = field()
@staticmethod
def create_error(parsed_dict, room_id):
return _ErrorWithRoomId.from_dict(parsed_dict, room_id)
@classmethod
def from_dict(cls, parsed_dict: Dict[Any, Any], room_id: str):
# type: (...) -> Union[Any, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.empty)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict, room_id)
return cls(room_id)
class RoomKickResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomKickError.from_dict(parsed_dict)
class RoomBanResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomBanError.from_dict(parsed_dict)
class RoomUnbanResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomUnbanError.from_dict(parsed_dict)
class RoomInviteResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomInviteError.from_dict(parsed_dict)
@dataclass
class ShareGroupSessionResponse(Response):
"""Response representing a successful group sessions sharing request.
Attributes:
room_id (str): The room id of the group session.
users_shared_with (Set[Tuple[str, str]]): A set containing a tuple of
user id device id pairs with whom we shared the group session in
this request.
"""
room_id: str = field()
users_shared_with: set = field()
@classmethod
@verify(Schemas.empty, ShareGroupSessionError)
def from_dict(
cls,
_: Dict[Any, Any],
room_id: str,
users_shared_with: Set[Tuple[str, str]],
):
# type: (...) -> Union[ShareGroupSessionResponse, ErrorResponse]
"""Create a response from the json dict the server returns.
Args:
parsed_dict (Dict): The dict containing the raw json response.
            room_id (str): The id of the room to which the group session
                belongs.
users_shared_with (Set[Tuple[str, str]]): A set containing a tuple
of user id device id pairs with whom we shared the group
session in this request.
"""
return cls(room_id, users_shared_with)
class RoomTypingResponse(_EmptyResponseWithRoomId):
"""A response representing a successful room typing request."""
@staticmethod
def create_error(parsed_dict, room_id):
return RoomTypingError.from_dict(parsed_dict, room_id)
class UpdateReceiptMarkerResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return UpdateReceiptMarkerError.from_dict(parsed_dict)
class RoomReadMarkersResponse(_EmptyResponseWithRoomId):
"""A response representing a successful room read markers request."""
@staticmethod
def create_error(parsed_dict, room_id):
        return RoomReadMarkersError.from_dict(parsed_dict, room_id)
@dataclass
class DeleteDevicesAuthResponse(Response):
session: str = field()
flows: Dict = field()
params: Dict = field()
@classmethod
@verify(Schemas.delete_devices, DeleteDevicesError)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
):
# type: (...) -> Union[DeleteDevicesAuthResponse, ErrorResponse]
return cls(
parsed_dict["session"],
parsed_dict["flows"],
parsed_dict["params"]
)
class DeleteDevicesResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return DeleteDevicesError.from_dict(parsed_dict)
@dataclass
class RoomMessagesResponse(Response):
room_id: str = field()
chunk: List[Union[Event, BadEventType]] = field()
start: str = field()
end: str = field()
@classmethod
@verify(Schemas.room_messages, RoomMessagesError)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
room_id: str,
):
# type: (...) -> Union[RoomMessagesResponse, ErrorResponse]
chunk: List[Union[Event, BadEventType]] = []
chunk = SyncResponse._get_room_events(parsed_dict["chunk"])
return cls(room_id, chunk, parsed_dict["start"], parsed_dict["end"])
@dataclass
class RoomIdResponse(Response):
room_id: str = field()
@staticmethod
def create_error(parsed_dict):
return ErrorResponse.from_dict(parsed_dict)
@classmethod
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[RoomIdResponse, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.room_id)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict)
return cls(parsed_dict["room_id"])
@dataclass
class RoomCreateResponse(Response):
"""Response representing a successful create room request."""
room_id: str = field()
@classmethod
@verify(
Schemas.room_create_response, RoomCreateError, pass_arguments=False,
)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
):
# type: (...) -> Union[RoomCreateResponse, RoomCreateError]
return cls(parsed_dict["room_id"])
class JoinResponse(RoomIdResponse):
@staticmethod
def create_error(parsed_dict):
return JoinError.from_dict(parsed_dict)
class RoomLeaveResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomLeaveError.from_dict(parsed_dict)
class RoomForgetResponse(_EmptyResponseWithRoomId):
"""Response representing a successful forget room request."""
@staticmethod
def create_error(parsed_dict, room_id):
return RoomForgetError.from_dict(parsed_dict, room_id)
@dataclass
class KeysUploadResponse(Response):
curve25519_count: int = field()
signed_curve25519_count: int = field()
@classmethod
@verify(Schemas.keys_upload, KeysUploadError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[KeysUploadResponse, ErrorResponse]
counts = parsed_dict["one_time_key_counts"]
return cls(counts["curve25519"], counts["signed_curve25519"])
@dataclass
class KeysQueryResponse(Response):
device_keys: Dict = field()
failures: Dict = field()
changed: Dict[str, Dict[str, Any]] = field(
init=False, default_factory=dict,
)
@classmethod
@verify(Schemas.keys_query, KeysQueryError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[KeysQueryResponse, ErrorResponse]
device_keys = parsed_dict["device_keys"]
failures = parsed_dict["failures"]
return cls(device_keys, failures)
@dataclass
class KeysClaimResponse(Response):
one_time_keys: Dict[Any, Any] = field()
failures: Dict[Any, Any] = field()
room_id: str = ""
@classmethod
@verify(Schemas.keys_claim, KeysClaimError)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
room_id: str = "",
):
# type: (...) -> Union[KeysClaimResponse, ErrorResponse]
one_time_keys = parsed_dict["one_time_keys"]
failures = parsed_dict["failures"]
return cls(one_time_keys, failures, room_id)
@dataclass
class DevicesResponse(Response):
devices: List[Device] = field()
@classmethod
@verify(Schemas.devices, DevicesError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[DevicesResponse, ErrorResponse]
devices = []
for device_dict in parsed_dict["devices"]:
try:
device = Device.from_dict(device_dict)
except ValueError:
continue
devices.append(device)
return cls(devices)
@dataclass
class RoomKeyRequestError(ErrorResponse):
"""Response representing a failed room key request."""
pass
@dataclass
class RoomKeyRequestResponse(Response):
"""Response representing a successful room key request.
Attributes:
        request_id (str): The unique id of this key request; if we receive a
            to_device event it will contain the same request id.
session_id (str): The id of the session that we requested.
room_id (str): The id of the room that the session belongs to.
algorithm (str): The encryption algorithm of the session.
"""
request_id: str = field()
session_id: str = field()
room_id: str = field()
algorithm: str = field()
@classmethod
@verify(Schemas.empty, RoomKeyRequestError, False)
def from_dict(cls, _, request_id, session_id, room_id, algorithm):
"""Create a RoomKeyRequestResponse from a json response.
Args:
parsed_dict (Dict): The dictionary containing the json response.
            request_id (str): The unique id of this key request; if we receive
                a to_device event it will contain the same request id.
session_id (str): The id of the session that we requested.
room_id (str): The id of the room that the session belongs to.
algorithm (str): The encryption algorithm of the session.
"""
return cls(request_id, session_id, room_id, algorithm)
class UpdateDeviceResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return UpdateDeviceError.from_dict(parsed_dict)
@dataclass
class ProfileGetResponse(Response):
"""Response representing a successful get profile request.
Attributes:
displayname (str, optional): The display name of the user.
None if the user doesn't have a display name.
avatar_url (str, optional): The matrix content URI for the user's
avatar. None if the user doesn't have an avatar.
other_info (dict): Contains any other information returned for the
user's profile.
"""
displayname: Optional[str] = None
avatar_url: Optional[str] = None
other_info: Dict[Any, Any] = field(default_factory=dict)
def __str__(self) -> str:
return "Display name: {}, avatar URL: {}, other info: {}".format(
self.displayname,
self.avatar_url,
self.other_info,
)
@classmethod
@verify(Schemas.get_profile, ProfileGetError)
def from_dict(cls, parsed_dict: Dict[Any, Any]):
# type: (...) -> Union[ProfileGetResponse, ErrorResponse]
return cls(
parsed_dict.get("displayname"),
parsed_dict.get("avatar_url"),
{k: v for k, v in parsed_dict.items()
if k not in ("displayname", "avatar_url")},
)
@dataclass
class ProfileGetDisplayNameResponse(Response):
"""Response representing a successful get display name request.
Attributes:
displayname (str, optional): The display name of the user.
None if the user doesn't have a display name.
"""
displayname: Optional[str] = None
def __str__(self) -> str:
return "Display name: {}".format(self.displayname)
@classmethod
@verify(Schemas.get_displayname, ProfileGetDisplayNameError)
def from_dict(
cls,
parsed_dict: (Dict[Any, Any]),
):
# type: (...) -> Union[ProfileGetDisplayNameResponse, ErrorResponse]
return cls(parsed_dict.get("displayname"))
class ProfileSetDisplayNameResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return ProfileSetDisplayNameError.from_dict(parsed_dict)
@dataclass
class ProfileGetAvatarResponse(Response):
"""Response representing a successful get avatar request.
Attributes:
avatar_url (str, optional): The matrix content URI for the user's
avatar. None if the user doesn't have an avatar.
"""
avatar_url: Optional[str] = None
def __str__(self) -> str:
return "Avatar URL: {}".format(self.avatar_url)
@classmethod
@verify(Schemas.get_avatar, ProfileGetAvatarError)
def from_dict(
cls,
parsed_dict: (Dict[Any, Any]),
):
# type: (...) -> Union[ProfileGetAvatarResponse, ErrorResponse]
return cls(parsed_dict.get("avatar_url"))
class ProfileSetAvatarResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return ProfileSetAvatarError.from_dict(parsed_dict)
@dataclass
class PresenceGetResponse(Response):
"""Response representing a successful get presence request.
Attributes:
        user_id (str): The user's id.
presence (str): The user's presence state. One of: ["online",
"offline", "unavailable"]
last_active_ago (int, optional): The length of time in milliseconds
since an action was performed by this user. None if not set.
currently_active (bool, optional): Whether the user is currently
active. None if not set.
status_msg (str, optional): The state message for this user. None if
not set.
"""
user_id: str
presence: str
last_active_ago: Optional[int]
currently_active: Optional[bool]
status_msg: Optional[str]
@classmethod
@verify(Schemas.get_presence, PresenceGetError, pass_arguments=False)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
user_id: str
) -> Union["PresenceGetResponse", PresenceGetError]:
return cls(
user_id,
parsed_dict.get("presence", "offline"),
parsed_dict.get("last_active_ago"),
parsed_dict.get("currently_active"),
parsed_dict.get("status_msg")
)
class PresenceSetResponse(EmptyResponse):
"""Response representing a successful set presence request."""
@staticmethod
def create_error(parsed_dict):
return PresenceSetError.from_dict(parsed_dict)
@dataclass
class ToDeviceError(ErrorResponse):
"""Response representing a unsuccessful room key request."""
to_device_message: Optional[ToDeviceMessage] = None
@classmethod
def from_dict(cls, parsed_dict, message):
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error", None, message)
return cls(parsed_dict["error"], parsed_dict["errcode"], message)
@dataclass
class ToDeviceResponse(Response):
"""Response representing a successful room key request."""
to_device_message: ToDeviceMessage = field()
@classmethod
@verify(Schemas.empty, ToDeviceError)
def from_dict(cls, parsed_dict, message):
"""Create a ToDeviceResponse from a json response."""
return cls(message)
@dataclass
class RoomContextError(_ErrorWithRoomId):
"""Response representing a unsuccessful room context request."""
@dataclass
class RoomContextResponse(Response):
"""Room event context response.
This Response holds a number of events that happened just before and after
a specified event.
Attributes:
room_id(str): The room id of the room which the events belong to.
start(str): A token that can be used to paginate backwards with.
end(str): A token that can be used to paginate forwards with.
events_before(List[Event]): A list of room events that happened just
before the requested event, in reverse-chronological order.
event(Event): Details of the requested event.
events_after(List[Event]): A list of room events that happened just
after the requested event, in chronological order.
state(List[Event]): The state of the room at the last event returned.
"""
room_id: str = field()
start: str = field()
end: str = field()
event: Optional[Union[Event, BadEventType]] = field()
events_before: List[Union[Event, BadEventType]] = field()
events_after: List[Union[Event, BadEventType]] = field()
state: List[Union[Event, BadEventType]] = field()
@classmethod
@verify(Schemas.room_context, RoomContextError)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
room_id: str,
):
# type: (...) -> Union[RoomContextResponse, ErrorResponse]
events_before = SyncResponse._get_room_events(
parsed_dict["events_before"]
)
events_after = SyncResponse._get_room_events(
parsed_dict["events_after"]
)
event = Event.parse_event(parsed_dict["event"])
state = SyncResponse._get_room_events(
parsed_dict["state"]
)
return cls(room_id, parsed_dict["start"], parsed_dict["end"],
event, events_before, events_after, state)
@dataclass
class SyncResponse(Response):
next_batch: str = field()
rooms: Rooms = field()
device_key_count: DeviceOneTimeKeyCount = field()
device_list: DeviceList = field()
to_device_events: List[ToDeviceEvent] = field()
presence_events: List[PresenceEvent] = field()
account_data_events: List[AccountDataEvent] = field(default_factory=list)
def __str__(self) -> str:
result = []
for room_id, room_info in self.rooms.join.items():
room_header = " Messages for room {}:\n ".format(room_id)
messages = []
for event in room_info.timeline.events:
messages.append(str(event))
room_message = room_header + "\n ".join(messages)
result.append(room_message)
if len(self.to_device_events) > 0:
result.append(" Device messages:")
for event in self.to_device_events:
result.append(" {}".format(event))
body = "\n".join(result)
string = ("Sync response until batch: {}:\n{}").format(
self.next_batch, body
)
return string
@staticmethod
def _get_room_events(parsed_dict: List[Dict[Any, Any]]) -> List[Union[Event, BadEventType]]:
events: List[Union[Event, BadEventType]] = []
for event_dict in parsed_dict:
event = Event.parse_event(event_dict)
if event:
events.append(event)
return events
@staticmethod
def _get_to_device(parsed_dict: Dict[Any, Any]):
# type: (...) -> List[ToDeviceEvent]
events: List[ToDeviceEvent] = []
for event_dict in parsed_dict.get("events", []):
event = ToDeviceEvent.parse_event(event_dict)
if event:
events.append(event)
return events
@staticmethod
def _get_timeline(parsed_dict: Dict[Any, Any]) -> Timeline:
validate_json(parsed_dict, Schemas.room_timeline)
events = SyncResponse._get_room_events(parsed_dict.get("events", []))
return Timeline(
events, parsed_dict["limited"], parsed_dict.get("prev_batch")
)
@staticmethod
def _get_state(parsed_dict: Dict[Any, Any]) -> List[Union[Event, BadEventType]]:
validate_json(parsed_dict, Schemas.sync_room_state)
events = SyncResponse._get_room_events(
parsed_dict.get("events", []),
)
return events
@staticmethod
def _get_invite_state(parsed_dict):
validate_json(parsed_dict, Schemas.sync_room_state)
events = []
for event_dict in parsed_dict.get("events", []):
event = InviteEvent.parse_event(event_dict)
if event:
events.append(event)
return events
@staticmethod
def _get_ephemeral_events(parsed_dict):
events = []
for event_dict in parsed_dict:
event = EphemeralEvent.parse_event(event_dict)
if event:
events.append(event)
return events
@staticmethod
def _get_join_info(
state_events: List[Any],
timeline_events: List[Any],
prev_batch: Optional[str],
limited: bool,
ephemeral_events: List[Any],
summary_events: Dict[str, Any],
unread_notification_events: Dict[str, Any],
account_data_events: List[Any],
) -> RoomInfo:
state = SyncResponse._get_room_events(state_events)
events = SyncResponse._get_room_events(timeline_events)
timeline = Timeline(events, limited, prev_batch)
ephemeral_event_list = SyncResponse._get_ephemeral_events(
ephemeral_events
)
summary = RoomSummary(
summary_events.get("m.invited_member_count"),
summary_events.get("m.joined_member_count"),
summary_events.get("m.heroes"),
)
unread_notifications = UnreadNotifications(
unread_notification_events.get("notification_count"),
unread_notification_events.get("highlight_count"),
)
account_data = RoomInfo.parse_account_data(account_data_events)
return RoomInfo(
timeline,
state,
ephemeral_event_list,
account_data,
summary,
unread_notifications,
)
@staticmethod
def _get_room_info(parsed_dict: Dict[Any, Any]) -> Rooms:
joined_rooms: Dict[str, RoomInfo] = {}
invited_rooms: Dict[str, InviteInfo] = {}
left_rooms: Dict[str, RoomInfo] = {}
for room_id, room_dict in parsed_dict.get("invite", {}).items():
state = SyncResponse._get_invite_state(room_dict["invite_state"])
invite_info = InviteInfo(state)
invited_rooms[room_id] = invite_info
for room_id, room_dict in parsed_dict.get("leave", {}).items():
state = SyncResponse._get_state(room_dict["state"])
timeline = SyncResponse._get_timeline(room_dict["timeline"])
leave_info = RoomInfo(timeline, state, [], [])
left_rooms[room_id] = leave_info
for room_id, room_dict in parsed_dict.get("join", {}).items():
join_info = SyncResponse._get_join_info(
room_dict["state"]["events"],
room_dict["timeline"]["events"],
room_dict["timeline"].get("prev_batch"),
room_dict["timeline"]["limited"],
room_dict["ephemeral"]["events"],
room_dict.get("summary", {}),
room_dict.get("unread_notifications", {}),
room_dict["account_data"]["events"],
)
joined_rooms[room_id] = join_info
return Rooms(invited_rooms, joined_rooms, left_rooms)
@staticmethod
def _get_presence(parsed_dict) -> List[PresenceEvent]:
presence_events = []
for presence_dict in parsed_dict.get("presence", {}).get("events", []):
presence_events.append(PresenceEvent.from_dict(presence_dict))
return presence_events
@staticmethod
def _get_account_data(
parsed_dict: Dict[str, Any],
) -> Generator[AccountDataEvent, None, None]:
for ev_dict in parsed_dict.get("account_data", {}).get("events", []):
yield AccountDataEvent.parse_event(ev_dict)
@classmethod
@verify(Schemas.sync, SyncError, False)
def from_dict(
cls,
parsed_dict: Dict[Any, Any],
):
# type: (...) -> Union[SyncResponse, ErrorResponse]
to_device = cls._get_to_device(parsed_dict.get("to_device", {}))
key_count_dict = parsed_dict.get("device_one_time_keys_count", {})
key_count = DeviceOneTimeKeyCount(
key_count_dict.get("curve25519"),
key_count_dict.get("signed_curve25519")
)
devices = DeviceList(
parsed_dict.get("device_lists", {}).get("changed", []),
parsed_dict.get("device_lists", {}).get("left", []),
)
presence_events = SyncResponse._get_presence(parsed_dict)
rooms = SyncResponse._get_room_info(parsed_dict.get("rooms", {}))
return SyncResponse(
parsed_dict["next_batch"],
rooms,
key_count,
devices,
to_device,
presence_events,
list(SyncResponse._get_account_data(parsed_dict)),
)
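# Illustrative shape of the payload consumed above (values invented; the exact
# required keys are governed by Schemas.sync, while the optional sections fall
# back to empty defaults via .get()):
#
#     {
#         "next_batch": "s72595_4483_1934",
#         "rooms": {"join": {}, "invite": {}, "leave": {}},
#         "to_device": {"events": []},
#         "device_one_time_keys_count": {"signed_curve25519": 50},
#         "device_lists": {"changed": [], "left": []},
#         "presence": {"events": []},
#         "account_data": {"events": []},
#     }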
class UploadFilterError(ErrorResponse):
pass
@dataclass
class UploadFilterResponse(Response):
"""Response representing a successful filter upload request.
Attributes:
filter_id (str): A filter ID that may be used in
future requests to restrict which events are returned to the
client.
"""
filter_id: str = field()
@classmethod
@verify(Schemas.upload_filter, UploadFilterError)
def from_dict(
cls, parsed_dict: Dict[Any, Any],
) -> Union["UploadFilterResponse", UploadFilterError]:
return cls(parsed_dict["filter_id"])
class WhoamiError(ErrorResponse):
pass
@dataclass
class WhoamiResponse(Response):
user_id: str = field()
@classmethod
@verify(Schemas.whoami, WhoamiError)
def from_dict(
cls, parsed_dict: Dict[Any, Any],
) -> Union["WhoamiResponse", WhoamiError]:
return cls(parsed_dict["user_id"])
@dataclass
class SetPushRuleResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict: Dict[str, Any]):
return SetPushRuleError.from_dict(parsed_dict)
class SetPushRuleError(ErrorResponse):
pass
@dataclass
class DeletePushRuleResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict: Dict[str, Any]):
return DeletePushRuleError.from_dict(parsed_dict)
class DeletePushRuleError(ErrorResponse):
pass
@dataclass
class EnablePushRuleResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict: Dict[str, Any]):
return EnablePushRuleError.from_dict(parsed_dict)
class EnablePushRuleError(ErrorResponse):
pass
@dataclass
class SetPushRuleActionsResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict: Dict[str, Any]):
return SetPushRuleActionsError.from_dict(parsed_dict)
class SetPushRuleActionsError(ErrorResponse):
pass
| 27.844711 | 96 | 0.657896 |
79499fbffbd5e6dd955306e5be5c6c951cd40791
| 394 |
py
|
Python
|
examples/__init__.py
|
rishavpramanik/mealpy
|
d4a4d5810f15837764e4ee61517350fef3dc92b3
|
[
"MIT"
] | null | null | null |
examples/__init__.py
|
rishavpramanik/mealpy
|
d4a4d5810f15837764e4ee61517350fef3dc92b3
|
[
"MIT"
] | null | null | null |
examples/__init__.py
|
rishavpramanik/mealpy
|
d4a4d5810f15837764e4ee61517350fef3dc92b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Created by "Thieu" at 10:07, 02/03/2022 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
| 65.666667 | 132 | 0.253807 |
7949a2e9b1aa25b53d4552e2044ecdd7b2e275c7
| 6,900 |
py
|
Python
|
src/solutions/common/bizz/bulk_invite.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/bizz/bulk_invite.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/bizz/bulk_invite.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import logging
import time
from types import NoneType
from google.appengine.ext import deferred, db
from mcfw.rpc import returns, arguments
from rogerthat.rpc import users
from rogerthat.service.api import system
from rogerthat.to.service import UserDetailsTO
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils import now
from rogerthat.utils.app import create_app_user_by_email
from rogerthat.utils.channel import send_message
from rogerthat.utils.models import reconstruct_key
from rogerthat.utils.transactions import run_in_xg_transaction
from solutions import translate as common_translate
from solutions.common.bizz import SERVICE_AUTOCONNECT_INVITE_TAG
from solutions.common.bizz.inbox import create_solution_inbox_message
from solutions.common.bizz.messaging import send_inbox_forwarders_message
from solutions.common.bizz.settings import get_service_info
from solutions.common.dal import get_solution_settings
from solutions.common.models import RestaurantInvite, SolutionSettings, SolutionInboxMessage
from solutions.common.to import SolutionInboxMessageTO
@returns(NoneType)
@arguments(service_user=users.User, service_identity=unicode, emails=[unicode], message=unicode, app_id=unicode)
def bulk_invite(service_user, service_identity, emails, message, app_id=None):
def trans(app_id):
emailz = list(emails)
counter = 0
while emailz:
counter += 1
if counter < 4:
email = emailz.pop()
deferred.defer(_create_restaurant_invite, service_user, service_identity,
email, message, app_id, _transactional=True)
else:
deferred.defer(bulk_invite, service_user, service_identity,
emailz, message, app_id, _transactional=True)
break
if not app_id:
app_id = system.get_info().app_ids[0]
db.run_in_transaction(trans, app_id)
@returns(NoneType)
@arguments(service_user=users.User, service_identity=unicode, invitee=unicode, message=unicode, app_id=unicode)
def _create_restaurant_invite(service_user, service_identity, invitee, message, app_id):
def trans():
        # 1: Check if an invite has already been sent to this invitee by the service in the last month
sln_settings = get_solution_settings(service_user)
db_key = RestaurantInvite.create_key(service_user, service_identity, invitee, sln_settings.solution)
old_invite = db.get(db_key)
t = time.time()
# 2: Store in datastore
if old_invite:
if old_invite.status == RestaurantInvite.STATUS_ACCEPTED:
return
if not old_invite.epoch < t - 30 * 24 * 60 * 60:
return
else:
old_invite.epoch = t
old_invite.put()
else:
invite = RestaurantInvite(key=db_key)
invite.epoch = t
invite.put()
# 3: Do invite
deferred.defer(_restaurant_invite, service_user, service_identity, invitee, message, unicode(db_key), sln_settings, app_id,
_transactional=True)
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_on, trans)
@returns(NoneType)
@arguments(service_user=users.User, service_identity=unicode, invitee=unicode, message=unicode, tag=unicode, sln_settings=SolutionSettings, app_id=unicode)
def _restaurant_invite(service_user, service_identity, invitee, message, tag, sln_settings, app_id):
from rogerthat.service.api.friends import invite as invite_api_call
from rogerthat.bizz.friends import CanNotInviteFriendException
language = sln_settings.main_language or DEFAULT_LANGUAGE
users.set_user(service_user)
try:
invite_api_call(invitee, None, message, language, tag, service_identity, app_id)
except CanNotInviteFriendException:
logging.debug('%s is already connected with %s', invitee, service_user)
pass
finally:
users.clear_user()
@returns(NoneType)
@arguments(service_user=users.User, service_identity=unicode, tag=unicode, email=unicode, result=unicode, user_details=[UserDetailsTO])
def bulk_invite_result(service_user, service_identity, tag, email, result, user_details):
if not tag:
logging.exception("Expected tag in bulk_invite_result")
return
if tag in (SERVICE_AUTOCONNECT_INVITE_TAG,):
return
try:
key = db.Key(tag)
except db.BadKeyError:
logging.info('Tag is no db.Key: %s. Ignoring...', tag)
return
def trans():
invite = db.get(reconstruct_key(key))
if not invite:
logging.error("Invite object not found in datastore")
return
save_message = False
if "accepted" == result:
invite.status = RestaurantInvite.STATUS_ACCEPTED
save_message = True
else:
invite.status = RestaurantInvite.STATUS_REJECTED
invite.put()
return save_message
save_message = run_in_xg_transaction(trans)
if save_message:
now_ = now()
sln_settings = get_solution_settings(service_user)
msg = common_translate(sln_settings.main_language, 'if-accepted-invitation',
if_name=user_details[0].name,
if_email=user_details[0].email)
message = create_solution_inbox_message(
service_user, service_identity, SolutionInboxMessage.CATEGORY_BULK_INVITE, None, False, user_details, now_, msg, False)
app_user = create_app_user_by_email(user_details[0].email, user_details[0].app_id)
send_inbox_forwarders_message(service_user, service_identity, app_user, msg, {
'if_name': user_details[0].name,
'if_email': user_details[0].email
}, message_key=message.solution_inbox_message_key, reply_enabled=message.reply_enabled, send_reminder=False)
service_info = get_service_info(service_user, service_identity)
send_message(service_user, u"solutions.common.messaging.update",
service_identity=service_identity,
message=SolutionInboxMessageTO.fromModel(message, sln_settings, service_info, True).to_dict())
| 42.592593 | 155 | 0.708841 |
7949a2fbf4bf59334f5d269f90370e3d17e5be01
| 3,291 |
py
|
Python
|
covidfaq/scrape/convert_scrape.py
|
dialoguemd/covidfaq
|
a493ed72e07b83cdf736684ce1cc9ee47b9bfb3f
|
[
"MIT"
] | 3 |
2020-06-22T17:05:22.000Z
|
2021-07-18T20:51:57.000Z
|
covidfaq/scrape/convert_scrape.py
|
dialoguemd/covidfaq
|
a493ed72e07b83cdf736684ce1cc9ee47b9bfb3f
|
[
"MIT"
] | 25 |
2020-03-21T14:58:05.000Z
|
2021-04-02T14:27:28.000Z
|
covidfaq/scrape/convert_scrape.py
|
dialoguemd/covidfaq
|
a493ed72e07b83cdf736684ce1cc9ee47b9bfb3f
|
[
"MIT"
] | 6 |
2020-03-21T23:33:02.000Z
|
2020-07-27T15:12:22.000Z
|
import argparse
import json
import logging
import os
logger = logging.getLogger(__name__)
def collapse_jsons(json_files):
collapsed = {}
for json_file in json_files:
with open(json_file, "r", encoding="utf-8") as fh:
faq = json.load(fh)
for k, v in faq.items():
if k != "document_URL":
collapsed[k] = v
return collapsed
def json_to_passages(collapsed_json, passage_id_start_idx=0):
passages = []
passage_id = passage_id_start_idx
for entry in collapsed_json.values():
passage = {
"passage_id": passage_id,
"source": entry["source"],
"uri": entry["url"],
"time_of_scrape": entry["time"],
"reference_type": entry["type"],
"reference": {
"page_title": "".join(entry["nested_title"]).strip(),
"section_headers": [entry["title"]],
"section_content": "".join(entry["plaintext"]).strip(),
"section_raw_html": entry["html"],
"section_converted_html": entry["converted_html"],
"selected_span": None,
},
}
passage_id += 1
passages.append(passage)
logger.info("generated {} passages from the scrape provided".format(len(passages)))
return passages
def dump_passages(passages, fname):
with open(fname, "w", encoding="utf-8") as f:
json.dump({"passages": passages}, f, indent=6, ensure_ascii=False)
def get_scraped_json_filenames(scrapes_path, source, lang, is_faq=True):
matches = [source, lang, ".json"]
if is_faq:
matches += ["faq"]
return [
os.path.join(scrapes_path, f)
for f in os.listdir(scrapes_path)
if all(match in f for match in matches)
]
else:
return [
os.path.join(scrapes_path, f)
for f in os.listdir(scrapes_path)
if all(match in f for match in matches) and "faq" not in f
]
def scrapes_to_passages(scrapes_path, source, lang, is_faq):
"""Writes the scrapes to the proper format in output_file"""
json_filenames = get_scraped_json_filenames(scrapes_path, source, lang, is_faq)
collapsed_json = collapse_jsons(json_filenames)
passages = json_to_passages(collapsed_json)
return passages
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input",
help="input json (list of files to convert, can use a regex, e.g. quebec-en-faq*.json ",
required=True,
nargs="+",
)
parser.add_argument("--output-passages", help="output file in bert_reranker format")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
# Load json scrapes and collapse them to a single json
collapsed_json = collapse_jsons(args.input)
logger.info(
"collapsed {} files into a single dict with {} elements".format(
len(args.input), len(collapsed_json)
)
)
# generate passages from the scrape to the appropriate format
passages = json_to_passages(collapsed_json)
if args.output_passages is not None:
dump_passages(passages, args.output_passages)
if __name__ == "__main__":
main()
| 30.757009 | 96 | 0.614099 |
7949a3f6da293abdd85512209242bae76ab4d816
| 13,672 |
py
|
Python
|
tensorflow/contrib/eager/python/evaluator.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 71 |
2017-05-25T16:02:15.000Z
|
2021-06-09T16:08:08.000Z
|
tensorflow/contrib/eager/python/evaluator.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133 |
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/contrib/eager/python/evaluator.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 31 |
2018-09-11T02:17:17.000Z
|
2021-12-15T10:33:35.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class Evaluator holds Metrics for the duration of an evaluation run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.eager.python import datasets
from tensorflow.contrib.eager.python import metrics
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
class Evaluator(object):
"""This holds and updates Metrics for the duration of a single eval run.
Usage:
evaluator = my_model.evaluator() # or MyEvaluator(my_model)
for example_batch in ...:
evaluator(example_batch)
results = evaluator.all_metric_results(optional_summary_logdir)
Or, if you are getting your examples from a tf.data.Dataset, you can use
the evaluate_on_dataset() method.
Implementers of Evaluators should
(a) Call `track_metric()` and/or `track_evaluator()` in __init__().
(b) Override the `call()` method. It will be passed the output of the
model's `eval_data()` method, and should call its contained metrics
(treating them as callables) and any child Evaluators (using their
call() method to avoid calling eval_data() again).
Args:
model: A `Model` object with an `eval_data()` method.
"""
def __init__(self, model):
self._model = model
self._metrics = {}
self._evaluators = {}
if not context.executing_eagerly():
self.call = function.defun(self.call)
# ---- API for users ----
def __call__(self, *args, **kwargs):
"""Update metrics with a minibatch of input examples.
Args:
*args:
**kwargs: Arguments representing an input mini-batch of examples to
pass to self.model.eval_data().
Returns:
The op to execute or None if executing eagerly.
"""
return self.call(self._model.eval_data(*args, **kwargs))
def init_variables(self):
"""Return an op for initializing all contained uninitialized variables.
Only for graph execution. Should be called after variables are created
in the first execution of __call__().
Returns:
An op.
Raises:
RuntimeError: if eager execution is enabled.
@compatibility(eager)
Only for graph execution.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("Evaluator.init_variables() not needed when "
"eager execution is enabled.")
return control_flow_ops.group([m.init_variables() for _, m in self.metrics])
def all_metric_results(self, summary_logdir=None):
"""Computes results for all contained metrics.
Args:
summary_logdir: An optional string. If specified, metric results
will be written as summaries to this directory.
Returns:
A `dict` mapping string names to tensors.
"""
if summary_logdir is None:
with summary_ops.never_record_summaries():
return self._all_metric_results()
else:
def f():
with summary_ops.create_file_writer(
summary_logdir).as_default(), summary_ops.always_record_summaries():
return self._all_metric_results()
if context.executing_eagerly():
return f()
else:
return function.defun(f)()
def _all_metric_results(self):
"""Implementation of `all_metric_results` in the summary context."""
results = {}
for name, metric in six.iteritems(self._metrics):
results[name] = metric.result()
for prefix, evaluator in six.iteritems(self._evaluators):
for name, metric in six.iteritems(evaluator._metrics): # pylint: disable=protected-access
results[prefix + "/" + name] = metric.result()
return results
def evaluate_on_dataset(self, dataset, *args, **kwargs):
"""Convenience method for performing an eval on a Dataset.
Args:
dataset: Dataset object with the input data to evaluate on.
*args:
**kwargs: Optional additional arguments to __call__(), except
`summary_logdir`: if specified, metrics will be written as summaries
to this directory.
Returns:
@compatibility(eager)
When eager execution is enabled, this returns the result of performing
an evaluation as a dictionary. With graph execution, this returns a tuple
(init_op, call_op, results_op) which may be executed using this code:
```python
sess.run(init_op)
try:
while True:
sess.run(call_op)
except tf.errors.OutOfRangeError:
pass
return sess.run(results_op) # A dictionary
# equivalently:
return evaluator.run_evaluation(init_op, call_op, results_op, sess=sess)
```
@end_compatibility
"""
summary_logdir = kwargs.pop("summary_logdir", None)
if context.executing_eagerly():
for example in datasets.Iterator(dataset):
self.__call__(example, *args, **kwargs)
return self.all_metric_results(summary_logdir)
# Graph construction
call_op = self.__call__(dataset.make_one_shot_iterator().get_next(), *args,
**kwargs)
init_op = self.init_variables()
results_op = self.all_metric_results(summary_logdir)
return (init_op, call_op, results_op)
@staticmethod
def run_evaluation(init_op, call_op, results_op, sess=None):
"""Convenience method for running the ops returned by evaluate_on_dataset.
Args:
init_op: An op that initializes/resets evaluation state.
call_op: An op that updates evaluation state on a mini-batch of examples.
Must generate an tf.errors.OutOfRangeError when done.
results_op: A dictionary of tensors that compute the final evaluation
results from the evaluation state.
sess: The Session to run the evaluation in. Defaults to the default
Session.
Returns:
A dictionary of values, parallel to results_op.
Raises:
RuntimeError: if eager execution is enabled.
@compatibility(eager)
Only for graph execution.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("Evaluator.run_evaluation() not supported when "
"eager execution is enabled.")
sess = sess or ops.get_default_session()
sess.run(init_op)
try:
while True:
sess.run(call_op)
except errors_impl.OutOfRangeError:
pass
return sess.run(results_op)
# ---- To be implemented by descendants ---
def call(self, eval_data):
"""Update metrics using the output of self.model.
Note: This function is executed as a graph function in graph mode.
This means:
a) Operations on the same resource are executed in textual order.
This should make it easier to do things like add the updated
value of a variable to another, for example.
b) You don't need to worry about collecting the update ops to execute.
All update ops added to the graph by this function will be executed.
As a result, code should generally work the same way with graph or
eager execution.
Args:
eval_data: The output of self.model.eval_data() on a mini-batch of
examples.
"""
raise NotImplementedError("Evaluators must define a call member function.")
# ---- For use by descendants ---
@property
def model(self):
return self._model
def track_metric(self, metric):
"""Add a Metric to be tracked.
Metrics can only be tracked by one `Evaluator`. Metrics must be
tracked or they will not appear in `all_metric_results()`.
Args:
metric: A `Metric` object.
Returns:
The `metric` passed into this function.
Raises:
RuntimeError: If called before __init__.
TypeError: If `metric` is not of the correct type.
ValueError: If there is a name collision between Metrics or `metric`
has already been added to another `Evaluator`.
"""
if not hasattr(self, "_metrics"):
raise RuntimeError(
"Need to call Evaluator.__init__ before adding metrics")
if not isinstance(metric, metrics.Metric):
raise TypeError(
"Evaluator.track_metric() passed type %s, not a tfe.metrics.Metric" %
(type(metric),))
if metric.name in self._metrics:
if metric is self._metrics[metric.name]:
return metric
raise ValueError(
"Attempt to add two Metrics with the name '%s' to the same Evaluator "
"'%s'" % (metric.name, self.name))
# pylint: disable=protected-access
if hasattr(metric, "_added_to_an_evaluator"):
raise ValueError("Metric %s already added to Evaluator %s" %
(metric.name, metric._added_to_an_evaluator))
metric._added_to_an_evaluator = self.__class__.__name__
# pylint: enable=protected-access
self._metrics[metric.name] = metric
return metric
def track_evaluator(self, prefix, evaluator):
"""Add a contained `Evaluator`.
This is for delegating to another `Evaluator`, e.g. for when you have a
model with multiple heads. Users should manually invoke the child
`Evaluator`'s `call` method from their `call` method.
Args:
prefix: A string. Metrics from `evaluator` are exported with this
prefix and a '/'.
evaluator: An `Evaluator` object.
Returns:
The value of `evaluator` passed into this function.
Raises:
RuntimeError: If called before __init__.
TypeError: If `evaluator` is not of the correct type.
ValueError: If an `Evaluator` has already been added with that `prefix`.
"""
if not hasattr(self, "_evaluators"):
raise RuntimeError(
"Need to call Evaluator.__init__ before adding evaluators")
if not isinstance(evaluator, Evaluator):
raise TypeError(
"Evaluator.track_evaluator() passed type %s, not a tfe.Evaluator." %
(type(evaluator),))
if prefix in self._evaluators:
if evaluator is self._evaluators[prefix]:
return evaluator
raise RuntimeError(
"Attempt to add two Evaluators with the same prefix '%s'." % prefix)
self._evaluators[prefix] = evaluator
return evaluator
@property
def metric_variables(self):
v = []
for metric in six.itervalues(self._metrics):
v += metric.variables
for evaluator in six.itervalues(self._evaluators):
v += evaluator.metric_variables
return v
@property
def metrics(self):
"""Returns a list of (prefix, metric) pairs."""
m = []
for metric in six.itervalues(self._metrics):
m.append(("", metric))
for prefix, evaluator in six.iteritems(self._evaluators):
m += [(prefix + "/" + p, m) for p, m in evaluator.metrics]
return m
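# --- Editor's sketch, not part of the original file ---
# A minimal multi-head evaluator following the guidance in the Evaluator and
# track_evaluator() docstrings above: child evaluators are registered in
# __init__ and their call() methods are invoked from call(). The eval_data
# keys "head_a"/"head_b" are hypothetical and only serve this illustration.
class _ExampleMultiHeadEvaluator(Evaluator):
  def __init__(self, model, head_a_evaluator, head_b_evaluator):
    super(_ExampleMultiHeadEvaluator, self).__init__(model)
    self.head_a = self.track_evaluator("head_a", head_a_evaluator)
    self.head_b = self.track_evaluator("head_b", head_b_evaluator)
  def call(self, eval_data):
    # Delegate to the children via call() so eval_data() is not recomputed.
    self.head_a.call(eval_data["head_a"])
    self.head_b.call(eval_data["head_b"])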
class SparseSoftmaxEvaluator(Evaluator):
"""Evaluator for a sparse softmax model.
Computes a standard set of metrics for single-label, multi-class
models.
Args:
model: A `SparseSoftmaxModel` object or a `Model` whose `eval_data()`
method produces a `dict` containing values for the loss, true
label, predicted class, and optional weights.
loss_key: Optional key for looking up the value of the loss in the
`eval_data()` dict. Defaults to "loss".
label_key: Optional key for looking up the value of the label in the
`eval_data()` dict. Defaults to "label".
predicted_class_key: Optional key for looking up the value of the
predicted class in the `eval_data()` dict. Defaults to "predicted_class".
weights_key: Optional key for looking up the value of the weights
in the `eval_data()` dict. Defaults to "weights". Note that weights
are optional, and default to 1 if not present in `eval_data`.
"""
def __init__(self, model, loss_key="loss", label_key="label",
predicted_class_key="predicted_class", weights_key="weights"):
super(SparseSoftmaxEvaluator, self).__init__(model)
# TODO(josh11b): Expand this to include everything from the standard
# SparseSoftmax Head.
self.avg_loss = self.track_metric(metrics.Mean("Avg Loss"))
self.accuracy = self.track_metric(metrics.Accuracy())
self.loss_key = loss_key
self.label_key = label_key
self.predicted_class_key = predicted_class_key
self.weights_key = weights_key
def call(self, eval_data):
"""Update metrics for `eval_data` dict (described above)."""
weights = eval_data.get(self.weights_key, None)
if weights is None:
self.avg_loss(eval_data[self.loss_key])
self.accuracy(eval_data[self.label_key],
eval_data[self.predicted_class_key])
else:
self.avg_loss(eval_data[self.loss_key], weights=weights)
self.accuracy(eval_data[self.label_key],
eval_data[self.predicted_class_key],
weights=weights)
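# --- Editor's sketch, not part of the original file ---
# How SparseSoftmaxEvaluator is fed, per its docstring above (eager execution
# assumed). All arguments are hypothetical tensors/objects from the caller.
def _example_sparse_softmax_usage(model, per_example_loss, labels, predictions):
  evaluator = SparseSoftmaxEvaluator(model)
  evaluator.call({"loss": per_example_loss,
                  "label": labels,
                  "predicted_class": predictions})
  return evaluator.all_metric_results()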
| 36.654155 | 96 | 0.684391 |
7949a41509759ff35a5b27c6e39e494eb8ab73d7
| 2,096 |
py
|
Python
|
extensions/.stubs/clrclasses/__clrclasses__/System/Media/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | 1 |
2020-03-25T03:27:24.000Z
|
2020-03-25T03:27:24.000Z
|
extensions/.stubs/clrclasses/__clrclasses__/System/Media/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
extensions/.stubs/clrclasses/__clrclasses__/System/Media/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
from __clrclasses__.System import EventHandler as _n_0_t_0
from __clrclasses__.System.ComponentModel import Component as _n_1_t_0
from __clrclasses__.System.ComponentModel import IComponent as _n_1_t_1
from __clrclasses__.System.ComponentModel import AsyncCompletedEventHandler as _n_1_t_2
from __clrclasses__.System.IO import Stream as _n_2_t_0
from __clrclasses__.System.Runtime.Serialization import ISerializable as _n_3_t_0
import typing
class SoundPlayer(_n_1_t_0, _n_1_t_1, _n_3_t_0):
@property
def IsLoadCompleted(self) -> bool:"""IsLoadCompleted { get; } -> bool"""
@property
def LoadTimeout(self) -> int:"""LoadTimeout { get; set; } -> int"""
@property
def SoundLocation(self) -> str:"""SoundLocation { get; set; } -> str"""
@property
def Stream(self) -> _n_2_t_0:"""Stream { get; set; } -> Stream"""
@property
def Tag(self) -> object:"""Tag { get; set; } -> object"""
@property
def LoadCompleted(self) -> _n_1_t_2:
"""LoadCompleted Event: AsyncCompletedEventHandler"""
@property
def SoundLocationChanged(self) -> _n_0_t_0:
"""SoundLocationChanged Event: EventHandler"""
@property
def StreamChanged(self) -> _n_0_t_0:
"""StreamChanged Event: EventHandler"""
def __init__(self, stream: _n_2_t_0) -> SoundPlayer:...
def __init__(self, soundLocation: str) -> SoundPlayer:...
def __init__(self) -> SoundPlayer:...
def Load(self):...
def LoadAsync(self):...
def Play(self):...
def PlayLooping(self):...
def PlaySync(self):...
def Stop(self):...
class SystemSound(object):
def Play(self):...
class SystemSounds(object):
@property
def Asterisk(self) -> SystemSound:"""Asterisk { get; } -> SystemSound"""
@property
def Beep(self) -> SystemSound:"""Beep { get; } -> SystemSound"""
@property
def Exclamation(self) -> SystemSound:"""Exclamation { get; } -> SystemSound"""
@property
def Hand(self) -> SystemSound:"""Hand { get; } -> SystemSound"""
@property
def Question(self) -> SystemSound:"""Question { get; } -> SystemSound"""
| 41.92 | 87 | 0.676527 |
7949a423d406c26f49a6ef631947026a6f579f79
| 1,858 |
py
|
Python
|
app/pathfinder.py
|
Pulotum/BattleSnake2019
|
87b53fd38ad158b73c3eeb9a651d5339edf103cf
|
[
"MIT"
] | null | null | null |
app/pathfinder.py
|
Pulotum/BattleSnake2019
|
87b53fd38ad158b73c3eeb9a651d5339edf103cf
|
[
"MIT"
] | 1 |
2019-11-06T23:57:51.000Z
|
2019-11-06T23:57:51.000Z
|
app/pathfinder.py
|
Pulotum/BattleSnake
|
87b53fd38ad158b73c3eeb9a651d5339edf103cf
|
[
"MIT"
] | null | null | null |
import returnMap
import returnHazards
import mapCoordinates
def find_path(data, start, goal):
width = data['board']['width']
height = data['board']['height']
map = returnMap.returnMap(data)
# dist and visited arrays for visited and unvisited vertices
dist = [10000 for x in range(width*height)]
vertices = [i for i in range(width*height)]
visited = [-1 for x in range(width*height)]
# remove unreachable vertices
hazards = returnHazards.returnHazards(map)
for hazard in hazards:
if goal == hazard:
return False
val = mapCoordinates.mapToVertex(hazard, width)
vertices.remove(val)
# set initial distance to 0 (need conversion method)
    start_index = mapCoordinates.mapToVertex(start, width)
dist[start_index] = 0
while len(vertices) > 0:
        # set u as the unvisited vertex with the smallest tentative distance
        u = min(vertices, key=lambda v: dist[v])
        # check for end condition
        coord = mapCoordinates.VertexToMap(u)
        if coord['x'] == goal['x'] and coord['y'] == goal['y']:
            path = []
            # walk back through the recorded predecessors to build the path
            if (visited[u] != -1 or (coord['x'] == start['x'] and coord['y'] == start['y'])):
                while u != -1:
                    move = mapCoordinates.VertexToMap(u)
                    path = [move] + path
                    u = visited[u]
                return path
# remove u from vertices
vertices.remove(u)
neighbours = mapCoordinates.neighbours(u)
for neighbour in neighbours:
alt = dist[u] + 1
if alt < dist[neighbour]:
dist[neighbour] = alt
visited[neighbour] = u
return None
def distance(start, goal):
dist_x = abs(start['x'] - goal['x'])
dist_y = abs(start['y'] - goal['y'])
return dist_x + dist_y
def min_index(num_list):
return num_list.index(min(num_list))
| 29.492063 | 87 | 0.583961 |
7949a471ecdfabe8772a8ec1beedf01c56e025af
| 1,273 |
py
|
Python
|
examples/units_and_coordinates/planet_locations.py
|
fluxtransport/sunpy
|
351d3edca97e779179f367670292c95574c7a222
|
[
"BSD-2-Clause"
] | 1 |
2021-07-03T14:08:05.000Z
|
2021-07-03T14:08:05.000Z
|
examples/units_and_coordinates/planet_locations.py
|
fluxtransport/sunpy
|
351d3edca97e779179f367670292c95574c7a222
|
[
"BSD-2-Clause"
] | null | null | null |
examples/units_and_coordinates/planet_locations.py
|
fluxtransport/sunpy
|
351d3edca97e779179f367670292c95574c7a222
|
[
"BSD-2-Clause"
] | null | null | null |
"""
===================================
Getting the location of the planets
===================================
How to get the position of planetary bodies in the solar system using
`astropy's solar system ephemeris <http://docs.astropy.org/en/stable/coordinates/solarsystem.html#solar-system-ephemerides>`__ information and SunPy.
"""
import matplotlib.pyplot as plt
from astropy.time import Time
from sunpy.coordinates import get_body_heliographic_stonyhurst
##############################################################################
# Let's grab the positions of each of the planets in Heliographic Stonyhurst
# coordinates.
obstime = Time('2014-05-15T07:54:00.005')
planet_list = ['earth', 'venus', 'mars', 'mercury', 'jupiter', 'neptune', 'uranus', 'sun']
planet_coord = [get_body_heliographic_stonyhurst(
this_planet, time=obstime) for this_planet in planet_list]
##############################################################################
# Let's plot the results. Remember the Sun is at the center of this coordinate
# system.
fig = plt.figure()
ax = plt.subplot(projection='polar')
for this_planet, this_coord in zip(planet_list, planet_coord):
ax.plot(this_coord.lon.to('rad'), this_coord.radius, 'o', label=this_planet)
ax.legend()
plt.show()
| 39.78125 | 149 | 0.628437 |
7949a4adf673f5c810c5944cdfd7ae2475e44266
| 7,276 |
py
|
Python
|
wordpress/api.py
|
DivvyHQ/wp-api-python
|
6028d265c00f5fb6db0aa10520d649c6696d0dd0
|
[
"MIT"
] | null | null | null |
wordpress/api.py
|
DivvyHQ/wp-api-python
|
6028d265c00f5fb6db0aa10520d649c6696d0dd0
|
[
"MIT"
] | 1 |
2018-07-20T13:46:59.000Z
|
2018-10-31T17:51:14.000Z
|
wordpress/api.py
|
DivvyHQ/wp-api-python
|
6028d265c00f5fb6db0aa10520d649c6696d0dd0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Wordpress API Class
"""
from __future__ import unicode_literals
import logging
from json import dumps as jsonencode
from wordpress.auth import BasicAuth, OAuth, OAuth_3Leg, NoAuth
from wordpress.helpers import StrUtils, UrlUtils
from wordpress.transport import API_Requests_Wrapper
__title__ = "wordpress-api"
class API(object):
""" API Class """
def __init__(self, url, consumer_key, consumer_secret, **kwargs):
self.logger = logging.getLogger(__name__)
self.requester = API_Requests_Wrapper(url=url, **kwargs)
auth_kwargs = dict(
requester=self.requester,
consumer_key=consumer_key,
consumer_secret=consumer_secret,
)
auth_kwargs.update(kwargs)
auth_class = OAuth
if kwargs.get('basic_auth'):
auth_class = BasicAuth
elif kwargs.get('oauth1a_3leg'):
auth_class = OAuth_3Leg
elif kwargs.get('no_auth'):
auth_class = NoAuth
if kwargs.get('version', '').startswith('wc') and kwargs.get('oauth1a_3leg'):
self.logger.warn("WooCommerce JSON Api does not seem to support 3leg")
self.auth = auth_class(**auth_kwargs)
@property
def url(self):
return self.requester.url
@property
def timeout(self):
return self.requester.timeout
@property
def namespace(self):
return self.requester.api
@property
def version(self):
return self.requester.api_version
@property
def verify_ssl(self):
return self.requester.verify_ssl
@property
def is_ssl(self):
return self.requester.is_ssl
@property
def consumer_key(self):
return self.auth.consumer_key
@property
def consumer_secret(self):
return self.auth.consumer_secret
@property
def callback(self):
return self.auth.callback
def request_post_mortem(self, response=None):
"""
Attempt to diagnose what went wrong in a request
"""
reason = None
remedy = None
response_json = {}
try:
response_json = response.json()
except ValueError:
pass
# import pudb; pudb.set_trace()
request_body = {}
request_url = ""
if hasattr(response, 'request'):
if hasattr(response.request, 'url'):
request_url = response.request.url
if hasattr(response.request, 'body'):
request_body = response.request.body
if 'code' in response_json or 'message' in response_json:
reason = u" - ".join([
unicode(response_json.get(key)) for key in ['code', 'message', 'data'] \
if key in response_json
])
            if response_json.get('code') == 'rest_user_invalid_email':
                remedy = "Try checking the email %s doesn't already exist" % \
                    request_body.get('email')
            elif response_json.get('code') == 'json_oauth1_consumer_mismatch':
                remedy = "Try deleting the cached credentials at %s" % \
                    self.auth.creds_store
            elif response_json.get('code') == 'woocommerce_rest_cannot_view':
if not self.auth.query_string_auth:
remedy = "Try enabling query_string_auth"
else:
remedy = (
"This error is super generic and can be caused by just "
"about anything. Here are some things to try: \n"
" - Check that the account which as assigned to your "
"oAuth creds has the correct access level\n"
" - Enable logging and check for error messages in "
"wp-content and wp-content/uploads/wc-logs\n"
" - Check that your query string parameters are valid\n"
" - Make sure your server is not messing with authentication headers\n"
" - Try a different endpoint\n"
" - Try enabling HTTPS and using basic authentication\n"
)
response_headers = {}
if hasattr(response, 'headers'):
response_headers = response.headers
if not reason:
requester_api_url = self.requester.api_url
if hasattr(response, 'links') and response.links:
links = response.links
first_link_key = list(links)[0]
header_api_url = links[first_link_key].get('url', '')
if header_api_url:
header_api_url = StrUtils.eviscerate(header_api_url, '/')
if header_api_url and requester_api_url\
and header_api_url != requester_api_url:
reason = "hostname mismatch. %s != %s" % (
header_api_url, requester_api_url
)
header_url = StrUtils.eviscerate(header_api_url, '/')
header_url = StrUtils.eviscerate(header_url, self.requester.api)
header_url = StrUtils.eviscerate(header_url, '/')
remedy = "try changing url to %s" % header_url
msg = "API call to %s returned \nCODE: %s\nRESPONSE:%s \nHEADERS: %s\nREQ_BODY:%s" % (
request_url,
unicode(response.status_code),
UrlUtils.beautify_response(response),
unicode(response_headers),
unicode(request_body.encode('utf-8'))[:1000]
)
if reason:
msg += "\nBecause of %s" % reason
if remedy:
msg += "\n%s" % remedy
raise UserWarning(msg)
def __request(self, method, endpoint, data, **kwargs):
""" Do requests """
endpoint_url = self.requester.endpoint_url(endpoint)
endpoint_url = self.auth.get_auth_url(endpoint_url, method, **kwargs)
auth = self.auth.get_auth()
content_type = kwargs.get('headers', {}).get('content-type', 'application/json')
if data is not None and content_type.startswith('application/json'):
data = jsonencode(data, ensure_ascii=False).encode('utf-8')
response = self.requester.request(
method=method,
url=endpoint_url,
auth=auth,
data=data,
**kwargs
)
if response.status_code not in [200, 201, 202]:
self.request_post_mortem(response)
return response
# TODO add kwargs option for headers
def get(self, endpoint, **kwargs):
""" Get requests """
return self.__request("GET", endpoint, None, **kwargs)
def post(self, endpoint, data, **kwargs):
""" POST requests """
return self.__request("POST", endpoint, data, **kwargs)
def put(self, endpoint, data, **kwargs):
""" PUT requests """
return self.__request("PUT", endpoint, data, **kwargs)
def delete(self, endpoint, **kwargs):
""" DELETE requests """
return self.__request("DELETE", endpoint, None, **kwargs)
def options(self, endpoint, **kwargs):
""" OPTIONS requests """
return self.__request("OPTIONS", endpoint, None, **kwargs)
| 33.376147 | 95 | 0.574079 |
7949a4e3925ae45f67d1099e076495f2842afb16
| 5,058 |
py
|
Python
|
websecurityscanner/noxfile.py
|
hugovk/google-cloud-python
|
b387134827dbc3be0e1b431201e0875798002fda
|
[
"Apache-2.0"
] | 1 |
2019-03-26T21:44:51.000Z
|
2019-03-26T21:44:51.000Z
|
websecurityscanner/noxfile.py
|
hugovk/google-cloud-python
|
b387134827dbc3be0e1b431201e0875798002fda
|
[
"Apache-2.0"
] | 40 |
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
websecurityscanner/noxfile.py
|
hugovk/google-cloud-python
|
b387134827dbc3be0e1b431201e0875798002fda
|
[
"Apache-2.0"
] | 2 |
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import shutil
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
BLACK_VERSION = "black==19.3b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
if os.path.exists("samples"):
BLACK_PATHS.append("samples")
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION, *LOCAL_DEPS)
session.run("black", "--check", *BLACK_PATHS)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install(BLACK_VERSION)
session.run("black", *BLACK_PATHS)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "../test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python="3.7")
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| 31.416149 | 84 | 0.653618 |
7949a58fe6ea4495f40bf1fb98bc2818e79a23ba
| 5,347 |
py
|
Python
|
voicefixer/base.py
|
anonymous20211004/iclr2022-vf
|
01464bab64812a299d3e63fbf280ccb8e3c5ab9c
|
[
"MIT"
] | 1 |
2022-02-21T07:49:20.000Z
|
2022-02-21T07:49:20.000Z
|
voicefixer/base.py
|
anonymous20211004/iclr2022-vf
|
01464bab64812a299d3e63fbf280ccb8e3c5ab9c
|
[
"MIT"
] | null | null | null |
voicefixer/base.py
|
anonymous20211004/iclr2022-vf
|
01464bab64812a299d3e63fbf280ccb8e3c5ab9c
|
[
"MIT"
] | null | null | null |
import librosa.display
from voicefixer.tools.pytorch_util import *
from voicefixer.tools.wav import *
from voicefixer.restorer.model import VoiceFixer as voicefixer_fe
import os
EPS=1e-8
class VoiceFixer():
def __init__(self):
self._model = voicefixer_fe(channels=2, sample_rate=44100)
self._model = self._model.load_from_checkpoint(os.path.join(os.path.expanduser('~'), ".cache/voicefixer/analysis_module/checkpoints/epoch=15_trimed_bn.ckpt"))
self._model.eval()
def _load_wav_energy(self, path, sample_rate, threshold=0.95):
wav_10k, _ = librosa.load(path, sr=sample_rate)
stft = np.log10(np.abs(librosa.stft(wav_10k))+1.0)
fbins = stft.shape[0]
e_stft = np.sum(stft, axis=1)
for i in range(e_stft.shape[0]):
e_stft[-i-1] = np.sum(e_stft[:-i-1])
total = e_stft[-1]
for i in range(e_stft.shape[0]):
if(e_stft[i] < total*threshold):continue
else: break
return wav_10k, int((sample_rate//2) * (i/fbins))
def _load_wav(self, path, sample_rate, threshold=0.95):
wav_10k, _ = librosa.load(path, sr=sample_rate)
return wav_10k
def _amp_to_original_f(self, mel_sp_est, mel_sp_target, cutoff=0.2):
freq_dim = mel_sp_target.size()[-1]
mel_sp_est_low, mel_sp_target_low = mel_sp_est[..., 5:int(freq_dim * cutoff)], mel_sp_target[..., 5:int(freq_dim * cutoff)]
energy_est, energy_target = torch.mean(mel_sp_est_low, dim=(2, 3)), torch.mean(mel_sp_target_low, dim=(2, 3))
amp_ratio = energy_target / energy_est
return mel_sp_est * amp_ratio[..., None, None], mel_sp_target
def _trim_center(self, est, ref):
diff = np.abs(est.shape[-1] - ref.shape[-1])
if (est.shape[-1] == ref.shape[-1]):
return est, ref
elif (est.shape[-1] > ref.shape[-1]):
min_len = min(est.shape[-1], ref.shape[-1])
est, ref = est[..., int(diff // 2):-int(diff // 2)], ref
est, ref = est[..., :min_len], ref[..., :min_len]
return est, ref
else:
min_len = min(est.shape[-1], ref.shape[-1])
est, ref = est, ref[..., int(diff // 2):-int(diff // 2)]
est, ref = est[..., :min_len], ref[..., :min_len]
return est, ref
def _pre(self, model, input, cuda):
input = input[None, None, ...]
input = torch.tensor(input)
if(cuda and torch.cuda.is_available()):
input = input.cuda()
sp, _, _ = model.f_helper.wav_to_spectrogram_phase(input)
mel_orig = model.mel(sp.permute(0,1,3,2)).permute(0,1,3,2)
# return models.to_log(sp), models.to_log(mel_orig)
return sp, mel_orig
def remove_higher_frequency(self, wav, ratio=0.95):
stft = librosa.stft(wav)
real, img = np.real(stft), np.imag(stft)
mag = (real ** 2 + img ** 2) ** 0.5
cos, sin = real / (mag+EPS), img / (mag+EPS)
spec = np.abs(stft) # [1025,T]
feature = spec.copy()
feature = np.log10(feature+EPS)
feature[feature < 0] = 0
energy_level = np.sum(feature, axis=1)
threshold = np.sum(energy_level) * ratio
curent_level, i = energy_level[0], 0
while (i < energy_level.shape[0] and curent_level < threshold):
curent_level += energy_level[i + 1, ...]
i += 1
print(i)
spec[i:, ...] = np.zeros_like(spec[i:, ...])
stft = spec * cos + 1j * spec * sin
return librosa.istft(stft)
@torch.no_grad()
def restore_inmem(self, wav_10k, cuda=False, mode=0, your_vocoder_func=None):
if(cuda and torch.cuda.is_available()):
self._model = self._model.cuda()
# metrics = {}
if(mode == 0):
self._model.eval()
elif(mode == 1):
self._model.eval()
elif(mode == 2):
            self._model.train() # More effective on seriously damaged speech
res = []
seg_length = 44100*30
break_point = seg_length
while break_point < wav_10k.shape[0]+seg_length:
segment = wav_10k[break_point-seg_length:break_point]
if (mode == 1):
segment = self.remove_higher_frequency(segment)
sp,mel_noisy = self._pre(self._model, segment, cuda)
out_model = self._model(sp, mel_noisy)
denoised_mel = from_log(out_model['mel'])
if(your_vocoder_func is None):
out = self._model.vocoder(denoised_mel)
else:
out = your_vocoder_func(denoised_mel)
# unify energy
if(torch.max(torch.abs(out)) > 1.0):
out = out / torch.max(torch.abs(out))
print("Warning: Exceed energy limit,", input)
# frame alignment
out, _ = self._trim_center(out, segment)
res.append(out)
break_point += seg_length
out = torch.cat(res,-1)
return tensor2numpy(out.squeeze(0))
def restore(self, input, output, cuda=False, mode=0, your_vocoder_func=None):
wav_10k = self._load_wav(input, sample_rate=44100)
out_np_wav = self.restore_inmem(wav_10k, cuda=cuda, mode=mode, your_vocoder_func=your_vocoder_func)
save_wave(out_np_wav,fname=output,sample_rate=44100)
| 42.436508 | 166 | 0.587993 |
7949a60a26a342498163f65cde07a95bd785da98
| 7,392 |
py
|
Python
|
src/clients/python/api_v1/examples/simple_shm_string_client.py
|
AliAzG/triton-inference-server
|
fbce250035d049d13f32c362e2d76a5cb787da51
|
[
"BSD-3-Clause"
] | null | null | null |
src/clients/python/api_v1/examples/simple_shm_string_client.py
|
AliAzG/triton-inference-server
|
fbce250035d049d13f32c362e2d76a5cb787da51
|
[
"BSD-3-Clause"
] | null | null | null |
src/clients/python/api_v1/examples/simple_shm_string_client.py
|
AliAzG/triton-inference-server
|
fbce250035d049d13f32c362e2d76a5cb787da51
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import numpy as np
import os
import sys
from builtins import range
from tensorrtserver.api import *
import tensorrtserver.shared_memory as shm
from ctypes import *
FLAGS = None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
parser.add_argument('-i', '--protocol', type=str, required=False, default='http',
help='Protocol ("http"/"grpc") used to ' +
'communicate with inference service. Default is "http".')
parser.add_argument('-H', dest='http_headers', metavar="HTTP_HEADER",
required=False, action='append',
help='HTTP headers to add to inference server requests. ' +
'Format is -H"Header:Value".')
FLAGS = parser.parse_args()
protocol = ProtocolType.from_str(FLAGS.protocol)
# We use a simple model that takes 2 input tensors of 16 strings
# each and returns 2 output tensors of 16 strings each. The input
# strings must represent integers. One output tensor is the
# element-wise sum of the inputs and one output is the element-wise
# difference.
model_name = "simple_string"
model_version = -1
batch_size = 1
# Create the inference context for the model.
infer_ctx = InferContext(FLAGS.url, protocol, model_name, model_version, FLAGS.verbose)
# Create the shared memory control context
shared_memory_ctx = SharedMemoryControlContext(FLAGS.url, protocol, \
http_headers=FLAGS.http_headers, verbose=FLAGS.verbose)
# Create the data for the two input tensors. Initialize the first
# to unique integers and the second to all ones. The input tensors
# are the string representation of these values.
in0 = np.arange(start=0, stop=16, dtype=np.int32)
in1 = np.ones(shape=16, dtype=np.int32)
expected_sum = np.add(in0, in1)
expected_diff = np.subtract(in0, in1)
in0n = np.array([str(x) for x in in0.reshape(in0.size)], dtype=object)
input0_data = in0n.reshape(in0.shape)
in1n = np.array([str(x) for x in in1.reshape(in1.size)], dtype=object)
input1_data = in1n.reshape(in1.shape)
# serialize the string tensors
input0_data_serialized = serialize_string_tensor(input0_data)
input1_data_serialized = serialize_string_tensor(input1_data)
# Use the size of the serialized tensors to create the shared memory regions
input0_byte_size = input0_data_serialized.size * input0_data_serialized.itemsize
input1_byte_size = input1_data_serialized.size * input1_data_serialized.itemsize
    output_byte_size = max(input0_byte_size, input1_byte_size) + 1
# Create Output0 and Output1 in Shared Memory and store shared memory handles
shm_op0_handle = shm.create_shared_memory_region("output0_data", "/output0_simple", output_byte_size)
shm_op1_handle = shm.create_shared_memory_region("output1_data", "/output1_simple", output_byte_size)
# Register Output0 and Output1 shared memory with TRTIS
shared_memory_ctx.register(shm_op0_handle)
shared_memory_ctx.register(shm_op1_handle)
# Create Input0 and Input1 in Shared Memory and store shared memory handles
shm_ip0_handle = shm.create_shared_memory_region("input0_data", "/input0_simple", input0_byte_size)
shm_ip1_handle = shm.create_shared_memory_region("input1_data", "/input1_simple", input1_byte_size)
# Put input data values into shared memory
shm.set_shared_memory_region(shm_ip0_handle, [input0_data_serialized])
shm.set_shared_memory_region(shm_ip1_handle, [input1_data_serialized])
# Register Input0 and Input1 shared memory with TRTIS
shared_memory_ctx.register(shm_ip0_handle)
shared_memory_ctx.register(shm_ip1_handle)
# Send inference request to the inference server. Get results for both
# output tensors. Passing shape of input tensors is necessary for
# String and variable size tensors.
results = infer_ctx.run({ 'INPUT0' : (shm_ip0_handle, input0_data.shape),
'INPUT1' : (shm_ip1_handle, input1_data.shape)},
{ 'OUTPUT0' : (InferContext.ResultFormat.RAW, shm_op0_handle),
'OUTPUT1' : (InferContext.ResultFormat.RAW, shm_op1_handle) },
batch_size)
# We expect there to be 2 results (each with batch-size 1). Walk
# over all 16 result elements and print the sum and difference
# calculated by the model.
output0_data = results['OUTPUT0'][0]
output1_data = results['OUTPUT1'][0]
for i in range(16):
print(str(input0_data[i]) + " + " + str(input1_data[i]) + " = " + output0_data[i].decode("utf-8"))
print(str(input0_data[i]) + " - " + str(input1_data[i]) + " = " + output1_data[i].decode("utf-8"))
# Convert result from string to int to check result
r0 = int(output0_data[i])
r1 = int(output1_data[i])
if expected_sum[i] != r0:
print("error: incorrect sum");
sys.exit(1);
if expected_diff[i] != r1:
print("error: incorrect difference");
sys.exit(1);
print(shared_memory_ctx.get_shared_memory_status())
shared_memory_ctx.unregister_all()
shm.destroy_shared_memory_region(shm_ip0_handle)
shm.destroy_shared_memory_region(shm_ip1_handle)
shm.destroy_shared_memory_region(shm_op0_handle)
shm.destroy_shared_memory_region(shm_op1_handle)
| 49.610738 | 106 | 0.71131 |
7949a7a44fab71462847083e9085e08d237760af
| 4,790 |
py
|
Python
|
homeassistant/components/sensor/dht.py
|
michaelarnauts/home-assistant
|
7d905e6c0c99a4454de26d63af0581b454f01ca1
|
[
"MIT"
] | 1 |
2017-12-02T11:40:01.000Z
|
2017-12-02T11:40:01.000Z
|
homeassistant/components/sensor/dht.py
|
michaelarnauts/home-assistant
|
7d905e6c0c99a4454de26d63af0581b454f01ca1
|
[
"MIT"
] | null | null | null |
homeassistant/components/sensor/dht.py
|
michaelarnauts/home-assistant
|
7d905e6c0c99a4454de26d63af0581b454f01ca1
|
[
"MIT"
] | null | null | null |
"""
homeassistant.components.sensor.dht
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adafruit DHT temperature and humidity sensor.
You need a Python3 compatible version of the Adafruit_Python_DHT library
(e.g. https://github.com/mala-zaba/Adafruit_Python_DHT,
also see requirements.txt).
As this requires access to the GPIO, you will need to run home-assistant
as root.
Configuration:
To use the Adafruit DHT sensor you will need to
add something like the following to your config/configuration.yaml:
sensor:
platform: dht
sensor: DHT22
pin: 23
monitored_conditions:
- temperature
- humidity
Variables:
sensor
*Required
The sensor type, DHT11, DHT22 or AM2302
pin
*Required
The pin the sensor is connected to, something like
'P8_11' for Beaglebone, '23' for Raspberry Pi
monitored_conditions
*Optional
Conditions to monitor. Available conditions are temperature and humidity.
"""
import logging
from datetime import timedelta
from homeassistant.util import Throttle
from homeassistant.const import TEMP_FAHRENHEIT
from homeassistant.helpers.entity import Entity
# update this requirement to upstream as soon as it supports python3
REQUIREMENTS = ['http://github.com/mala-zaba/Adafruit_Python_DHT/archive/' +
'4101340de8d2457dd194bca1e8d11cbfc237e919.zip']
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
'temperature': ['Temperature', ''],
'humidity': ['Humidity', '%']
}
# Return cached results if last scan was less than this time ago
# DHT11 is able to deliver data once per second, DHT22 once every two
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Get the DHT sensor. """
try:
import Adafruit_DHT
except ImportError:
_LOGGER.exception(
"Unable to import Adafruit_DHT. "
"Did you maybe not install the 'Adafruit_DHT' package?")
return False
SENSOR_TYPES['temperature'][1] = hass.config.temperature_unit
unit = hass.config.temperature_unit
available_sensors = {
"DHT11": Adafruit_DHT.DHT11,
"DHT22": Adafruit_DHT.DHT22,
"AM2302": Adafruit_DHT.AM2302
}
sensor = available_sensors[config['sensor']]
pin = config['pin']
if not sensor or not pin:
_LOGGER.error(
"Config error "
"Please check your settings for DHT, sensor not supported.")
return None
data = DHTClient(Adafruit_DHT, sensor, pin)
dev = []
try:
for variable in config['monitored_conditions']:
if variable not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', variable)
else:
dev.append(DHTSensor(data, variable, unit))
except KeyError:
pass
add_devices(dev)
# pylint: disable=too-few-public-methods
class DHTSensor(Entity):
""" Implements an DHT sensor. """
def __init__(self, dht_client, sensor_type, temp_unit):
self.client_name = 'DHT sensor'
self._name = SENSOR_TYPES[sensor_type][0]
self.dht_client = dht_client
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
def update(self):
""" Gets the latest data from the DHT and updates the states. """
self.dht_client.update()
data = self.dht_client.data
if self.type == 'temperature':
self._state = round(data['temperature'], 1)
if self.temp_unit == TEMP_FAHRENHEIT:
self._state = round(data['temperature'] * 1.8 + 32, 1)
elif self.type == 'humidity':
self._state = round(data['humidity'], 1)
class DHTClient(object):
""" Gets the latest data from the DHT sensor. """
def __init__(self, adafruit_dht, sensor, pin):
self.adafruit_dht = adafruit_dht
self.sensor = sensor
self.pin = pin
self.data = dict()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
""" Gets the latest data the DHT sensor. """
humidity, temperature = self.adafruit_dht.read_retry(self.sensor,
self.pin)
        if temperature is not None:
            self.data['temperature'] = temperature
        if humidity is not None:
            self.data['humidity'] = humidity
| 29.030303 | 76 | 0.650522 |
7949a8bc4ea4939bd8bb6c9c28700c7947e7e546
| 15,137 |
py
|
Python
|
zhusuan/legacy/variational_legacy.py
|
weiwang2330/BayesNeuralNet
|
6be81289d9bc46657a1b14ded440c8160721a464
|
[
"MIT"
] | null | null | null |
zhusuan/legacy/variational_legacy.py
|
weiwang2330/BayesNeuralNet
|
6be81289d9bc46657a1b14ded440c8160721a464
|
[
"MIT"
] | null | null | null |
zhusuan/legacy/variational_legacy.py
|
weiwang2330/BayesNeuralNet
|
6be81289d9bc46657a1b14ded440c8160721a464
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import tensorflow as tf
import six
from six.moves import zip, map
from tensorflow.python.training import moving_averages
from zhusuan.utils import log_mean_exp, merge_dicts
from zhusuan.evaluation import is_loglikelihood
__all__ = [
'sgvb',
'iwae',
'rws',
'nvil',
'vimco'
]
def sgvb(log_joint, observed, latent, axis=None):
"""
    Implements the stochastic gradient variational Bayes (SGVB) algorithm
from (Kingma, 2013). This only works for continuous latent
`StochasticTensor` s that can be reparameterized (Kingma, 2013).
:param log_joint: A function that accepts a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
`StochasticTensor` names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed `StochasticTensor` s to their values
:param latent: A dictionary of ``(string, (Tensor, Tensor))`` pairs.
Mapping from names of latent `StochasticTensor` s to their samples and
log probabilities.
:param axis: The sample dimension(s) to reduce when computing the
outer expectation in variational lower bound. If `None`, no dimension
is reduced.
:return: A Tensor. The variational lower bound.
"""
warnings.warn("sgvb(): This function will be deprecated in the coming "
"version (0.3.1). Variational utilities are moving to "
"`zs.variational`. The new sgvb gradient estimator can be "
"accessed by first constructing the elbo objective (using "
"`zs.variational.elbo` and then calling its sgvb() method.",
category=FutureWarning)
latent_k, latent_v = map(list, zip(*six.iteritems(latent)))
latent_outputs = dict(zip(latent_k, map(lambda x: x[0], latent_v)))
latent_logpdfs = map(lambda x: x[1], latent_v)
joint_obs = merge_dicts(observed, latent_outputs)
lower_bound = log_joint(joint_obs) - sum(latent_logpdfs)
if axis is not None:
lower_bound = tf.reduce_mean(lower_bound, axis)
return lower_bound
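# Illustrative usage sketch (not part of the original module): assuming `x_batch`,
# a latent sample `z` and its log-density `logq_z` have been produced elsewhere,
# sgvb() is typically wired up as
#
#     lower_bound = sgvb(log_joint,
#                        observed={'x': x_batch},
#                        latent={'z': (z, logq_z)},
#                        axis=0)
#     cost = -tf.reduce_mean(lower_bound)
#
# and `cost` is handed to any TensorFlow optimizer.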
def iwae(log_joint, observed, latent, axis=None):
"""
Implements the importance weighted lower bound from (Burda, 2015).
This only works for continuous latent `StochasticTensor` s that
can be reparameterized (Kingma, 2013).
:param log_joint: A function that accepts a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
`StochasticTensor` names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed `StochasticTensor` s to their values.
    :param latent: A dictionary of ``(string, (Tensor, Tensor))`` pairs.
Mapping from names of latent `StochasticTensor` s to their samples and
log probabilities.
:param axis: The sample dimension(s) to reduce when computing the
outer expectation in variational lower bound. If `None`, no dimension
is reduced.
:return: A Tensor. The importance weighted lower bound.
"""
warnings.warn("iwae(): This function will be deprecated in the coming "
"version (0.3.1). Variational utilities are moving to "
"`zs.variational`. The new iwae gradient estimator can be "
"accessed by first constructing the importance weighted "
"objective (using `zs.variational.iw_objective` and then "
"calling its sgvb() method.", category=FutureWarning)
return is_loglikelihood(log_joint, observed, latent, axis)
def rws(log_joint, observed, latent, axis=None):
"""
Implements Reweighted Wake-sleep from (Bornschein, 2015). This works for
both continuous and discrete latent `StochasticTensor` s.
:param log_joint: A function that accepts a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
`StochasticTensor` names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed `StochasticTensor` s to their values.
    :param latent: A dictionary of ``(string, (Tensor, Tensor))`` pairs.
Mapping from names of latent `StochasticTensor` s to their samples and
log probabilities.
:param axis: The sample dimension(s) to reduce when computing the
outer expectation in log likelihood and in the cost for adapting
proposals. If `None`, no dimension is reduced.
:return: A Tensor. The surrogate cost to minimize.
:return: A Tensor. Estimated log likelihoods.
"""
warnings.warn("rws(): This function will be deprecated in the coming "
"version (0.3.1). Variational utilities are moving to "
"`zs.variational`. Features of the original rws() can be "
"achieved by two new variational objectives. For learning "
"model parameters, please use the importance weighted "
"objective: `zs.variational.iw_objective()`. For adapting "
"the proposal, the new rws gradient estimator can be "
"accessed by first constructing the inclusive KL divergence "
"objective using `zs.variational.klpq` and then calling "
"its rws() method.", category=FutureWarning)
latent_k, latent_v = map(list, zip(*six.iteritems(latent)))
latent_outputs = dict(zip(latent_k, map(lambda x: x[0], latent_v)))
latent_logpdfs = map(lambda x: x[1], latent_v)
joint_obs = merge_dicts(observed, latent_outputs)
log_joint_value = log_joint(joint_obs)
entropy = -sum(latent_logpdfs)
log_w = log_joint_value + entropy
if axis is not None:
log_w_max = tf.reduce_max(log_w, axis, keep_dims=True)
w_u = tf.exp(log_w - log_w_max)
w_tilde = tf.stop_gradient(
w_u / tf.reduce_sum(w_u, axis, keep_dims=True))
log_likelihood = log_mean_exp(log_w, axis)
fake_log_joint_cost = -tf.reduce_sum(w_tilde * log_joint_value, axis)
fake_proposal_cost = tf.reduce_sum(w_tilde * entropy, axis)
cost = fake_log_joint_cost + fake_proposal_cost
else:
cost = log_w
log_likelihood = log_w
return cost, log_likelihood
def nvil(log_joint,
observed,
latent,
baseline=None,
decay=0.8,
variance_normalization=False,
axis=None):
"""
Implements the variance reduced score function estimator for gradients
of the variational lower bound from (Mnih, 2014). This algorithm is also
called "REINFORCE" or "baseline". This works for both continuous and
discrete latent `StochasticTensor` s.
:param log_joint: A function that accepts a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
`StochasticTensor` names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed `StochasticTensor` s to their values.
    :param latent: A dictionary of ``(string, (Tensor, Tensor))`` pairs.
Mapping from names of latent `StochasticTensor` s to their samples and
log probabilities.
:param baseline: A Tensor that can broadcast to match the shape returned
by `log_joint`. A trainable estimation for the scale of the
variational lower bound, which is typically dependent on observed
values, e.g., a neural network with observed values as inputs.
:param variance_normalization: Whether to use variance normalization.
:param decay: Float. The moving average decay for variance normalization.
:param axis: The sample dimension(s) to reduce when computing the
outer expectation in variational lower bound. If `None`, no dimension
is reduced.
:return: A Tensor. The surrogate cost to minimize.
:return: A Tensor. The variational lower bound.
"""
warnings.warn("nvil(): This function will be deprecated in the coming "
"version (0.3.1). Variational utilities are moving to "
"`zs.variational`. The new nvil gradient estimator can be "
"accessed by first constructing the elbo objective (using "
"`zs.variational.elbo` and then calling its reinforce() "
"method.", category=FutureWarning)
latent_k, latent_v = map(list, zip(*six.iteritems(latent)))
latent_outputs = dict(zip(latent_k, map(lambda x: x[0], latent_v)))
latent_logpdfs = map(lambda x: x[1], latent_v)
joint_obs = merge_dicts(observed, latent_outputs)
log_joint_value = log_joint(joint_obs)
entropy = -sum(latent_logpdfs)
l_signal = log_joint_value + entropy
cost = 0.
if baseline is not None:
baseline_cost = 0.5 * tf.square(tf.stop_gradient(l_signal) - baseline)
l_signal = l_signal - baseline
cost += baseline_cost
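    # Variance normalization (optional): standardize the learning signal with moving
    # averages of its mean and variance before it scales the score-function term,
    # following the NVIL recipe of Mnih & Gregor (2014).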
if variance_normalization is True:
# TODO: extend to non-scalar
bc = tf.reduce_mean(l_signal)
bv = tf.reduce_mean(tf.square(l_signal - bc))
moving_mean = tf.get_variable(
'moving_mean', shape=[], initializer=tf.constant_initializer(0.),
trainable=False)
moving_variance = tf.get_variable(
'moving_variance', shape=[],
initializer=tf.constant_initializer(1.), trainable=False)
update_mean = moving_averages.assign_moving_average(
moving_mean, bc, decay=decay)
update_variance = moving_averages.assign_moving_average(
moving_variance, bv, decay=decay)
l_signal = (l_signal - moving_mean) / tf.maximum(
1., tf.sqrt(moving_variance))
with tf.control_dependencies([update_mean, update_variance]):
l_signal = tf.identity(l_signal)
fake_log_joint_cost = -log_joint_value
fake_variational_cost = tf.stop_gradient(l_signal) * entropy
cost += fake_log_joint_cost + fake_variational_cost
lower_bound = log_joint_value + entropy
if axis is not None:
cost = tf.reduce_mean(cost, axis)
lower_bound = tf.reduce_mean(lower_bound, axis)
return cost, lower_bound
def vimco(log_joint, observed, latent, axis=None):
"""
Implements the multi-sample variance reduced score function estimator for
    gradients of the variational lower bound from (Mnih, 2016). This works for
both continuous and discrete latent `StochasticTensor` s.
.. note::
:func:`vimco` is a multi-sample objective, size along `axis` in the
objective should be larger than 1, else an error is raised.
:param log_joint: A function that accepts a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
`StochasticTensor` names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed `StochasticTensor` s to their values.
    :param latent: A dictionary of ``(string, (Tensor, Tensor))`` pairs.
Mapping from names of latent `StochasticTensor` s to their samples and
log probabilities.
:param axis: The sample dimension to reduce when computing the
outer expectation in variational lower bound. Must be specified. If
`None`, an error is raised.
:return: A Tensor. The surrogate cost to minimize.
:return: A Tensor. The variational lower bound.
"""
warnings.warn("vimco(): This function will be deprecated in the coming "
"version (0.3.1). Variational utilities are moving to "
"`zs.variational`. The new vimco gradient estimator can be "
"accessed by first constructing the importance weighted "
"objective (using `zs.variational.iw_objective` and then "
"calling its vimco() method.", category=FutureWarning)
if axis is None:
raise ValueError("vimco is a multi-sample objective, "
"the 'axis' argument must be specified.")
latent_k, latent_v = map(list, zip(*six.iteritems(latent)))
latent_outputs = dict(zip(latent_k, map(lambda x: x[0], latent_v)))
latent_logpdfs = map(lambda x: x[1], latent_v)
joint_obs = merge_dicts(observed, latent_outputs)
log_joint_value = log_joint(joint_obs)
entropy = -sum(latent_logpdfs)
l_signal = log_joint_value + entropy
# check size along the sample axis
err_msg = "vimco() is a multi-sample objective, " \
"size along 'axis' in the objective should be larger than 1."
if l_signal.get_shape()[axis:axis + 1].is_fully_defined():
if l_signal.get_shape()[axis].value < 2:
raise ValueError(err_msg)
_assert_size_along_axis = tf.assert_greater_equal(
tf.shape(l_signal)[axis], 2, message=err_msg)
with tf.control_dependencies([_assert_size_along_axis]):
l_signal = tf.identity(l_signal)
# compute variance reduction term
mean_except_signal = (tf.reduce_sum(l_signal, axis, keep_dims=True) -
l_signal) / tf.to_float(tf.shape(l_signal)[axis] - 1)
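    # The block below builds VIMCO's per-sample control variate: `axis` is swapped to
    # the last dimension, the log-weights are tiled, and each diagonal entry (the
    # sample itself) is replaced by the mean of the other samples before log_mean_exp.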
x, sub_x = tf.to_float(l_signal), tf.to_float(mean_except_signal)
n_dim = tf.rank(x)
axis_dim_mask = tf.cast(tf.one_hot(axis, n_dim), tf.bool)
original_mask = tf.cast(tf.one_hot(n_dim - 1, n_dim), tf.bool)
axis_dim = tf.ones([n_dim], tf.int32) * axis
originals = tf.ones([n_dim], tf.int32) * (n_dim - 1)
perm = tf.where(original_mask, axis_dim, tf.range(n_dim))
perm = tf.where(axis_dim_mask, originals, perm)
multiples = tf.concat([tf.ones([n_dim], tf.int32), [tf.shape(x)[axis]]], 0)
x = tf.transpose(x, perm=perm)
sub_x = tf.transpose(sub_x, perm=perm)
x_ex = tf.tile(tf.expand_dims(x, n_dim), multiples)
x_ex = x_ex - tf.matrix_diag(x) + tf.matrix_diag(sub_x)
control_variate = tf.transpose(log_mean_exp(x_ex, n_dim - 1), perm=perm)
# variance reduced objective
l_signal = log_mean_exp(l_signal, axis, keep_dims=True) - control_variate
fake_term = tf.reduce_sum(-entropy * tf.stop_gradient(l_signal), axis)
lower_bound = log_mean_exp(log_joint_value + entropy, axis)
cost = -fake_term - log_mean_exp(log_joint_value + entropy, axis)
return cost, lower_bound
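# Illustrative usage sketch (not part of the original module): vimco() needs an
# explicit sample dimension, so with `z_samples` and `logq_z` of shape
# [n_samples, batch] coming from the variational program (names are illustrative):
#
#     cost, lower_bound = vimco(log_joint,
#                               observed={'x': x_batch},
#                               latent={'z': (z_samples, logq_z)},
#                               axis=0)
#
# `cost` is the surrogate to minimize; `lower_bound` is the importance weighted bound.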
| 47.303125 | 79 | 0.675101 |
7949a9ada8a9da49612835b859316455db3c0fbd
| 1,763 |
py
|
Python
|
sahara/plugins/mapr/versions/version_handler_factory.py
|
ksshanam/sahara
|
0d259f7a71447cd0cefe4f11184cc2ee335f4e33
|
[
"Apache-2.0"
] | null | null | null |
sahara/plugins/mapr/versions/version_handler_factory.py
|
ksshanam/sahara
|
0d259f7a71447cd0cefe4f11184cc2ee335f4e33
|
[
"Apache-2.0"
] | 1 |
2020-10-06T07:50:12.000Z
|
2020-10-06T07:50:12.000Z
|
sahara/plugins/mapr/versions/version_handler_factory.py
|
ksshanam/sahara
|
0d259f7a71447cd0cefe4f11184cc2ee335f4e33
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def _load_versions():
d_name = os.path.dirname(__file__)
m_template = 'sahara.plugins.mapr.versions.%s.version_handler'
def predicate(v_dir):
if v_dir != "__pycache__":
v_dir = os.path.join(d_name, v_dir)
return os.path.isdir(v_dir) and os.listdir(v_dir)
return False
def mapper(v_dir):
return m_template % v_dir
v_dirs = filter(predicate, os.listdir(d_name))
m_names = map(mapper, v_dirs)
versions = {}
for m_name in m_names:
m = __import__(m_name, fromlist=['sahara'])
versions[m.version] = getattr(m, 'VersionHandler')()
return versions
class VersionHandlerFactory(object):
instance = None
versions = None
@staticmethod
def get():
if not VersionHandlerFactory.instance:
VersionHandlerFactory.versions = _load_versions()
VersionHandlerFactory.instance = VersionHandlerFactory()
return VersionHandlerFactory.instance
def get_versions(self):
return list(VersionHandlerFactory.versions.keys())
def get_handler(self, version):
return VersionHandlerFactory.versions[version]
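# Usage sketch (illustrative, not part of the original module): the factory is a lazy
# singleton, so callers go through get() rather than instantiating it directly:
#
#     factory = VersionHandlerFactory.get()
#     for version in factory.get_versions():
#         handler = factory.get_handler(version)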
| 30.396552 | 75 | 0.695973 |
7949aac7eedc2737d3d8740ce795efdb018af698
| 141 |
py
|
Python
|
pyroSAR/tests/test_snap.py
|
ibaris/pyroSAR
|
04924500c61674a68e9dc56c1f71b7dd195c480a
|
[
"MIT"
] | 1 |
2020-03-09T10:33:06.000Z
|
2020-03-09T10:33:06.000Z
|
pyroSAR/tests/test_snap.py
|
ibaris/pyroSAR
|
04924500c61674a68e9dc56c1f71b7dd195c480a
|
[
"MIT"
] | null | null | null |
pyroSAR/tests/test_snap.py
|
ibaris/pyroSAR
|
04924500c61674a68e9dc56c1f71b7dd195c480a
|
[
"MIT"
] | null | null | null |
from pyroSAR.snap import geocode
def test_geocode(tmpdir, testdata):
scene = testdata['s1']
geocode(scene, str(tmpdir), test=True)
| 20.142857 | 42 | 0.716312 |
7949aade4a27764ef39330da2261e19b21c9c51f
| 351 |
py
|
Python
|
renamer.py
|
Billthedeve/QuRen-Rename-tool
|
d8c989183a97b607187941b74983062ad6b5ae37
|
[
"MIT"
] | null | null | null |
renamer.py
|
Billthedeve/QuRen-Rename-tool
|
d8c989183a97b607187941b74983062ad6b5ae37
|
[
"MIT"
] | null | null | null |
renamer.py
|
Billthedeve/QuRen-Rename-tool
|
d8c989183a97b607187941b74983062ad6b5ae37
|
[
"MIT"
] | null | null | null |
import os
fpath = input("Enter the directory the file is in... ")
if "C:\\" not in fpath:
    print("invalid directory. File must be on the C:\\ drive")
else:
    name = input("Enter file name...")
    newname = input("Enter new file name...")
    # os.system("cd ...") spawns a separate shell, so the directory change would not
    # persist; rename with full paths instead of shelling out to "ren".
    os.rename(os.path.join(fpath, name), os.path.join(fpath, newname))
    print("file renamed")
| 23.4 | 55 | 0.586895 |
7949abc36838e16ca57348298b7a85f1c479f4cb
| 1,872 |
py
|
Python
|
streamblocks/migrations/0003_auto_20200207_1852.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
streamblocks/migrations/0003_auto_20200207_1852.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
streamblocks/migrations/0003_auto_20200207_1852.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-07 17:52
from django.db import migrations, models
import filebrowser.fields
class Migration(migrations.Migration):
dependencies = [
('streamblocks', '0002_indexedparagraph_height'),
]
operations = [
migrations.CreateModel(
name='DownloadableFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fb_file', filebrowser.fields.FileBrowseField(blank=True, max_length=200, null=True, verbose_name='File')),
('description', models.CharField(blank=True, max_length=200, null=True, verbose_name='Descrizione')),
],
options={
'verbose_name': 'File scaricabile',
'verbose_name_plural': 'File scaricabili',
},
),
migrations.AlterField(
model_name='captionedimage',
name='caption',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Didascalia'),
),
migrations.AlterField(
model_name='indexedparagraph',
name='body',
field=models.TextField(blank=True, help_text='Accetta tag HTML', null=True, verbose_name='Testo'),
),
migrations.AlterField(
model_name='indexedparagraph',
name='height',
field=models.CharField(choices=[('1', 'Troppo grande'), ('2', 'Molto grande'), ('3', 'Grande'), ('4', 'Medio'), ('5', 'Piccolo'), ('6', 'Molto piccolo')], default='4', max_length=1, verbose_name='Altezza titolo'),
),
migrations.AlterField(
model_name='indexedparagraph',
name='title',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Titolo'),
),
]
| 39.829787 | 225 | 0.59188 |
7949abd1c9e92bd409a495431b195a20387797cc
| 1,075 |
py
|
Python
|
setup.py
|
negillett/osc-ingest-tools
|
c1536771f8a3dee8b0b155542cb57c3d46ab3f21
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
negillett/osc-ingest-tools
|
c1536771f8a3dee8b0b155542cb57c3d46ab3f21
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
negillett/osc-ingest-tools
|
c1536771f8a3dee8b0b155542cb57c3d46ab3f21
|
[
"Apache-2.0"
] | null | null | null |
import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).resolve().parent
# The text of the README file is used as a description
README = (HERE / "README.md").read_text()
setup(
name="osc-ingest-tools",
version="0.3.1",
description="python tools to assist with standardized data ingestion workflows for the OS-Climate project",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/os-climate/osc-ingest-tools",
author="OS-Climate",
author_email="eje@redhat.com",
license="Apache-2.0",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
],
packages=find_packages(),
include_package_data=True,
install_requires=["pandas", "trino", "boto3", "sqlalchemy", "sqlalchemy-trino", "python-dotenv"],
entry_points={"console_scripts": []},
)
| 33.59375 | 111 | 0.686512 |
7949ac0ecb939062524779c2c9288726fd866ad9
| 35,892 |
py
|
Python
|
flavio/statistics/test_probability.py
|
Felicia56/flavio
|
ea735bd8febbb961d249eddf338a4960c1fbee69
|
[
"MIT"
] | 61 |
2016-03-09T16:19:39.000Z
|
2022-03-30T00:55:51.000Z
|
flavio/statistics/test_probability.py
|
Felicia56/flavio
|
ea735bd8febbb961d249eddf338a4960c1fbee69
|
[
"MIT"
] | 167 |
2016-03-15T15:25:57.000Z
|
2022-02-27T22:19:22.000Z
|
flavio/statistics/test_probability.py
|
Felicia56/flavio
|
ea735bd8febbb961d249eddf338a4960c1fbee69
|
[
"MIT"
] | 57 |
2016-03-15T14:24:23.000Z
|
2022-01-14T01:00:03.000Z
|
import unittest
import numpy as np
import numpy.testing as npt
import scipy.stats
from math import pi, sqrt, exp, log
from flavio.statistics.probability import *
import itertools
import yaml
class TestProbability(unittest.TestCase):
def test_multiv_normal(self):
# test that the rescaling of the MultivariateNormalDistribution
# does not affect the log PDF!
c = np.array([1e-3, 2])
cov = np.array([[(0.2e-3)**2, 0.2e-3*0.5*0.3],[0.2e-3*0.5*0.3, 0.5**2]])
pdf = MultivariateNormalDistribution(c, cov)
x=np.array([1.5e-3, 0.8])
num_lpdf = pdf.logpdf(x)
ana_lpdf = log(1/sqrt(4*pi**2*np.linalg.det(cov))*exp(-np.dot(np.dot(x-c,np.linalg.inv(cov)),x-c)/2))
self.assertAlmostEqual(num_lpdf, ana_lpdf, delta=1e-6)
self.assertEqual(len(pdf.get_random(10)), 10)
def test_normal(self):
d = NormalDistribution(2, 0.3)
self.assertEqual(d.cdf(2), 0.5)
self.assertEqual(d.ppf(0.5), 2)
def test_halfnormal(self):
pdf_p_1 = HalfNormalDistribution(1.7, 0.3)
pdf_n_1 = HalfNormalDistribution(1.7, -0.3)
pdf_p_2 = AsymmetricNormalDistribution(1.7, 0.3, 0.0001)
pdf_n_2 = AsymmetricNormalDistribution(1.7, 0.0001, 0.3)
self.assertAlmostEqual(pdf_p_1.logpdf(1.99), pdf_p_2.logpdf(1.99), delta=0.001)
self.assertEqual(pdf_p_1.logpdf(1.55), -np.inf)
self.assertAlmostEqual(pdf_n_1.logpdf(1.55), pdf_n_2.logpdf(1.55), delta=0.001)
self.assertEqual(pdf_n_1.logpdf(1.99), -np.inf)
self.assertEqual(len(pdf_p_1.get_random(10)), 10)
self.assertEqual(len(pdf_p_2.get_random(10)), 10)
d = HalfNormalDistribution(2, 0.3)
self.assertEqual(d.cdf(2), 0.0)
self.assertAlmostEqual(d.cdf(2.3), 0.6827, places=4)
self.assertAlmostEqual(d.ppf(0.6827), 2.3, places=4)
def test_lognormal(self):
with self.assertRaises(ValueError):
LogNormalDistribution(1, 0.8)
with self.assertRaises(ValueError):
LogNormalDistribution(1, -1.2)
pdf = LogNormalDistribution(3, 2)
self.assertAlmostEqual(pdf.get_error_left(), 1.5)
self.assertAlmostEqual(pdf.get_error_right(), 3)
pdf2 = LogNormalDistribution(-3, 2)
self.assertAlmostEqual(pdf2.get_error_right(), 1.5)
self.assertAlmostEqual(pdf2.get_error_left(), 3)
self.assertEqual(pdf2.pdf(-2.7), pdf.pdf(2.7))
self.assertEqual(pdf2.cdf(-2.7), 1 - pdf.cdf(2.7))
self.assertEqual(pdf2.ppf(0.25), -pdf.ppf(0.75))
def test_limit(self):
p1 = GaussianUpperLimit(2*1.78, 0.9544997)
p2 = HalfNormalDistribution(0, 1.78)
self.assertAlmostEqual(p1.logpdf(0.237), p2.logpdf(0.237), delta=0.0001)
self.assertEqual(p2.logpdf(-1), -np.inf)
self.assertAlmostEqual(p1.cdf(2*1.78), 0.9544997, delta=0.0001)
def test_gamma(self):
# check for loc above and below a-1
for loc in (-5, -15):
p = GammaDistribution(a=11, loc=loc, scale=1)
self.assertEqual(p.central_value, loc + 10)
r = p.get_random(10)
self.assertEqual(len(r), 10)
self.assertAlmostEqual(p.cdf(p.support[1]), 1-2e-9, delta=0.1e-9)
self.assertAlmostEqual(p.ppf(1-2e-9), p.support[1], delta=0.0001)
self.assertEqual(loc, p.support[0])
# nearly normal distribution
p = GammaDistribution(a=10001, loc=0, scale=1)
self.assertAlmostEqual(p.error_left, sqrt(10000), delta=1)
self.assertAlmostEqual(p.get_error_left(nsigma=2), 2*sqrt(10000), delta=2)
self.assertAlmostEqual(p.error_right, sqrt(10000), delta=1)
self.assertAlmostEqual(p.get_error_right(nsigma=2), 2*sqrt(10000), delta=2)
def test_gamma_positive(self):
# check for loc above and below a-1
for loc in (-5, -15):
p = GammaDistributionPositive(a=11, loc=loc, scale=1)
self.assertEqual(p.central_value, max(loc + 10, 0))
r = p.get_random(10)
self.assertEqual(len(r), 10)
self.assertTrue(np.min(r) >= 0)
self.assertEqual(p.logpdf(-0.1), -np.inf)
self.assertEqual(p.cdf(0), 0)
self.assertAlmostEqual(p.cdf(p.support[1]), 1-2e-9, delta=0.1e-9)
self.assertAlmostEqual(p.ppf(0), 0, places=14)
self.assertAlmostEqual(p.ppf(1-2e-9), p.support[1], delta=0.0001)
self.assertEqual(p.cdf(-1), 0)
p = GammaDistributionPositive(a=11, loc=-9, scale=1)
self.assertEqual(p.central_value, 1)
self.assertEqual(p.error_left, 1)
# nearly normal distribution
p = GammaDistributionPositive(a=10001, loc=0, scale=1)
self.assertAlmostEqual(p.error_left, sqrt(10000), delta=1)
self.assertAlmostEqual(p.get_error_left(nsigma=2), 2*sqrt(10000), delta=2)
self.assertAlmostEqual(p.error_right, sqrt(10000), delta=1)
self.assertAlmostEqual(p.get_error_right(nsigma=2), 2*sqrt(10000), delta=2)
def test_gamma_limit(self):
p = GammaUpperLimit(counts_total=30, counts_background=10,
limit=2e-5, confidence_level=0.68)
self.assertAlmostEqual(p.cdf(2e-5), 0.68, delta=0.0001)
# no counts
p = GammaUpperLimit(counts_total=0, counts_background=0,
limit=2e-5, confidence_level=0.68)
self.assertAlmostEqual(p.cdf(2e-5), 0.68, delta=0.0001)
# background excess
p = GammaUpperLimit(counts_total=30, counts_background=50,
limit=2e5, confidence_level=0.68)
self.assertAlmostEqual(p.cdf(2e5), 0.68, delta=0.0001)
p = GammaUpperLimit(counts_total=10000, counts_background=10000,
limit=3., confidence_level=0.95)
p_norm = GaussianUpperLimit(limit=3., confidence_level=0.95)
# check that large-statistics Gamma and Gauss give nearly same PDF
for x in [0, 1, 2, 3, 4]:
self.assertAlmostEqual(p.logpdf(x), p_norm.logpdf(x), delta=0.1)
def test_general_gamma_limit(self):
p = GeneralGammaUpperLimit(counts_total=30, counts_background=10,
limit=2e-5, confidence_level=0.68,
background_variance=5)
self.assertAlmostEqual(p.cdf(2e-5), 0.68, delta=0.0001)
# background excess
p = GeneralGammaUpperLimit(counts_total=30, counts_background=50,
limit=2e5, confidence_level=0.68,
background_variance=25)
self.assertAlmostEqual(p.cdf(2e5), 0.68, delta=0.0001)
p = GeneralGammaUpperLimit(counts_total=10000, counts_background=10000,
limit=3., confidence_level=0.95,
background_variance=1000)
p_norm = GaussianUpperLimit(limit=3., confidence_level=0.95)
# check that large-statistics Gamma and Gauss give nearly same PDF
for x in [1, 2, 3, 4]:
self.assertAlmostEqual(p.logpdf(x), p_norm.logpdf(x), delta=0.1)
# check that warning is raised for very small background variance
with self.assertWarns(Warning):
GeneralGammaUpperLimit(counts_total=10000, counts_background=10000,
limit=3., confidence_level=0.95,
background_variance=10)
def test_numerical(self):
x = np.arange(-5,7,0.01)
y = scipy.stats.norm.pdf(x, loc=1)
y_crazy = 14.7 * y # multiply PDF by crazy number
p_num = NumericalDistribution(x, y_crazy)
p_norm = NormalDistribution(1, 1)
self.assertAlmostEqual(p_num.logpdf(0.237), p_norm.logpdf(0.237), delta=0.02)
self.assertAlmostEqual(p_num.logpdf(-2.61), p_norm.logpdf(-2.61), delta=0.02)
self.assertAlmostEqual(p_num.ppf_interp(0.1), scipy.stats.norm.ppf(0.1, loc=1), delta=0.02)
self.assertAlmostEqual(p_num.ppf_interp(0.95), scipy.stats.norm.ppf(0.95, loc=1), delta=0.02)
self.assertEqual(len(p_num.get_random(10)), 10)
def test_multiv_numerical(self):
x0 = np.arange(-5,5,0.01)
x1 = np.arange(-4,6,0.02)
cov = [[0.2**2, 0.5*0.2*0.4], [0.5*0.2*0.4, 0.4**2]]
y = scipy.stats.multivariate_normal.pdf(np.array(list(itertools.product(x0, x1))), mean=[0, 1], cov=cov)
y = y.reshape(len(x0), len(x1))
y_crazy = 14.7 * y # multiply PDF by crazy number
p_num = MultivariateNumericalDistribution((x0, x1), y_crazy)
p_norm = MultivariateNormalDistribution([0, 1], cov)
self.assertAlmostEqual(p_num.logpdf([0.237, 0.346]), p_norm.logpdf([0.237, 0.346]), delta=0.02)
self.assertAlmostEqual(p_num.logpdf([0.237], exclude=(1,)),
p_norm.logpdf([0.237], exclude=(1,)), delta=0.02)
# try again with length-2 xi
p_num = MultivariateNumericalDistribution(([-5, 4.99], [-4, 5.98]), y_crazy)
self.assertAlmostEqual(p_num.logpdf([0.237, 0.346]), p_norm.logpdf([0.237, 0.346]), delta=0.02)
self.assertAlmostEqual(p_num.logpdf([0.237], exclude=(1,)),
p_norm.logpdf([0.237], exclude=(1,)), delta=0.02)
# test exceptions
with self.assertRaises(NotImplementedError):
p_num.error_left
with self.assertRaises(NotImplementedError):
p_num.error_right
self.assertEqual(len(p_num.get_random(10)), 10)
def test_numerical_from_analytic(self):
p_norm = NormalDistribution(1.64, 0.32)
p_norm_num = NumericalDistribution.from_pd(p_norm)
self.assertEqual(p_norm.central_value, p_norm_num.central_value)
self.assertEqual(p_norm.support, p_norm_num.support)
npt.assert_array_almost_equal(p_norm.logpdf([0.7, 1.9]), p_norm_num.logpdf([0.7, 1.9]), decimal=3)
p_asym = AsymmetricNormalDistribution(1.64, 0.32, 0.67)
p_asym_num = NumericalDistribution.from_pd(p_asym)
npt.assert_array_almost_equal(p_asym.logpdf([0.7, 1.9]), p_asym_num.logpdf([0.7, 1.9]), decimal=3)
p_unif = UniformDistribution(1.64, 0.32)
p_unif_num = NumericalDistribution.from_pd(p_unif)
npt.assert_array_almost_equal(p_unif.logpdf([0.7, 1.9]), p_unif_num.logpdf([0.7, 1.9]), decimal=3)
p_half = HalfNormalDistribution(1.64, -0.32)
p_half_num = NumericalDistribution.from_pd(p_half)
npt.assert_array_almost_equal(p_half.logpdf([0.7, 1.3]), p_half_num.logpdf([0.7, 1.3]), decimal=3)
def test_numerical_from_analytic_mv(self):
p = MultivariateNormalDistribution([2, 5], [[(0.2)**2, 0.2e-3*0.5*0.3],[0.2*0.5*0.3, 0.5**2]])
p_num = MultivariateNumericalDistribution.from_pd(p)
npt.assert_array_equal(p.central_value, p_num.central_value)
npt.assert_array_equal(p.support, p_num.support)
npt.assert_array_almost_equal(p.logpdf([1.6, 2.5]), p_num.logpdf([1.6, 2.5]), decimal=2)
npt.assert_array_almost_equal(p.logpdf([2.33, 7]), p_num.logpdf([2.33, 7]), decimal=2)
def test_convolve_normal(self):
p_1 = NormalDistribution(12.4, 0.346)
p_2 = NormalDistribution(12.4, 2.463)
p_x = NormalDistribution(12.3, 2.463)
from flavio.statistics.probability import convolve_distributions
# error if not the same central value:
with self.assertRaises(AssertionError):
convolve_distributions([p_1, p_x])
p_comb = convolve_distributions([p_1, p_2])
self.assertIsInstance(p_comb, NormalDistribution)
self.assertEqual(p_comb.central_value, 12.4)
self.assertEqual(p_comb.standard_deviation, sqrt(0.346**2+2.463**2))
# check for addition of central values
p_comb = convolve_distributions([p_1, p_x], central_values='sum')
self.assertIsInstance(p_comb, NormalDistribution)
self.assertAlmostEqual(p_comb.central_value, 24.7)
self.assertEqual(p_comb.standard_deviation, sqrt(0.346**2+2.463**2))
def test_convolve_delta(self):
p_1 = DeltaDistribution(12.4)
p_2 = NormalDistribution(12.4, 2.463)
p_x = DeltaDistribution(12.3)
from flavio.statistics.probability import convolve_distributions
with self.assertRaises(NotImplementedError):
convolve_distributions([p_1, p_x], central_values='sum')
with self.assertRaises(AssertionError):
convolve_distributions([p_x, p_2])
p_comb = convolve_distributions([p_1, p_2])
self.assertIsInstance(p_comb, NormalDistribution)
self.assertEqual(p_comb.central_value, 12.4)
self.assertEqual(p_comb.standard_deviation, 2.463)
def test_convolve_numerical(self):
from flavio.statistics.probability import _convolve_numerical
p_1 = NumericalDistribution.from_pd(NormalDistribution(12.4, 0.346))
p_2 = NumericalDistribution.from_pd(NormalDistribution(12.4, 2.463))
p_3 = NumericalDistribution.from_pd(NormalDistribution(12.4, 1.397))
conv_p_12 = _convolve_numerical([p_1, p_2])
comb_p_12 = NormalDistribution(12.4, sqrt(0.346**2 + 2.463**2))
conv_p_123 = _convolve_numerical([p_1, p_2, p_3])
comb_p_123 = NormalDistribution(12.4, sqrt(0.346**2 + 2.463**2 + 1.397**2))
x = np.linspace(2, 20, 10)
npt.assert_array_almost_equal(conv_p_12.logpdf(x), comb_p_12.logpdf(x), decimal=1)
npt.assert_array_almost_equal(conv_p_123.logpdf(x), comb_p_123.logpdf(x), decimal=1)
# same again for addition
p_1 = NumericalDistribution.from_pd(NormalDistribution(-986, 0.346))
p_2 = NumericalDistribution.from_pd(NormalDistribution(16, 2.463))
p_3 = NumericalDistribution.from_pd(NormalDistribution(107, 1.397))
conv_p_12 = _convolve_numerical([p_1, p_2], central_values='sum')
comb_p_12 = NormalDistribution(-970, sqrt(0.346**2 + 2.463**2))
conv_p_123 = _convolve_numerical([p_1, p_2, p_3], central_values='sum')
comb_p_123 = NormalDistribution(-863, sqrt(0.346**2 + 2.463**2 + 1.397**2))
x = np.linspace(-10, 10, 10)
npt.assert_array_almost_equal(conv_p_12.logpdf(x-970), comb_p_12.logpdf(x-970), decimal=1)
npt.assert_array_almost_equal(conv_p_123.logpdf(x-863), comb_p_123.logpdf(x-863), decimal=1)
def test_convolve_multivariate_gaussian(self):
from flavio.statistics.probability import _convolve_multivariate_gaussians
cov1 = np.array([[(0.2e-3)**2, 0.2e-3*0.5*0.3],[0.2e-3*0.5*0.3, 0.5**2]])
cov2 = np.array([[0.2**2, 0.5*0.2*0.4], [0.5*0.2*0.4, 0.4**2]])
cov12 = cov1 + cov2
c1 = [2, 5]
c2 = [-100, -250]
p_11 = MultivariateNormalDistribution(c1, cov1)
p_12 = MultivariateNormalDistribution(c1, cov2)
p_22 = MultivariateNormalDistribution(c2, cov2)
conv_11_12 = convolve_distributions([p_11, p_12])
self.assertIsInstance(conv_11_12, MultivariateNormalDistribution)
npt.assert_array_equal(conv_11_12.central_value, [2, 5])
npt.assert_array_almost_equal(conv_11_12.covariance, cov12, decimal=15)
with self.assertRaises(AssertionError):
convolve_distributions([p_11, p_22])
conv_11_22 = convolve_distributions([p_11, p_22], central_values='sum')
self.assertIsInstance(conv_11_22, MultivariateNormalDistribution)
npt.assert_array_almost_equal(conv_11_22.covariance, cov12, decimal=15)
npt.assert_array_equal(conv_11_22.central_value, [-100+2, -250+5])
def test_convolve_multivariate_gaussian_numerical(self):
from flavio.statistics.probability import convolve_distributions
cov1 = [[(0.1)**2, 0.1*0.5*0.3],[0.1*0.5*0.3, 0.5**2]]
cov2 = [[0.2**2, 0.5*0.2*0.4], [0.5*0.2*0.4, 0.4**2]]
c1 = [2, 5]
c2 = [-100, -250]
p_11 = MultivariateNormalDistribution(c1, cov1)
p_12 = MultivariateNormalDistribution(c1, cov2)
p_22 = MultivariateNormalDistribution(c2, cov2)
n_11 = MultivariateNumericalDistribution.from_pd(p_11)
n_12 = MultivariateNumericalDistribution.from_pd(p_12)
n_22 = MultivariateNumericalDistribution.from_pd(p_22)
conv_11_12_gauss = convolve_distributions([p_11, p_12])
conv_11_12 = convolve_distributions([p_11, n_12])
self.assertIsInstance(conv_11_12, MultivariateNumericalDistribution)
npt.assert_array_almost_equal(conv_11_12.central_value, [2, 5], decimal=1)
self.assertAlmostEqual(conv_11_12.logpdf([2.2, 4]),
conv_11_12_gauss.logpdf([2.2, 4]), delta=0.1)
self.assertAlmostEqual(conv_11_12.logpdf([2.2, 6]),
conv_11_12_gauss.logpdf([2.2, 6]), delta=0.1)
self.assertAlmostEqual(conv_11_12.logpdf([1.4, 4]),
conv_11_12_gauss.logpdf([1.4, 4]), delta=0.2)
self.assertAlmostEqual(conv_11_12.logpdf([1.4, 6]),
conv_11_12_gauss.logpdf([1.4, 6]), delta=0.1)
with self.assertRaises(AssertionError):
convolve_distributions([p_11, n_22])
conv_11_22 = convolve_distributions([p_11, n_22], central_values='sum')
conv_11_22_gauss = convolve_distributions([p_11, p_22], central_values='sum')
self.assertIsInstance(conv_11_22, MultivariateNumericalDistribution)
npt.assert_array_almost_equal(conv_11_22.central_value, [-100+2, -250+5], decimal=1)
self.assertAlmostEqual(conv_11_22.logpdf([2.2-100, 4-250]),
conv_11_22_gauss.logpdf([2.2-100, 4-250]), delta=0.1)
self.assertAlmostEqual(conv_11_22.logpdf([1.6-100, 5.5-250]),
conv_11_22_gauss.logpdf([1.6-100, 5.5-250]), delta=0.1)
def test_1d_errors(self):
p = NormalDistribution(3, 0.2)
q = NumericalDistribution.from_pd(p)
self.assertEqual(p.error_left, 0.2)
self.assertEqual(p.error_right, 0.2)
self.assertAlmostEqual(q.error_left, 0.2, places=2)
self.assertAlmostEqual(q.error_right, 0.2, places=2)
self.assertAlmostEqual(q.get_error_left(method='hpd'), 0.2, places=2)
self.assertAlmostEqual(q.get_error_left(method='hpd', nsigma=2), 0.4, places=2)
self.assertAlmostEqual(q.get_error_right(method='hpd'), 0.2, places=2)
p = AsymmetricNormalDistribution(3, 0.2, 0.5)
q = NumericalDistribution.from_pd(p)
self.assertEqual(p.error_left, 0.5)
self.assertEqual(p.error_right, 0.2)
self.assertAlmostEqual(q.error_left, 0.5, places=2)
self.assertAlmostEqual(q.error_right, 0.2, places=2)
self.assertAlmostEqual(q.get_error_left(method='hpd'), 0.5, places=2)
self.assertAlmostEqual(q.get_error_right(method='hpd'), 0.2, places=2)
self.assertAlmostEqual(q.get_error_right(method='hpd', nsigma=2), 0.4, places=2)
p = DeltaDistribution(3)
self.assertEqual(p.error_left, 0)
self.assertEqual(p.error_right, 0)
p = UniformDistribution(3, 0.4)
q = NumericalDistribution.from_pd(p)
self.assertAlmostEqual(p.error_left, 0.4*0.68, places=2)
self.assertAlmostEqual(p.error_right, 0.4*0.68, places=2)
self.assertAlmostEqual(q.error_left, 0.4*0.68, places=2)
self.assertAlmostEqual(q.error_right, 0.4*0.68, places=2)
self.assertAlmostEqual(q.get_error_left(method='hpd'), 0.4*0.68, places=2)
self.assertAlmostEqual(q.get_error_right(method='hpd'), 0.4*0.68, places=2)
self.assertAlmostEqual(q.get_error_right(method='hpd', nsigma=2), 0.4*0.95, places=2)
p = HalfNormalDistribution(3, +0.5)
q = NumericalDistribution.from_pd(p)
self.assertEqual(p.error_left, 0)
self.assertEqual(p.error_right, 0.5)
self.assertAlmostEqual(q.error_left, 0, places=2)
self.assertAlmostEqual(q.error_right, 0.5, places=2)
# this does not work (returns nan)
self.assertTrue(np.isnan(q.get_error_left(method='hpd')))
self.assertTrue(np.isnan(q.get_error_right(method='hpd')))
# this works
self.assertAlmostEqual(q.get_error_right(method='limit'), 0.5, places=2)
p = HalfNormalDistribution(3, -0.5)
q = NumericalDistribution.from_pd(p)
self.assertEqual(p.error_left, 0.5)
self.assertEqual(p.error_right, 0)
self.assertAlmostEqual(q.error_left, 0.5, places=2)
self.assertAlmostEqual(q.error_right, 0, places=2)
# this does not work (returns nan)
self.assertTrue(np.isnan(q.get_error_left(method='hpd')))
self.assertTrue(np.isnan(q.get_error_right(method='hpd')))
# this works
self.assertAlmostEqual(q.get_error_left(method='limit'), 0.5, places=2)
self.assertAlmostEqual(q.get_error_left(method='limit', nsigma=2), 1, places=2)
def test_multivariate_exclude(self):
c2 = np.array([1e-3, 2])
c3 = np.array([1e-3, 2, 0.4])
cov22 = np.array([[(0.2e-3)**2, 0.2e-3*0.5*0.3],[0.2e-3*0.5*0.3, 0.5**2]])
cov33 = np.array([[(0.2e-3)**2, 0.2e-3*0.5*0.3 , 0],[0.2e-3*0.5*0.3, 0.5**2, 0.01], [0, 0.01, 0.1**2]])
pdf1 = NormalDistribution(2, 0.5)
pdf2 = MultivariateNormalDistribution(c2, cov22)
pdf3 = MultivariateNormalDistribution(c3, cov33)
self.assertEqual(pdf2.logpdf([1.1e-3, 2.4]), pdf3.logpdf([1.1e-3, 2.4], exclude=2))
self.assertEqual(pdf1.logpdf(2.4), pdf3.logpdf([2.4], exclude=(0,2)))
with self.assertRaises(ValueError):
# dimensions don't match
self.assertEqual(pdf2.logpdf([1.1e-3, 2.4]), pdf3.logpdf([1.1e-3, 2.4, 0.2], exclude=2))
def test_gaussian_kde(self):
# check that a random Gaussian is reproduced correctly
np.random.seed(42)
dat = np.random.normal(117, 23, size=100)
kde = GaussianKDE(dat)
norm = scipy.stats.norm(117, 23)
x = np.linspace(117-23, 117+23, 10)
npt.assert_array_almost_equal(kde.pdf(x)/norm.pdf(x), np.ones(10), decimal=1)
# check scott's factor
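        # (Scott's rule for 1-D data gives bandwidth ~ std * n**(-1/5); with n = 100
        # samples that factor is 100**(-0.2) ~ 0.40, hence the 0.4 * 23 reference.)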
self.assertAlmostEqual(kde.bandwidth, 0.4*23, delta=0.4*23*0.1*2)
def test_vectorize(self):
# check that all logpdf methods work on arrays as well
np.random.seed(42)
xr = np.random.rand(10)
d = UniformDistribution(0, 1)
self.assertEqual(d.logpdf(xr).shape, (10,))
d = DeltaDistribution(1)
lpd = d.logpdf([2,3,4,5,1,1,3,6,1,3,5,1])
npt.assert_array_equal(lpd, [-np.inf, -np.inf, -np.inf, -np.inf,
0, 0, -np.inf, -np.inf, 0,
-np.inf, -np.inf, 0 ])
d = NormalDistribution(0, 1)
self.assertEqual(d.logpdf(xr).shape, (10,))
d = AsymmetricNormalDistribution(0, 1, 0.5)
self.assertEqual(d.logpdf(xr).shape, (10,))
d = HalfNormalDistribution(0, 1)
self.assertEqual(d.logpdf(xr).shape, (10,))
d = GammaDistributionPositive(1, 0, 3)
self.assertEqual(d.logpdf(xr).shape, (10,))
d = NumericalDistribution.from_pd(NormalDistribution(0, 1))
self.assertEqual(d.logpdf(xr).shape, (10,))
d = MultivariateNormalDistribution([1, 2, 3], np.eye(3))
xr3 = np.random.rand(10, 3)
xr2 = np.random.rand(10, 2)
self.assertEqual(d.logpdf(xr3[0]).shape, ())
self.assertEqual(d.logpdf(xr3).shape, (10,))
self.assertEqual(d.logpdf(xr2[0], exclude=(0,)).shape, ())
self.assertEqual(d.logpdf(xr2, exclude=(0,)).shape, (10,))
self.assertEqual(d.logpdf(xr[0], exclude=(0, 1)).shape, ())
self.assertEqual(d.logpdf(xr, exclude=(0, 1)).shape, (10,))
xi = [np.linspace(-1,1,5), np.linspace(-1,1,6), np.linspace(-1,1,7)]
y = np.random.rand(5,6,7)
d = MultivariateNumericalDistribution(xi, y)
xr3 = np.random.rand(10, 3)
xr2 = np.random.rand(10, 2)
self.assertEqual(d.logpdf(xr3[0]).shape, ())
self.assertEqual(d.logpdf(xr3).shape, (10,))
self.assertEqual(d.logpdf(xr2[0], exclude=(0,)).shape, ())
self.assertEqual(d.logpdf(xr2, exclude=(0,)).shape, (10,))
self.assertEqual(d.logpdf(xr[0], exclude=(0, 1)).shape, ())
self.assertEqual(d.logpdf(xr, exclude=(0, 1)).shape, (10,))
def test_repr(self):
"""Test the __repr__ method of all PDs"""
fsp = 'flavio.statistics.probability.'
self.assertEqual(repr(NormalDistribution(1, 2)),
fsp + 'NormalDistribution(1, 2)')
self.assertEqual(repr(HalfNormalDistribution(1, -2)),
fsp + 'HalfNormalDistribution(1, -2)')
self.assertEqual(repr(AsymmetricNormalDistribution(1, 2, 3.)),
fsp + 'AsymmetricNormalDistribution(1, 2, 3.0)')
self.assertEqual(repr(DeltaDistribution(-3.)),
fsp + 'DeltaDistribution(-3.0)')
self.assertEqual(repr(UniformDistribution(1, 2)),
fsp + 'UniformDistribution(1, 2)')
self.assertEqual(repr(GaussianUpperLimit(1e-9, 0.95)),
fsp + 'GaussianUpperLimit(1e-09, 0.95)')
self.assertEqual(repr(GammaDistribution(5, -2, 1.5)),
fsp + 'GammaDistribution(5, -2, 1.5)')
self.assertEqual(repr(GammaDistributionPositive(5, -2, 1.5)),
fsp + 'GammaDistributionPositive(5, -2, 1.5)')
self.assertEqual(repr(GammaUpperLimit(15, 10, 1e-9, 0.95)),
fsp + 'GammaUpperLimit(15, 10, 1e-09, 0.95)')
self.assertEqual(repr(GeneralGammaUpperLimit(1e-9, 0.95, counts_total=15, counts_background=10, background_variance=0.2)),
fsp + 'GeneralGammaUpperLimit(1e-09, 0.95, counts_total=15, counts_signal=5, background_variance=0.2)')
self.assertEqual(repr(MultivariateNormalDistribution([1., 2], [[2, 0.1], [0.1, 2]])),
fsp + 'MultivariateNormalDistribution([1.0, 2], [[2, 0.1], [0.1, 2]])')
self.assertEqual(repr(NumericalDistribution([1., 2], [3, 4.])),
fsp + 'NumericalDistribution([1.0, 2], [3, 4.0])')
self.assertEqual(repr(GaussianKDE([1, 2, 3], 0.1)),
fsp + 'GaussianKDE([1, 2, 3], 0.1, 3)')
self.assertEqual(repr(KernelDensityEstimate([1, 2, 3], NormalDistribution(0, 0.5))),
fsp + 'KernelDensityEstimate([1, 2, 3], ' + fsp + 'NormalDistribution(0, 0.5), 3)')
self.assertEqual(repr(MultivariateNumericalDistribution([[1., 2], [10., 20]], [[3, 4.],[5, 6.]], [2, 3])),
fsp + 'MultivariateNumericalDistribution([[1.0, 2.0], [10.0, 20.0]], [[3.0, 4.0], [5.0, 6.0]], [2, 3])')
def test_class_string(self):
class_from_string_old = {
'delta': DeltaDistribution,
'uniform': UniformDistribution,
'normal': NormalDistribution,
'asymmetric_normal': AsymmetricNormalDistribution,
'half_normal': HalfNormalDistribution,
'gaussian_upper_limit': GaussianUpperLimit,
'gamma': GammaDistribution,
'gamma_positive': GammaDistributionPositive,
'gamma_upper_limit': GammaUpperLimit,
'general_gamma_upper_limit': GeneralGammaUpperLimit,
'numerical': NumericalDistribution,
'multivariate_normal': MultivariateNormalDistribution,
'multivariate_numerical': MultivariateNumericalDistribution,
'gaussian_kde': GaussianKDE,
'general_gamma_positive': GeneralGammaDistributionPositive,
}
for k, v in class_from_string_old.items():
self.assertEqual(v.class_to_string(), k)
self.assertEqual(string_to_class(k), v)
self.assertEqual(string_to_class(v.__name__), v)
self.assertEqual(class_from_string_old,
{k: v for k, v in class_from_string.items()
if v != KernelDensityEstimate
and v != LogNormalDistribution},
msg="Failed for {}".format(k))
def test_get_yaml(self):
"""Test the test_get_yaml method of all PDs"""
self.assertEqual(yaml.safe_load(NormalDistribution(1, 2).get_yaml()),
{'distribution': 'normal',
'central_value': 1,
'standard_deviation': 2})
self.assertEqual(yaml.safe_load(HalfNormalDistribution(1, -2).get_yaml()),
{'distribution': 'half_normal',
'central_value': 1,
'standard_deviation': -2})
self.assertEqual(yaml.safe_load(AsymmetricNormalDistribution(1, 2, 3.).get_yaml()),
{'distribution': 'asymmetric_normal',
'central_value': 1,
'right_deviation': 2,
'left_deviation': 3.})
self.assertEqual(yaml.safe_load(MultivariateNormalDistribution([1., 2], [[4, 0.2], [0.2, 4]]).get_yaml()),
{'distribution': 'multivariate_normal',
'central_value': [1., 2],
'covariance': [[4, 0.2], [0.2, 4]],
'standard_deviation': [2, 2],
'correlation': [[1, 0.05], [0.05, 1]],
})
self.assertEqual(yaml.safe_load(KernelDensityEstimate([1, 2, 3], NormalDistribution(0, 0.5)).get_yaml()),
{'distribution': 'kernel_density_estimate',
'data': [1, 2, 3],
'kernel': {'distribution': 'normal',
'central_value': 0,
'standard_deviation': 0.5},
'n_bins': 3})
self.assertEqual(yaml.safe_load(MultivariateNumericalDistribution([[1., 2], [10., 20]], [[3, 4.],[5, 6.]], [2, 3]).get_yaml()),
{'distribution': 'multivariate_numerical',
'xi': [[1.0, 2.0], [10.0, 20.0]],
'y': [[3.0, 4.0], [5.0, 6.0]],
'central_value': [2, 3]})
def test_get_dict(self):
ps = [
NormalDistribution(1, 2),
HalfNormalDistribution(1, -2),
AsymmetricNormalDistribution(1, 2, 3.),
DeltaDistribution(-3.),
UniformDistribution(1, 2),
GaussianUpperLimit(1e-9, 0.95),
GammaDistribution(5, -2, 1.5),
GammaDistributionPositive(5, -2, 1.5),
GammaUpperLimit(15, 10, 1e-9, 0.95),
GeneralGammaUpperLimit(1e-9, 0.95, counts_total=15, counts_background=10, background_variance=0.2),
MultivariateNormalDistribution([1., 2], [[2, 0.1], [0.1, 2]]),
NumericalDistribution([1., 2], [3, 4.]),
GaussianKDE([1, 2, 3], 0.1),
KernelDensityEstimate([1, 2, 3], NormalDistribution(0, 0.5)),
MultivariateNumericalDistribution([[1., 2], [10., 20]], [[3, 4.],[5, 6.]], [2, 3])
]
for p in ps:
# try instantiating a class by feeding the get_dict to __init__
d = p.get_dict()
pnew = p.__class__(**d)
# check if the new class is the same as the old
self.assertEqual(repr(pnew), repr(p))
self.assertEqual(pnew.get_yaml(), p.get_yaml())
def test_dict2dist(self):
d = [
{'distribution': 'normal', 'central_value': 1, 'standard_deviation': 0.2},
{'distribution': 'uniform', 'central_value': 2, 'half_range': 1}
]
p = dict2dist(d)
self.assertEqual(repr(p[0]), repr(NormalDistribution(1.0, 0.2)))
self.assertEqual(repr(p[1]), repr(UniformDistribution(2.0, 1.0)))
p = dict2dist(d[0])
self.assertEqual(repr(p[0]), repr(NormalDistribution(1.0, 0.2)))
def test_mvnormal_correlation(self):
p1 = MultivariateNormalDistribution([0, 0], [[1, 1.5], [1.5, 4]])
p2 = MultivariateNormalDistribution([0, 0],
standard_deviation=[1, 2],
correlation=[[1, 0.75], [0.75, 1]])
for p in [p1, p2]:
npt.assert_array_equal(p.covariance, np.array([[1, 1.5], [1.5, 4]]))
npt.assert_array_equal(p.standard_deviation, np.array([1, 2]))
npt.assert_array_equal(p.correlation, np.array([[1, 0.75], [0.75, 1]]))
with self.assertRaises(ValueError):
MultivariateNormalDistribution([0, 0], correlation=[[1, 0.75], [0.75, 1]])
class TestCombineDistributions(unittest.TestCase):
def test_combine_normal(self):
p_1 = NormalDistribution(5, 0.2)
p_2 = NormalDistribution(4, 0.3)
p_comb = combine_distributions([p_1, p_2])
self.assertIsInstance(p_comb, NormalDistribution)
s = np.array([0.2, 0.3])
c = np.array([5, 4])
w = 1 / s**2 # weights
s_comb = sqrt(1 / np.sum(w))
c_comb = np.sum(c * w) / np.sum(w)
self.assertEqual(p_comb.central_value, c_comb)
self.assertEqual(p_comb.standard_deviation, s_comb)
def test_combine_delta(self):
pd_1 = DeltaDistribution(12.5)
pd_2 = DeltaDistribution(12.3)
pn = NormalDistribution(12.4, 2.463)
with self.assertRaises(ValueError):
combine_distributions([pd_1, pd_2])
for pd in [pd_1, pd_2]:
p_comb = combine_distributions([pd, pn])
self.assertIsInstance(p_comb, DeltaDistribution)
self.assertEqual(p_comb.central_value, pd.central_value)
def test_combine_numerical(self):
p_1 = NumericalDistribution.from_pd(NormalDistribution(5, 0.2))
p_2 = NumericalDistribution.from_pd(NormalDistribution(4, 0.3))
p_comb = combine_distributions([p_1, p_2])
self.assertIsInstance(p_comb, NumericalDistribution)
s = np.array([0.2, 0.3])
c = np.array([5, 4])
w = 1 / s**2 # weights
s_comb = sqrt(1 / np.sum(w))
c_comb = np.sum(c * w) / np.sum(w)
self.assertAlmostEqual(p_comb.central_value, c_comb, places=2)
self.assertAlmostEqual(p_comb.error_left, s_comb, places=2)
self.assertAlmostEqual(p_comb.error_right, s_comb, places=2)
def test_combine_multivariate_normal(self):
        # compare the combination of two univariate Gaussians
# with the multivariate combination of two uncorrelated 2D Gaussians
p11 = NormalDistribution(3, 1)
p12 = NormalDistribution(5, 2)
p21 = NormalDistribution(4, 2)
p22 = NormalDistribution(6, 3)
p1 = MultivariateNormalDistribution([3, 5], [[1, 0], [0, 4]])
p2 = MultivariateNormalDistribution([4, 6], [[4, 0], [0, 9]])
pc1 = combine_distributions([p11, p21])
pc2 = combine_distributions([p12, p22])
pc = combine_distributions([p1, p2])
self.assertIsInstance(pc, MultivariateNormalDistribution)
self.assertAlmostEqual(pc.central_value[0], pc1.central_value)
self.assertAlmostEqual(pc.central_value[1], pc2.central_value)
self.assertAlmostEqual(pc.covariance[0, 0], pc1.standard_deviation**2)
self.assertAlmostEqual(pc.covariance[1, 1], pc2.standard_deviation**2)
def test_combine_multivariate_numerical(self):
p1 = MultivariateNormalDistribution([3, 5], [[1, 0], [0, 4]])
p2 = MultivariateNormalDistribution([4, 6], [[4, 0], [0, 9]])
p1n = MultivariateNumericalDistribution.from_pd(p1)
p2n = MultivariateNumericalDistribution.from_pd(p2)
pc = combine_distributions([p1, p2])
pcn = combine_distributions([p1n, p2n])
self.assertAlmostEqual(pc.logpdf([2.7, 4.8]), pcn.logpdf([2.7, 4.8]), delta=0.01)
self.assertAlmostEqual(pc.logpdf([6.7, 2.8]), pcn.logpdf([6.7, 2.8]), delta=0.01)
| 53.811094 | 135 | 0.615374 |
7949ac1d83c40c7ff3d1e5ad8de3e58b41c1808d
| 6,833 |
py
|
Python
|
tunas/depthwise_initializers_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901 |
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
tunas/depthwise_initializers_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891 |
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
tunas/depthwise_initializers_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047 |
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for depthwise_initializers.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow.compat.v1 as tf
from tunas import depthwise_initializers
class ModelOpsTest(tf.test.TestCase):
def test_variance_scaling_untruncated_normal_fan_in(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=1.0,
mode='fan_in',
distribution='untruncated_normal')
tensor = initializer([3, 5, 1024, 1])
value = self.evaluate(tensor)
self.assertEqual(value.shape, (3, 5, 1024, 1))
self.assertNear(np.mean(value), 0.0, 0.01)
self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)
def test_variance_scaling_truncated_normal_fan_in(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=1.0,
mode='fan_in',
distribution='truncated_normal')
tensor = initializer([3, 5, 1024, 1])
value = self.evaluate(tensor)
self.assertEqual(value.shape, (3, 5, 1024, 1))
self.assertNear(np.mean(value), 0.0, 0.01)
self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)
def test_variance_scaling_uniform_fan_in(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=1.0,
mode='fan_in',
distribution='uniform')
tensor = initializer([3, 5, 1024, 1])
value = self.evaluate(tensor)
self.assertEqual(value.shape, (3, 5, 1024, 1))
self.assertNear(np.mean(value), 0.0, 0.01)
self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)
def test_variance_scaling_scale_is_2(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=2.0,
mode='fan_in',
distribution='untruncated_normal')
tensor = initializer([3, 5, 1024, 1])
value = self.evaluate(tensor)
self.assertEqual(value.shape, (3, 5, 1024, 1))
self.assertNear(np.mean(value), 0.0, 0.01)
self.assertNear(np.std(value), math.sqrt(2.0 / (3 * 5)), 0.01)
def test_fan_in_depth_multiplier_is_2(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=1.0,
mode='fan_in',
distribution='untruncated_normal')
tensor = initializer([3, 5, 1024, 2])
value = self.evaluate(tensor)
self.assertEqual(value.shape, (3, 5, 1024, 2))
self.assertNear(np.mean(value), 0.0, 0.01)
self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)
def test_fan_out_depth_multiplier_is_2(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=1.0,
mode='fan_out',
distribution='untruncated_normal')
tensor = initializer([3, 5, 1024, 2])
value = self.evaluate(tensor)
self.assertEqual(value.shape, (3, 5, 1024, 2))
self.assertNear(np.mean(value), 0.0, 0.01)
self.assertNear(np.std(value), 1.0 / math.sqrt(2 * 3 * 5), 0.01)
def test_fan_avg_depth_multiplier_is_2(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=1.0,
mode='fan_avg',
distribution='untruncated_normal')
tensor = initializer([3, 5, 1024, 2])
value = self.evaluate(tensor)
self.assertEqual(value.shape, (3, 5, 1024, 2))
self.assertNear(np.mean(value), 0.0, 0.01)
self.assertNear(np.std(value), 1.0 / math.sqrt(1.5 * 3 * 5), 0.01)
def test_depthwise_variance_scaling_end_to_end(self):
# This is an end-to-end test for the VarianceScaling() class.
    # We apply the initializer to a tensor, and verify that the
# distribution of outputs matches what we expect.
input_tensor = tf.random.normal(
shape=(32, 20, 20, 1024),
mean=0.0,
stddev=1)
kernel_initializer = depthwise_initializers.DepthwiseVarianceScaling(
scale=1.0,
mode='fan_in',
distribution='truncated_normal')
kernel = tf.get_variable(
name='kernel',
initializer=kernel_initializer,
shape=[5, 5, 1024, 1])
output_tensor = tf.nn.depthwise_conv2d(
input_tensor,
kernel,
strides=(1, 1, 1, 1),
padding='VALID')
self.evaluate(tf.global_variables_initializer())
result = self.evaluate(output_tensor)
self.assertNear(np.mean(result), 0.0, 0.05)
self.assertNear(np.std(result), 1.0, 0.05)
def test_depthwise_he_normal_initializer_end_to_end(self):
# This is an end-to-end test for the depthwise_he_normal() function.
# We apply a depthwise_he_normal() to a tensor, and verify that the
# distribution of outputs matches what we expect.
input_tensor = tf.random.normal(
shape=(32, 20, 20, 1024),
mean=0.0,
stddev=1)
kernel_initializer = depthwise_initializers.depthwise_he_normal()
kernel = tf.get_variable(
name='kernel',
initializer=kernel_initializer,
shape=[5, 5, 1024, 1])
output_tensor = tf.nn.depthwise_conv2d(
tf.nn.relu(input_tensor),
kernel,
strides=(1, 1, 1, 1),
padding='VALID')
self.evaluate(tf.global_variables_initializer())
result = self.evaluate(output_tensor)
self.assertNear(np.mean(result), 0.0, 0.05)
self.assertNear(np.std(result), 1.0, 0.05)
def test_variance_scaling_initializer_dtypes(self):
initializer0 = depthwise_initializers.DepthwiseVarianceScaling()
tensor0 = initializer0([3, 3, 128, 1])
self.assertEqual(tensor0.dtype, tf.float32)
initializer1 = depthwise_initializers.DepthwiseVarianceScaling()
tensor1 = initializer1([3, 3, 128, 1], dtype=tf.float64)
self.assertEqual(tensor1.dtype, tf.float64)
initializer2 = depthwise_initializers.DepthwiseVarianceScaling(
dtype=tf.float64)
tensor2 = initializer2([3, 3, 128, 1])
self.assertEqual(tensor2.dtype, tf.float64)
def test_variance_scaling_seed(self):
initializer = depthwise_initializers.DepthwiseVarianceScaling(seed=42)
tensor1 = initializer([3, 3, 128, 1])
tensor2 = initializer([3, 3, 128, 1])
self.assertAllClose(self.evaluate(tensor1), self.evaluate(tensor2))
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| 35.404145 | 74 | 0.687985 |
7949ac7c98e798d33250e6e8b74e8adb5d136331
| 1,856 |
py
|
Python
|
test/functional/rpc_timestampindex.py
|
Lucky1689/ukcoin
|
11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_timestampindex.py
|
Lucky1689/ukcoin
|
11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_timestampindex.py
|
Lucky1689/ukcoin
|
11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Copyright (c) 2020 The Ukcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test timestampindex generation and fetching
#
import time
from test_framework.test_framework import UkcoinTestFramework
from test_framework.util import *
class TimestampIndexTest(UkcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.add_nodes(4, [
# Nodes 0/1 are "wallet" nodes
["-debug"],
["-debug", "-timestampindex"],
# Nodes 2/3 are used for testing
["-debug"],
["-debug", "-timestampindex"]])
self.start_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
self.sync_all()
def run_test(self):
print("Mining 25 blocks...")
blockhashes = self.nodes[0].generate(25)
time.sleep(3)
print("Mining 25 blocks...")
blockhashes.extend(self.nodes[0].generate(25))
time.sleep(3)
print("Mining 25 blocks...")
blockhashes.extend(self.nodes[0].generate(25))
self.sync_all()
low = self.nodes[1].getblock(blockhashes[0])["time"]
high = low + 76
print("Checking timestamp index...")
hashes = self.nodes[1].getblockhashes(high, low)
assert_equal(len(hashes), len(blockhashes))
assert_equal(hashes, blockhashes)
print("Passed\n")
if __name__ == '__main__':
TimestampIndexTest().main()
| 27.701493 | 69 | 0.634698 |
7949ad5253e2bae10b32bd84627ba89dda338ed4
| 339 |
py
|
Python
|
src/AddressBook/migrations/0002_auto_20180101_1700.py
|
thunderoy/AddressBook
|
0d848c1732585c990f057b8e99b4b9290d0b043c
|
[
"MIT"
] | 1 |
2018-06-13T07:45:13.000Z
|
2018-06-13T07:45:13.000Z
|
src/AddressBook/migrations/0002_auto_20180101_1700.py
|
thunderoy/AddressBook
|
0d848c1732585c990f057b8e99b4b9290d0b043c
|
[
"MIT"
] | null | null | null |
src/AddressBook/migrations/0002_auto_20180101_1700.py
|
thunderoy/AddressBook
|
0d848c1732585c990f057b8e99b4b9290d0b043c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2018-01-01 17:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('admin', '0002_logentry_remove_auto_add'),
('AddressBook', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='AppUser',
),
]
| 18.833333 | 51 | 0.60472 |
7949ad602c0a3aa0ff475a786ab5b898c4bccbf8
| 1,618 |
py
|
Python
|
src/test_receive_addresses/test_receive_addresses.py
|
ska-telescope/sdp-workflows-procfunc
|
ef6e7be9584a006e936139ae653902a41af4d906
|
[
"BSD-3-Clause"
] | null | null | null |
src/test_receive_addresses/test_receive_addresses.py
|
ska-telescope/sdp-workflows-procfunc
|
ef6e7be9584a006e936139ae653902a41af4d906
|
[
"BSD-3-Clause"
] | null | null | null |
src/test_receive_addresses/test_receive_addresses.py
|
ska-telescope/sdp-workflows-procfunc
|
ef6e7be9584a006e936139ae653902a41af4d906
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Workflow to test generation of receive addresses.
The purpose of this workflow is to test the mechanism for generating SDP
receive addresses from the channel link map contained in the SBI. The workflow
picks it up from there, uses it to generate the receive addresses for each scan
type and writes them to the processing block state. The subarray publishes this
address map on the appropriate attribute to complete the transition following
AssignResources.
This workflow does not generate any deployments.
"""
import logging
import ska_ser_logging
from ska_sdp_workflow import workflow
ska_ser_logging.configure_logging()
LOG = logging.getLogger("test_receive_addresses")
LOG.setLevel(logging.DEBUG)
# Claim processing block
pb = workflow.ProcessingBlock()
# Default maximum number of channels per receive process
max_channels = 20
# Port configuration
port_start = 9000
num_ports = 1
# Get the channel link map from SBI
scan_types = pb.get_scan_types()
# Port and receive process configuration
host_port, num_process = pb.configure_recv_processes_ports(
scan_types, max_channels, port_start, num_ports
)
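# With the defaults above this requests at most 20 channels per receive process,
# one port per process, starting from port 9000 (an illustrative reading; the
# exact mapping is defined by configure_recv_processes_ports).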
# Create work phase
LOG.info("Create work phase")
work_phase = pb.create_phase("Work", [])
with work_phase:
# Deploying a fake execution engine.
work_phase.ee_deploy_helm("test-receive")
# Add receive addresses to pb
pb.receive_addresses(configured_host_port=host_port)
# ... Do some processing here ...
LOG.info("Done, now idling...")
for txn in work_phase.wait_loop():
if work_phase.is_sbi_finished(txn):
break
txn.loop(wait=True)
| 26.966667 | 79 | 0.767614 |
7949ae13bd1b600938681275bf3ee97309e995e4
| 1,652 |
py
|
Python
|
allure-pytest/setup.py
|
wuhuizuo/allure-python
|
7285adf0690bb703225d45e236594581bfb62728
|
[
"Apache-2.0"
] | 1 |
2018-07-23T16:09:54.000Z
|
2018-07-23T16:09:54.000Z
|
allure-pytest/setup.py
|
hosniadala-dt/allure-python
|
7285adf0690bb703225d45e236594581bfb62728
|
[
"Apache-2.0"
] | null | null | null |
allure-pytest/setup.py
|
hosniadala-dt/allure-python
|
7285adf0690bb703225d45e236594581bfb62728
|
[
"Apache-2.0"
] | 1 |
2020-08-05T05:40:44.000Z
|
2020-08-05T05:40:44.000Z
|
import os
import sys
from setuptools import setup
from pkg_resources import require, DistributionNotFound, VersionConflict
try:
require('pytest-allure-adaptor')
print("""
You have pytest-allure-adaptor installed.
You need to remove pytest-allure-adaptor from your site-packages
before installing allure-pytest, or conflicts may result.
""")
sys.exit()
except (DistributionNotFound, VersionConflict):
pass
PACKAGE = "allure-pytest"
VERSION = "2.4.1"
classifiers = [
'Development Status :: 5 - Production/Stable',
'Framework :: Pytest',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
]
install_requires = [
"pytest>=3.3.0",
"six>=1.9.0",
"allure-python-commons==2.4.1"
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def main():
setup(
name=PACKAGE,
version=VERSION,
description="Allure pytest integration",
url="https://github.com/allure-framework/allure-python",
author="QAMetaSoftware, Stanislav Seliverstov",
author_email="sseliverstov@qameta.io",
license="Apache-2.0",
classifiers=classifiers,
keywords="allure reporting pytest",
long_description=read('README.rst'),
packages=["allure_pytest"],
package_dir={"allure_pytest": "src"},
entry_points={"pytest11": ["allure_pytest = allure_pytest.plugin"]},
install_requires=install_requires
)
if __name__ == '__main__':
main()
| 27.533333 | 76 | 0.66586 |
7949ae3669a030b8cf6b5885d53ca361ba9a6f32
| 4,468 |
py
|
Python
|
test/confs/sugar.conf.py
|
Thecarisma/themata
|
09a8ce670479ea4e9b5a26457f5cb290728f604a
|
[
"CC0-1.0"
] | 2 |
2020-04-27T10:14:54.000Z
|
2020-04-28T01:24:59.000Z
|
test/confs/sugar.conf.py
|
Thecarisma/themata
|
09a8ce670479ea4e9b5a26457f5cb290728f604a
|
[
"CC0-1.0"
] | 28 |
2020-05-16T19:50:54.000Z
|
2021-12-02T07:38:03.000Z
|
test/confs/sugar.conf.py
|
Thecarisma/themata
|
09a8ce670479ea4e9b5a26457f5cb290728f604a
|
[
"CC0-1.0"
] | null | null | null |
import os
import themata
project = 'First Doc'
copyright = '2020, Adewale Azeez'
author = 'Adewale Azeez'
html_theme_path = [themata.get_html_theme_path()]
extensions = ['recommonmark']
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
html_theme_options = {
'project_icon': "https://github.com/Thecarisma/themata/raw/main/docs/images/themata.png",
'has_left_sidebar': True,
'has_right_sidebar': True,
'show_navigators': True,
'show_navigators_in_index': False,
'left_sidebar_only': [
"leftpage"
],
'right_sidebar_only': [
"sampleregex1"
],
'no_sidebar': [
"singlepage"
],
'navbar_links': [
("General", "general/index"),
("Download", "https://pypi.python.org/pypi/themata"),
("Introduction", "general/introduction"),
("FAQ", "general/faq")
],
'navbar_sec_links': [
("General", "general/index"),
("Download", "https://pypi.python.org/pypi/themata"),
("Introduction", "general/introduction"),
("FAQ", "general/faq")
],
'social_icons': [
("fab fa-twitter", "https://twitter.com/iamthecarisma"),
("fab fa-twitch", "https://www.twitch.tv/amsiraceht"),
("fab fa-github", "https://github.com/Thecarisma"),
("fab fa-linkedin", "https://www.linkedin.com/in/azeez-adewale/"),
("fab fa-stackoverflow", "https://stackoverflow.com/users/6626422/thecarisma")
],
'footer_menus': [
{
'icon': 'fa-phone',
'title': 'Contact',
'menu_items': [
{
'link': "#",
'title': 'https://thecarisma.github.io/'
},
{
'link': "",
'title': '12345678998'
}
]
},
{
'icon': 'fa-book',
'title': 'Documentation',
'menu_items': [
{
'link': "#",
'title': 'Title One'
},
{
'link': "#",
'title': 'Two'
},
{
'link': "#",
'title': 'Three'
},
{
'link': "#",
'title': 'Title Four'
},
{
'link': "#",
'title': 'Item Five'
},
]
},
{
'icon': 'fa-users',
'title': 'Community',
'menu_items': [
{
'link': "#",
'title': 'Stackoverflow'
},
{
'link': "",
'title': 'Slack'
},
{
'link': "",
'title': 'Forum'
},
{
'link': "",
'title': 'IRC'
}
]
}
],
"source_root": "https://github.com/Thecarisma/themata/tree/test/test/test_rst",
"metadata": {
"enable": True,
"url": "https://thecarisma.github.io/themata",
"type": "website",
"title": "Set of Highly customizable sphinx themes.",
"description": "Themata package contains different sphinx theme that can be easily customized to look like a complete website or just a documentation webpage.",
"image": "https://raw.githubusercontent.com/Thecarisma/themata/main/docs/images/themata.small.png",
"keywords": "python, sphinx, thecarisma, themata, documentation, markdown, rst, themes",
"author": "Adewale Azeez"
},
"twitter_metadata": {
"enable": True,
"card": "summary",
"site": "@iamthecarisma",
"creator": "@iamthecarisma",
"title": "Set of Highly customizable sphinx themes.",
"description": "Themata package contains different sphinx theme that can be easily customized to look like a complete website or just a documentation webpage.",
"image": "https://raw.githubusercontent.com/Thecarisma/themata/main/docs/images/themata.small.png",
}
}
html_theme = 'sugar'
| 32.613139 | 169 | 0.450761 |
7949b07f350c247420b50c9a0d607ded2b700d97
| 4,377 |
py
|
Python
|
db-server/dbcon/views.py
|
JannisBush/xs-leaks-browser-web
|
15ed76a87c1c30e15cd7d0b070e2853d17d322bc
|
[
"MIT"
] | null | null | null |
db-server/dbcon/views.py
|
JannisBush/xs-leaks-browser-web
|
15ed76a87c1c30e15cd7d0b070e2853d17d322bc
|
[
"MIT"
] | null | null | null |
db-server/dbcon/views.py
|
JannisBush/xs-leaks-browser-web
|
15ed76a87c1c30e15cd7d0b070e2853d17d322bc
|
[
"MIT"
] | null | null | null |
import json
from django.http import HttpResponse
from .models import (Result, Browser, Test, ObjectProperties,
GlobalProperties, Events, Observation, WindowProperties, LeakResult, CookieSecFetch)
import logging
logger = logging.getLogger(__name__)
def save_data_v2(request):
try:
message = "Success"
body_json = json.loads(request.body)
# Site is not set, we are in the Browser Mode
if body_json["site"] == "":
observation = {}
observation["browser"], _ = Browser.objects.get_or_create(**body_json["browser"])
observation["test"], _ = Test.objects.get_or_create(**body_json["test"])
if "events" in body_json:
observation["events"], _ = Events.objects.get_or_create(**body_json["events"])
if "op" in body_json:
observation["object_properties"], _ = ObjectProperties.objects.get_or_create(**body_json["op"])
if "gp" in body_json:
observation["global_properties"], _ = GlobalProperties.objects.get_or_create(**body_json["gp"])
if "win" in body_json:
observation["window_properties"], _ = WindowProperties.objects.get_or_create(**body_json["win"])
if "loading_time" in body_json:
observation["loading_time"] = body_json["loading_time"]
if "complete_time" in body_json:
observation["complete_time"] = body_json["complete_time"]
if "timed_out" in body_json:
observation["timed_out"] = body_json["timed_out"]
if "apg_url" in body_json:
observation["apg_url"] = body_json["apg_url"]
if "retest" in body_json:
observation["retest"] = body_json["retest"]
obsv = Observation(**observation)
obsv.save()
# Site is set, we are in the dynamic confirmation mode
else:
leak_result = {}
leak_result["browser"], _ = Browser.objects.get_or_create(**body_json["browser"])
test_json = body_json["test"]
test_json["url_dict_version"] = "notapplicable"
leak_result["test"], _ = Test.objects.get_or_create(**test_json)
if "events" in body_json:
leak_result["events"], _ = Events.objects.get_or_create(**body_json["events"])
if "op" in body_json:
leak_result["object_properties"], _ = ObjectProperties.objects.get_or_create(**body_json["op"])
if "gp" in body_json:
leak_result["global_properties"], _ = GlobalProperties.objects.get_or_create(**body_json["gp"])
if "win" in body_json:
leak_result["window_properties"], _ = WindowProperties.objects.get_or_create(**body_json["win"])
if "loading_time" in body_json:
leak_result["loading_time"] = body_json["loading_time"]
if "complete_time" in body_json:
leak_result["complete_time"] = body_json["complete_time"]
if "timed_out" in body_json:
leak_result["timed_out"] = body_json["timed_out"]
if "apg_url" in body_json:
leak_result["apg_url"] = body_json["apg_url"]
if "retest" in body_json:
leak_result["retest_num"] = body_json["retest"]
if "cookies" in body_json:
leak_result["cookies"] = body_json["cookies"]
if "site" in body_json:
leak_result["site"] = body_json["site"]
lkr = LeakResult(**leak_result)
lkr.save()
except Exception as e:
logger.warning(e)
message = "Failed"
finally:
return HttpResponse(message)
def save_data(request):
"""Save the data in a database.
    The request must be a POST and its body must be JSON that
    adheres to the schema of the Result model.
"""
body_json = json.loads(request.body)
res = Result(**body_json)
res.save()
return HttpResponse()
def save_test(request):
"""Save the cookie/sec-fetch data in a database."""
message = "Success"
try:
body_json = json.loads(request.body)
_, _ = CookieSecFetch.objects.get_or_create(**body_json)
except Exception as e:
logger.warning(e)
message = "Failed"
finally:
return HttpResponse(message)
| 44.212121 | 112 | 0.60658 |
7949b1469ec730f420fdf776fbf81d61e17bfd7a
| 2,880 |
py
|
Python
|
apps/hashblock_cli/scripts/ucum_to_assets.py
|
hashblock/sawtooth-uom
|
0b8f131ee4f2a3a70a19f21bc88fe064d39fe5cd
|
[
"MIT"
] | 5 |
2018-04-05T07:02:04.000Z
|
2018-07-10T09:39:03.000Z
|
apps/hashblock_cli/scripts/ucum_to_assets.py
|
hashblock/sawtooth-uom
|
0b8f131ee4f2a3a70a19f21bc88fe064d39fe5cd
|
[
"MIT"
] | 89 |
2018-03-11T14:46:07.000Z
|
2018-09-07T14:50:42.000Z
|
apps/hashblock_cli/scripts/ucum_to_assets.py
|
hashblock/sawtooth-uom
|
0b8f131ee4f2a3a70a19f21bc88fe064d39fe5cd
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright 2018 Frank V. Castellucci and Arthur Greef
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import urllib.request
from xml.dom import minidom, Node
def get_name(parent_node):
"""Fetches the node tag value for 'name'"""
return parent_node.getElementsByTagName('name')[0].childNodes[0].nodeValue
def get_code(parent_node):
"""Fetches the node attribute value for 'Code'"""
return str(parent_node.attributes["Code"].value)
def get_symbol(parent_node):
"""Fetches the node tag value for 'printSymbol'"""
    def traverse(node):
        # Walk down to the first text node and return its value; fall back to
        # the element's <name> if no text is found.
        while node.nodeType != Node.TEXT_NODE:
            if len(node.childNodes) != 0:
                node = node.childNodes[0]
            else:
                return get_name(parent_node)
        return node.data
x = parent_node.getElementsByTagName('printSymbol')
if len(x) == 0:
return get_name(parent_node)
else:
x0 = x[0]
cn = x0.childNodes
if len(cn) == 0:
return get_name(parent_node)
else:
return traverse(cn[0])
def genucum():
"""Generates a list of dictionaries representing ucum base units
and units"""
ucum = urllib.request.urlopen(
'http://unitsofmeasure.org/ucum-essence.xml').read()
dom = minidom.parseString(ucum)
base_units = dom.getElementsByTagName('base-unit')
units = dom.getElementsByTagName('unit')
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
used_primes = set()
genesis_array = [
{"system": "universal", "prime": "1".zfill(44), "key": "unity"}]
for b in base_units:
prime_id = None
for p in primes:
if p not in used_primes:
used_primes.add(p)
prime_id = "{:x}".format(p)
break
bname = {
"system": "ucum",
"prime": prime_id.zfill(44),
"key": get_code(b)}
genesis_array.append(bname)
for u in units:
uname = {"system": "ucum", "prime": "", "key": get_code(u)}
genesis_array.append(uname)
# print("{}".format(json.dumps(genesis_array)))
return genesis_array
if __name__ == '__main__':
x = genucum()
print("{}".format(x))
| 30.967742 | 80 | 0.587847 |
7949b1a561f113dc5d0f06aa6c43eaa8050dff3a
| 6,135 |
py
|
Python
|
rp/rsa.py
|
bptripp/rp
|
f076034547f7f8afe0f8aad8d0122f7035ca4ed1
|
[
"MIT"
] | null | null | null |
rp/rsa.py
|
bptripp/rp
|
f076034547f7f8afe0f8aad8d0122f7035ca4ed1
|
[
"MIT"
] | null | null | null |
rp/rsa.py
|
bptripp/rp
|
f076034547f7f8afe0f8aad8d0122f7035ca4ed1
|
[
"MIT"
] | null | null | null |
# Gradient descent to find a random activity pattern with a given representational
# similarity matrix. This works better than taking steps in the null space of the
# similarity matrix.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def get_random_stimuli(n_stim, dim):
"""
:param n_stim: Number of "stimuli" to sample (stimuli are vectors)
:param dim: Dimension of each stimulus
:return: Random stimulus vectors with entries between -1 and 1 (#stim by dim)
"""
return -1 + 2 * np.random.rand(n_stim, dim)
def get_gaussian_population_response(stim, n_neurons, dim, sigma):
"""
:param stim: Matrix of stimulus vectors (#stim by dim)
:param n_neurons: Number of neurons in a population
:param dim: Dimension of stimulus vectors
:param sigma: Width of Gaussian tuning curves of dim dimensions
:return: Responses of population of Gaussian tuning curves to given stimuli
"""
centres = -1 + 2 * np.random.rand(n_neurons, dim)
responses = np.zeros((stim.shape[0], n_neurons))
    for i in range(stim.shape[0]):
for j in range(n_neurons):
dist = np.linalg.norm(stim[i, :] - centres[j, :])
responses[i, j] = np.exp(-dist ** 2 / (2 * sigma ** 2))
return responses
def get_tf_corrcoef(tf_responses):
"""
:param tf_responses: A TensorFlow variable that holds a matrix of population responses to
a list of stimuli
:return: A TensorFlow variable that holds the matrix of correlations between responses to
        pairs of stimuli
"""
deviations = tf_responses - tf.expand_dims(tf.reduce_mean(tf_responses, axis=1), axis=1)
numerator = tf.matmul(deviations, tf.transpose(deviations))
d = tf.sqrt(tf.diag_part(numerator))
return tf.div(numerator, tf.tensordot(d, d, axes=0))
def test_get_tf_corrcoef():
responses = np.random.rand(80, 100)
tf_corrcoef = get_tf_corrcoef(responses)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
np.testing.assert_allclose(sess.run(tf_corrcoef), np.corrcoef(responses))
def get_population_response_with_similarity(n_neurons, similarity, initial_responses=None, iterations=10):
"""
:param n_neurons: Number of neurons in population
:param similarity: Correlation matrix to approximate (correlations between population responses to pairs of stimuli)
:param initial_responses (optional): An initial guess at the population responses (initialized to a random matrix
if not given)
:param iterations (optional): Iterations of an outer optimization loop (several optimization steps per iteration)
:return: Population responses that optimally approximate the similarity matrix
"""
if initial_responses is None:
initial_responses = np.random.rand(similarity.shape[0], n_neurons)
tf_responses = tf.get_variable('responses', initializer=tf.constant(initial_responses))
tf_target_similarity = tf.constant(similarity)
tf_actual_similarity = get_tf_corrcoef(tf_responses)
cost = tf.reduce_mean(tf.pow(tf_actual_similarity - tf_target_similarity, 2))
optimizer = tf.train.AdamOptimizer(learning_rate=.1)
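    # clip_op is re-applied after each round of Adam updates in the session loop
    # below, keeping the responses nonnegative (projected gradient descent on the
    # similarity-matching cost).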
clip_op = tf.assign(tf_responses, tf.clip_by_value(tf_responses, 0, 1000000))
opt_op = optimizer.minimize(cost, var_list=tf_responses)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
print(sess.run(cost))
for i in range(iterations):
for j in range(25):
opt_op.run()
sess.run(clip_op)
print(sess.run(cost))
result = sess.run(tf_responses)
return result
def get_similarity_via_random_subsets(n_neurons, similarity, n_batch, batches):
"""
:param n_neurons: Number of neurons in population
    :param similarity: Correlation matrix to approximate (correlations between population responses to pairs of stimuli)
:param n_batch: Size of subsets of stimuli to optimize at once
:param batches: Number of batches to optimize
:return: Population responses that optimally approximate the similarity matrix
"""
#TODO: manage memory by saving and loading parts of response and similarity matrices
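    # Each batch re-optimizes the responses for a random subset of stimuli while
    # the rest stay fixed, so the full similarity matrix is matched only
    # approximately and improves over successive batches.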
n_stimuli = similarity.shape[0]
responses = np.random.rand(n_stimuli, n_neurons)
for i in range(batches):
print('iteration {}'.format(i))
subset = np.random.choice(range(n_stimuli), size=n_batch, replace=False)
similarity_subset = similarity[subset,:][:,subset]
responses_subset = responses[subset,:]
tf.reset_default_graph()
new_responses = get_population_response_with_similarity(n_neurons,
similarity_subset,
initial_responses=responses_subset,
iterations=1)
responses[subset,:] = new_responses
return responses
if __name__ == '__main__':
    # test_get_tf_corrcoef()
n_neurons = 1500
n_stim = 1000
sigma = .5
dim = 2
stim = get_random_stimuli(n_stim, dim)
responses = get_gaussian_population_response(stim, n_neurons, dim, sigma)
similarity = np.corrcoef(responses)
# perturbed = get_population_response_with_similarity(n_neurons, similarity)
perturbed = get_similarity_via_random_subsets(n_neurons, similarity, 300, 50)
plt.subplot(2,3,1)
plt.imshow(np.corrcoef(responses), vmin=-1, vmax=1)
plt.title('Original Similarity')
plt.subplot(2,3,2)
plt.imshow(np.corrcoef(perturbed), vmin=-1, vmax=1)
plt.title('Optimized Similarity')
plt.subplot(2,3,3)
plt.imshow(np.corrcoef(perturbed) - np.corrcoef(responses), vmin=-1, vmax=1)
plt.title('Difference')
plt.subplot(2,3,4)
plt.imshow(responses)
plt.title('Original Responses')
plt.subplot(2,3,5)
plt.imshow(perturbed)
plt.title('Optimized Responses')
plt.subplot(2,3,6)
plt.imshow(perturbed - responses)
plt.title('Difference')
plt.show()
| 38.829114 | 121 | 0.68639 |
7949b28ad48d7dda88914ce7fdd42833993026c7
| 2,442 |
py
|
Python
|
emailnetwork/tests/test_extract.py
|
supertypeai/emailnetwork
|
65f8f72301ef40c35fa6e36a8589d27ab961243a
|
[
"MIT"
] | 1 |
2021-01-29T04:26:36.000Z
|
2021-01-29T04:26:36.000Z
|
emailnetwork/tests/test_extract.py
|
supertypeai/emailnetwork
|
65f8f72301ef40c35fa6e36a8589d27ab961243a
|
[
"MIT"
] | null | null | null |
emailnetwork/tests/test_extract.py
|
supertypeai/emailnetwork
|
65f8f72301ef40c35fa6e36a8589d27ab961243a
|
[
"MIT"
] | 1 |
2022-01-21T09:03:50.000Z
|
2022-01-21T09:03:50.000Z
|
import os
import datetime
from unittest import TestCase
from emailnetwork.extract import MBoxReader, extract_meta
from emailnetwork.emails import EmailAddress, EmailMeta
MBOX_PATH = f'{os.path.dirname(__file__)}/test.mbox'
class TestExtract(TestCase):
def setUp(self):
self.reader = MBoxReader(MBOX_PATH)
self.emails = self.reader.extract()
def tearDown(self):
self.reader = None
def test_read_mbox(self):
self.assertTrue(isinstance(self.reader, MBoxReader))
def test_length_mbox(self):
self.assertEqual(len(self.reader), 140)
def test_extract(self):
# self.assertTrue(isinstance(next(self.emails), EmailMeta))
firstemail = next(self.emails)
self.assertIsInstance(firstemail, EmailMeta)
self.assertIsInstance(firstemail.subject, str)
self.assertIsInstance(firstemail.date, datetime.datetime)
for msg in self.emails:
self.assertGreaterEqual(len(msg.recipients), 1)
self.assertIsInstance(msg.cc, list)
def test_email_address(self):
firstemail = next(self.emails)
self.assertIsInstance(firstemail.sender, EmailAddress)
self.assertIsInstance(firstemail.sender.name, str)
self.assertIsInstance(firstemail.sender.email, str)
def test_filter_by_date(self):
newmails = self.reader.filter_by_date(">=", "2020-01-01")
self.assertEqual(len(newmails), 4)
for email in newmails:
self.assertGreater(email.date, datetime.datetime(2020,1,1))
self.assertLess(email.date, datetime.datetime.now())
oldmails = self.reader.filter_by_date("<=", "2019-12-31")
self.assertEqual(len(oldmails), 136)
exactmails = self.reader.filter_by_date("==", "2020-04-17")
self.assertEqual(len(exactmails), 1)
self.assertEqual(exactmails[0].date.date(), datetime.date(2020, 4, 17))
# also need tests to fail with expected exception when not in [==, <=, >=]
def test_afunction_throws_exception(self):
self.assertRaises(ValueError, self.reader.filter_by_date, "<", "2019-12-31")
def test_extract_meta_single(self):
for email in self.reader.mbox:
emailmsg = extract_meta(email)
self.assertIsInstance(emailmsg, EmailMeta)
self.assertIsInstance(emailmsg.origin_domain, str)
self.assertIsInstance(emailmsg.subject, str)
| 37 | 84 | 0.677314 |
7949b2ef91c9a44425e0c4e22666bfc384fc22ad
| 1,110 |
py
|
Python
|
queue_rules/data/management/commands/clear_user_locks.py
|
benmckibben/queue-rules
|
8beaf6e306f42866a6a2ed1b9c67d94e29bacd33
|
[
"MIT"
] | null | null | null |
queue_rules/data/management/commands/clear_user_locks.py
|
benmckibben/queue-rules
|
8beaf6e306f42866a6a2ed1b9c67d94e29bacd33
|
[
"MIT"
] | 2 |
2021-06-10T20:11:42.000Z
|
2021-09-22T19:36:39.000Z
|
queue_rules/data/management/commands/clear_user_locks.py
|
benmckibben/queue-rules
|
8beaf6e306f42866a6a2ed1b9c67d94e29bacd33
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timezone
from django.core.management.base import BaseCommand
from data.models import UserLock
def _parse_datetime(string_dt: str) -> datetime:
return datetime.strptime(string_dt, "%Y-%m-%dT%H:%M:%S").replace(
tzinfo=timezone.utc
)
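# For example, _parse_datetime("2020-09-04T19:18:09") returns a timezone-aware
# datetime(2020, 9, 4, 19, 18, 9, tzinfo=timezone.utc). A typical (hypothetical)
# invocation of the management command would be:
#   python manage.py clear_user_locks --created-before 2020-09-04T19:18:09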
class Command(BaseCommand):
help = "Clear existing user locks."
def add_arguments(self, parser):
parser.add_argument(
"-c",
"--created-before",
action="store",
type=_parse_datetime,
default=None,
help=(
"Only delete locks created before this UTC datetime, provided in "
"the format YYYY-MM-DDTHH:MM:SS (such as 2020-09-04T19:18:09)"
),
)
def handle(self, *args, **options):
locks = UserLock.objects.all()
created_upper_bound = options["created_before"]
if created_upper_bound is not None:
locks = locks.filter(created__lt=created_upper_bound)
num_deleted = locks.delete()[0]
self.stdout.write(f"Deleted {num_deleted} locks.")
| 28.461538 | 82 | 0.614414 |
7949b34c2cd0fcdd01fae1066acd4df3e032196e
| 2,209 |
py
|
Python
|
tests/tests/correctness/EPLAnalytics/Flow_Manipulation/Filter/f_cor_014/run.py
|
rpeach-sag/apama-industry-analytics-kit
|
a3f6039915501d41251b6f7ec41b0cb8111baf7b
|
[
"Apache-2.0"
] | 3 |
2019-09-02T18:21:22.000Z
|
2020-04-17T16:34:57.000Z
|
tests/tests/correctness/EPLAnalytics/Flow_Manipulation/Filter/f_cor_014/run.py
|
rpeach-sag/apama-industry-analytics-kit
|
a3f6039915501d41251b6f7ec41b0cb8111baf7b
|
[
"Apache-2.0"
] | null | null | null |
tests/tests/correctness/EPLAnalytics/Flow_Manipulation/Filter/f_cor_014/run.py
|
rpeach-sag/apama-industry-analytics-kit
|
a3f6039915501d41251b6f7ec41b0cb8111baf7b
|
[
"Apache-2.0"
] | null | null | null |
# $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
# Start the correlator
correlator = self.startTest()
self.injectAnalytic(correlator)
self.injectDataSourceService(correlator)
self.injectFilter(correlator)
self.ready(correlator)
correlator.injectMonitorscript(['test.mon'], self.input)
self.waitForSignal('correlator.out', expr='TEST COMPLETE', condition='==1', timeout=5)
def validate(self):
# Basic sanity checks
self.checkSanity()
# Ensure the test output was correct
exprList=[]
exprList.append('TEST PASSED: 1')
exprList.append('FAILED TO CREATE ANALYTIC: 2')
exprList.append('FAILED TO CREATE ANALYTIC: 3')
exprList.append('FAILED TO CREATE ANALYTIC: 4')
exprList.append('FAILED TO CREATE ANALYTIC: 5')
exprList.append('FAILED TO CREATE ANALYTIC: 6')
exprList.append('FAILED TO CREATE ANALYTIC: 7')
exprList.append('FAILED TO CREATE ANALYTIC: 8')
exprList.append('FAILED TO CREATE ANALYTIC: 9')
exprList.append('FAILED TO CREATE ANALYTIC: 10')
exprList.append('FAILED TO CREATE ANALYTIC: 11')
exprList.append('FAILED TO CREATE ANALYTIC: 12')
exprList.append('TEST PASSED: 13')
exprList.append('TEST PASSED: 14')
exprList.append('TEST PASSED: 15')
exprList.append('TEST PASSED: 16')
self.assertOrderedGrep("correlator.out", exprList=exprList)
# Make sure that the we got the right number of actions/listeners called
self.assertLineCount('correlator.out', expr='TEST PASSED', condition='==5')
self.assertLineCount('correlator.out', expr='FAILED TO CREATE ANALYTIC:', condition='==11')
| 46.020833 | 343 | 0.732911 |