Dataset schema (one row per source file; "nullable" marks columns that may be null):
hexsha: string (length 40)
size: int64 (1 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 239)
max_stars_repo_name: string (length 5 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k), nullable
max_stars_repo_stars_event_min_datetime: string (length 24), nullable
max_stars_repo_stars_event_max_datetime: string (length 24), nullable
max_issues_repo_path: string (length 3 to 239)
max_issues_repo_name: string (length 5 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k), nullable
max_issues_repo_issues_event_min_datetime: string (length 24), nullable
max_issues_repo_issues_event_max_datetime: string (length 24), nullable
max_forks_repo_path: string (length 3 to 239)
max_forks_repo_name: string (length 5 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k), nullable
max_forks_repo_forks_event_min_datetime: string (length 24), nullable
max_forks_repo_forks_event_max_datetime: string (length 24), nullable
content: string (length 1 to 1.03M)
avg_line_length: float64 (1 to 958k)
max_line_length: int64 (1 to 1.03M)
alphanum_fraction: float64 (0 to 1)
hexsha: 79491bbcc059cc6a34bf45e026b3078878c0b539 | size: 14,875 | ext: py | lang: Python
max_stars: mklaren/kernel/kernel.py | tkemps/mklaren | d9e7890aaa26cb3877e1a82114ab1e52df595d96 | ["BSD-2-Clause"] | count 3 | 2019-10-28T17:20:37.000Z to 2020-08-20T22:59:18.000Z
max_issues: mklaren/kernel/kernel.py | tkemps/mklaren | d9e7890aaa26cb3877e1a82114ab1e52df595d96 | ["BSD-2-Clause"] | count null | null to null
max_forks: mklaren/kernel/kernel.py | tkemps/mklaren | d9e7890aaa26cb3877e1a82114ab1e52df595d96 | ["BSD-2-Clause"] | count 1 | 2019-10-28T17:20:35.000Z to 2019-10-28T17:20:35.000Z
content:
""" Methods related to calculation of kernel function values and kernel
matrices.
"""
import numpy as np
import scipy.sparse as sp
from sklearn.gaussian_process.kernels import Matern
from scipy.spatial.distance import cdist
# Install the GPy module to use included kernels
try:
import GPy
except ImportError:
pass
def correct_xy(x, y):
"""
Convert matrices to dense and correct shapes.
:param x: (``numpy.ndarray``) 2D or 1D array
:param y: (``numpy.ndarray``) 2D or 1D array
:return: (``numpy.ndarray``) Convert x, y to dense, 2D arrays.
"""
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape") or np.asarray(x).ndim == 0:
x = np.reshape(np.array([x]), (1, 1))
if not hasattr(y, "shape") or np.asarray(y).ndim == 0:
y = np.reshape(np.array([y]), (1, 1))
if np.asarray(x).ndim == 1: x = np.reshape(np.array([x]), (len(x), 1))
if np.asarray(y).ndim == 1: y = np.reshape(np.array([y]), (len(y), 1))
return x, y
def linear_kernel(x, y, b=0):
"""
The linear kernel (the usual dot product in n-dimensional space).
.. math::
k(\mathbf{x}, \mathbf{y}) = b + \mathbf{x}^T \mathbf{y}
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param b: (``float``) Bias term.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if isinstance(x, int):
return x * y
if sp.isspmatrix(x):
return b + np.array(x.dot(y.T).todense())
else:
return b + x.dot(y.T)
def linear_kernel_noise(x, y, b=1, noise=1):
"""
The linear kernel (the usual dot product in n-dimensional space). A noise term is
added explicitly to avoid singular kernel matrices
.. math::
k(\mathbf{x}, \mathbf{y}) = b + \mathbf{x}^T \mathbf{y} + noise \cdot (\mathbf{x} == \mathbf{y})
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param b: (``float``) Bias term.
:param noise: (``float``) Noise term.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
    if isinstance(x, int):
        return x * y
    if sp.isspmatrix(x):
        D = cdist(np.array(x.todense()), np.array(y.todense()), metric="euclidean")
        return b + np.array(x.dot(y.T).todense()) + noise * (D == 0)
    else:
        D = cdist(x, y, metric="euclidean")
        return b + x.dot(y.T) + noise * (D == 0)
def poly_kernel(x, y, degree=2, b=0):
"""
The polynomial kernel.
.. math::
k(\mathbf{x}, \mathbf{y}) = (b + \mathbf{x}^T \mathbf{y})^p
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param degree: (``float``) Polynomial degree.
:param b: (``float``) Bias term.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
    if sp.isspmatrix(x):
        return (b + np.array(x.dot(y.T).todense())) ** degree
if not hasattr(x, "shape"):
return (b + x * y) ** degree
else:
return (b + x.dot(y.T)) ** degree
def sigmoid_kernel(x, y, c=1, b=0):
"""
The sigmoid kernel.
.. math::
        k(\mathbf{x}, \mathbf{y}) = tanh(c \mathbf{x}^T \mathbf{y} + b)
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param c: (``float``) Scale.
:param b: (``float``) Bias.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape"):
return np.tanh(c * x * y + b)
else:
return np.tanh(c * x.dot(y.T) + b)
def exponential_kernel(x, y, sigma=2.0, gamma=None):
"""
    The exponential quadratic / radial basis function (RBF) kernel.
    .. math::
        k(\mathbf{x}, \mathbf{y}) = exp\{-\dfrac{\|\mathbf{x} - \mathbf{y}\|^2}{2 \sigma^2} \}
    or
    .. math::
        k(\mathbf{x}, \mathbf{y}) = exp\{-\gamma \|\mathbf{x} - \mathbf{y}\|^2 \}
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param sigma: (``float``) Length scale.
:param gamma: (``float``) Scale.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if gamma is None:
gamma = 1.0 / (2.0 * sigma ** 2)
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape"):
return np.exp(-gamma * np.linalg.norm(x - y, ord=2)**2)
if np.asarray(x).ndim == 0:
return np.exp(-gamma * (x - y)**2)
if len(x.shape) >= 2 or len(y.shape) >= 2:
return np.exp(-gamma * cdist(x, y, metric="euclidean")**2)
return np.exp(-gamma * np.linalg.norm(x - y, ord=2)**2)
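# Minimal usage sketch (illustrative only, not part of the original module;
# relies on ``np`` imported above):
#   >>> X = np.random.rand(5, 3)
#   >>> K = exponential_kernel(X, X, sigma=2.0)  # gamma defaults to 1 / (2 * sigma ** 2)
#   >>> K.shape
#   (5, 5)
#   >>> np.allclose(np.diag(K), 1.0)             # k(x, x) = exp(0) = 1
#   True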
def exponential_cosine_kernel(x, y, gamma=1, omega=1):
"""
A sum of exponential quadratic and a cosine kernel.
.. math::
d = \|\mathbf{x} - \mathbf{y}\|
.. math::
        k(\mathbf{x}, \mathbf{y}) = \dfrac{1}{2} exp\{-\gamma d^2\} + \dfrac{1}{2} cos(\omega d^2)
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param omega: (``float``) Frequency of the oscillation.
:param gamma: (``float``) Scale.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape"):
D = np.linalg.norm(x - y, ord=2)
elif np.asarray(x).ndim == 0:
D = np.abs(x - y)
elif len(x.shape) >= 2 or len(y.shape) >= 2:
D = cdist(x, y, metric="euclidean")
else:
D = np.linalg.norm(x - y, ord=2)
return 0.5 * np.exp(-gamma * D**2) + 0.5 * np.cos(omega * D**2)
def exponential_absolute(x, y, sigma=2.0, gamma=None):
"""
    The exponential absolute (Laplacian) kernel, based on the L1 distance.
    .. math::
        k(\mathbf{x}, \mathbf{y}) = exp\{-\dfrac{\|\mathbf{x} - \mathbf{y}\|_1}{2 \sigma^2} \}
    or
    .. math::
        k(\mathbf{x}, \mathbf{y}) = exp\{-\gamma \|\mathbf{x} - \mathbf{y}\|_1 \}
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param sigma: (``float``) Length scale.
:param gamma: (``float``) Scale.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if gamma is None:
gamma = 1.0 / (2.0 * sigma ** 2)
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape"):
return np.exp(-gamma * np.linalg.norm(x - y, ord=1))
if np.asarray(x).ndim == 0:
return np.exp(-gamma * np.absolute(x - y))
if len(x.shape) >= 2 or len(y.shape) >= 2:
return np.exp(-gamma * cdist(x, y, metric="cityblock"))
return np.exp(-gamma * np.linalg.norm(x - y, ord=1))
rbf_kernel = exponential_kernel
def periodic_kernel(x, y, sigma=1, per=1, l=1):
"""
The periodic kernel.
Defined as in http://www.cs.toronto.edu/~duvenaud/cookbook/index.html.
.. math::
        k(\mathbf{x}, \mathbf{y}) = \sigma^2 exp\{-\dfrac{2 sin^2(\pi \|\mathbf{x} - \mathbf{y}\| / per)}{l^2} \}
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param per: (``float``) Period.
:param l: (``float``) Length scale.
:param sigma: (``float``) Variance.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape"):
return sigma**2 * np.exp(- 2 * np.sin(np.pi * np.absolute(x - y) / per) ** 2 / l ** 2)
if np.asarray(x).ndim == 0:
return sigma**2 * np.exp(- 2 * np.sin(np.pi * np.absolute(x - y) / per) ** 2 / l ** 2)
if len(x.shape) >= 2 or len(y.shape) >= 2:
return sigma ** 2 * np.exp(- 2 * np.sin(np.pi * cdist(x, y, metric="euclidean") / per) ** 2 / l ** 2)
return sigma**2 * np.exp(- 2 * np.sin(np.pi * np.absolute(x - y) / per) ** 2 / l ** 2)
def matern_kernel(x, y, l=1.0, nu=1.5):
"""
The Matern kernel wrapped from Scikit learn.
.. math::
        k(\mathbf{x}, \mathbf{y}) = \sigma^2 \dfrac{2^{1-\nu}}{\Gamma(\nu)} (\sqrt{2\nu} \dfrac{d}{l})^{\nu} K_{\nu} (\sqrt{2\nu} \dfrac{d}{l})
    where :math:`\Gamma` is the gamma function, :math:`K_{\nu}` is the modified Bessel function
    of the second kind, and :math:`l` and :math:`\nu` are non-negative parameters of the covariance.
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param l: (``float``) Length scale.
:param nu: (``float``) Differentiability of the kernel.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
mk = Matern(length_scale=l, nu=nu)
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape") or np.asarray(x).ndim == 0:
x = np.reshape(np.array([x]), (1, 1))
if not hasattr(y, "shape") or np.asarray(y).ndim == 0:
y = np.reshape(np.array([y]), (1, 1))
if np.asarray(x).ndim == 1: x = np.reshape(np.array([x]), (len(x), 1))
if np.asarray(y).ndim == 1: y = np.reshape(np.array([y]), (len(y), 1))
return mk(x, y)
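# Minimal usage sketch (illustrative only, not part of the original module;
# relies on scikit-learn's ``Matern`` kernel imported above):
#   >>> X = np.random.rand(4, 2)
#   >>> K = matern_kernel(X, X, l=1.0, nu=1.5)
#   >>> K.shape
#   (4, 4)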
def matern32_gpy(x, y, lengthscale=1):
"""
Temp: GPy wrapper for the matern kernel.
"""
x, y = correct_xy(x, y)
k = GPy.kern.Matern32(input_dim=x.shape[1], lengthscale=lengthscale)
return k.K(x, y)
def matern52_gpy(x, y, lengthscale=1):
"""
Temp: GPy wrapper for the matern kernel.
"""
x, y = correct_xy(x, y)
k = GPy.kern.Matern52(input_dim=x.shape[1], lengthscale=lengthscale)
return k.K(x, y)
def periodic_gpy(x, y, lengthscale=1, period=6.28):
"""
    Temp: GPy wrapper for the periodic exponential kernel.
"""
x, y = correct_xy(x, y)
k = GPy.kern.PeriodicExponential(input_dim=x.shape[1], lengthscale=lengthscale, period=period)
return k.K(x, y)
def random_kernel(n):
"""
Generate a random kernel matrix of shape ``(n, n)``.
:param n: (``int``) Number of examples.
:return: (``numpy.ndarray``) Random positive semidefinite kernel matrix of shape ``(n, n)``.
"""
G = np.random.rand(n, n)
return G.T.dot(G)
def center_kernel(K):
"""
Center a kernel matrix.
.. math::
        \mathbf{K}_{c} = (\mathbf{I}-\dfrac{\mathbf{11}^T}{n})\mathbf{K}(\mathbf{I}-\dfrac{\mathbf{11}^T}{n})
:param K: (``numpy.ndarray``) Kernel matrix of shape ``(n, n)``.
:return: (``numpy.ndarray``) Centered kernel for a sample of points.
"""
m = int(K.shape[0])
o = np.ones((m, 1))
I = np.eye(m, m)
Ic = (I-o.dot(o.T)/m)
return Ic.dot(K).dot(Ic)
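# Minimal usage sketch (illustrative only, not part of the original module):
# row and column means of a centered kernel matrix vanish up to numerical precision.
#   >>> K = random_kernel(6)
#   >>> Kc = center_kernel(K)
#   >>> np.allclose(Kc.mean(axis=0), 0) and np.allclose(Kc.mean(axis=1), 0)
#   True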
def center_kernel_low_rank(G):
"""
    Center the feature matrix such that :math:`\mathbf{G}_c \mathbf{G}_c^T` is centered.
.. math::
\mathbf{G}_c = (\mathbf{I} - \dfrac{\mathbf{11}^T}{n})\mathbf{G}
:param G: (``numpy.ndarray``) Low-rank approximation of the feature matrix of shape ``(n, k)``.
:return: (``numpy.ndarray``) Centered low-rank approximation of the feature space.
"""
return G - G.mean(axis=0)
def kernel_row_normalize(K):
"""
Divide inner products of examples by their norm in the feature space,
    effectively computing angles. Applicable only to symmetric kernels.
:param K: (``numpy.ndarray``) Kernel matrix of shape ``(n, n)``.
:return: (``numpy.ndarray``) Row-normalized kernel for a sample of points.
"""
assert K.shape[0] == K.shape[1]
d = np.diag(K).reshape((K.shape[0], 1))
Kn = np.sqrt(d.dot(d.T))
return K / Kn
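# Minimal usage sketch (illustrative only, not part of the original module):
# the normalized kernel has a unit diagonal, i.e. it stores cosine similarities
# in the feature space.
#   >>> K = random_kernel(5)
#   >>> Kn = kernel_row_normalize(K)
#   >>> np.allclose(np.diag(Kn), 1.0)
#   True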
def kernel_to_distance(K):
"""
    Compute the matrix of pairwise distances in the feature space induced by K,
    using :math:`d(x, y)^2 = k(x, x) + k(y, y) - 2 k(x, y)`. Applicable only to symmetric kernels.
:param K: (``numpy.ndarray``) Kernel matrix or Kinterface of shape ``(n, n)``.
:return: (``numpy.ndarray``) Distance matrix in the feature space induced by K.
"""
assert K.shape[0] == K.shape[1]
n = K.shape[0]
d = K.diag() if hasattr(K, "diag") else np.diag(K)
    D = np.sqrt(-2 * K[:, :] + d.reshape((n, 1)) + d.reshape((1, n)))
return D
def kernel_sum(x, y, kernels, kernels_args, kernels_weights=None):
"""
Sum of arbitrary kernel functions.
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param kernels: (``Iterable``) Iterable of pointers to kernels.
:param kernels_args: (``Iterable``) Iterable with dictionaries, of the same length as `kernels`.
Arguments are passed to kernels as kwargs.
:param kernels_weights: (``Iterable``) Iterable with kernel weights, of the same length as `kernels`.
    :return: (``numpy.ndarray``) Weighted sum of kernel values/matrices between data points.
"""
assert len(kernels) == len(kernels_args)
if kernels_weights is not None:
return sum(w * k(x, y, **kw) for k, kw, w in zip(kernels, kernels_args, kernels_weights))
else:
return sum(k(x, y, **kw) for k, kw in zip(kernels, kernels_args))
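# A minimal, illustrative self-test (not part of the original module): combine a
# linear and an RBF kernel with explicit weights via ``kernel_sum``.
if __name__ == "__main__":
    X_demo = np.random.rand(10, 4)
    K_demo = kernel_sum(X_demo, X_demo,
                        kernels=[linear_kernel, exponential_kernel],
                        kernels_args=[{"b": 0}, {"sigma": 1.0}],
                        kernels_weights=[0.5, 0.5])
    assert K_demo.shape == (10, 10)
    print("kernel_sum demo:", K_demo.shape)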
avg_line_length: 33.730159 | max_line_length: 147 | alphanum_fraction: 0.565513
hexsha: 79491dc86e8091609031852ddec02c2bfac743f2 | size: 1,427 | ext: py | lang: Python
max_stars: inference/inference_api_test/python_api_test/tests/cpu/test_deeplabv3_cpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | ["Apache-2.0"] | count 14 | 2020-03-04T07:52:07.000Z to 2022-02-14T01:39:14.000Z
max_issues: inference/inference_api_test/python_api_test/tests/cpu/test_deeplabv3_cpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | ["Apache-2.0"] | count 19 | 2020-03-04T03:52:10.000Z to 2021-12-23T07:02:07.000Z
max_forks: inference/inference_api_test/python_api_test/tests/cpu/test_deeplabv3_cpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | ["Apache-2.0"] | count 26 | 2020-03-04T05:39:09.000Z to 2022-02-14T01:43:28.000Z
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import logging
import struct
import six
import pytest
import nose
import numpy as np
from test_cpu_helper import TestModelInferenceCPU
TestBase = TestModelInferenceCPU()
@pytest.mark.p0
def test_inference_deeplabv3_cpu():
"""
    Run inference with the deeplabv3_mobilenetv2 model on CPU and
    check the output values against the expected results
Args:
None
Returns:
None
"""
model_name = "deeplabv3_mobilenetv2"
tmp_path = os.path.join(TestBase.model_root, "segmentation")
model_path = os.path.join(tmp_path, model_name, "model")
data_path = os.path.join(tmp_path, model_name, "data/data.json")
delta = 0.0001
res, exp = TestBase.get_infer_results(model_path, data_path)
for i in range(len(res)):
TestBase.check_data(res[i].flatten(), exp[i].flatten(), delta)
avg_line_length: 29.122449 | max_line_length: 74 | alphanum_fraction: 0.735109
hexsha: 79491e6b1733e96adf45c6ad682ad38ee82f75ef | size: 2,461 | ext: py | lang: Python
max_stars: src/progress.py | ryuichi1208/hakarumaru | 34478e0c4d545a196b584d04f73aca917c25b27a | ["Apache-2.0"] | count null | null to null
max_issues: src/progress.py | ryuichi1208/hakarumaru | 34478e0c4d545a196b584d04f73aca917c25b27a | ["Apache-2.0"] | count null | null to null
max_forks: src/progress.py | ryuichi1208/hakarumaru | 34478e0c4d545a196b584d04f73aca917c25b27a | ["Apache-2.0"] | count null | null to null
content:
from multiprocessing import Pool, cpu_count
from IPython.display import clear_output
import time
from datetime import datetime, timedelta
import sys
def progress(itr, total=None, update_interval=1, clear=True):
if total is None and hasattr(itr, '__len__'):
total = len(itr)
if total == 0:
return
if total:
print('0/{} 0s 0/s'.format(total))
else:
print('0 0s 0/s')
start_time = None
last_time = None
for i, x in enumerate(itr):
cur_time = time.time()
if start_time is None:
start_time = cur_time
last_time = cur_time
yield x
if cur_time - last_time > update_interval:
duration = cur_time - start_time
speed = (i + 1) / duration
duration_str = timedelta(seconds=round(duration))
if clear:
clear_output(wait=True)
if total:
duration_total = duration * total / (i + 1)
duration_remaining = duration_total - duration
duration_remaining_str = timedelta(seconds=round(duration_remaining))
pct = 100. * (i + 1) / total
print('{:.2f}% {}/{} {}<{} {:.2f}/s'.format(pct, i+1, total, duration_str, duration_remaining_str, speed))
else:
print('{} {} {:.2f}/s'.format(i+1, duration_str, speed))
last_time = cur_time
duration = time.time() - start_time
speed = (i + 1) / duration
duration_str = timedelta(seconds=round(duration))
if clear:
clear_output(wait=True)
print('{} {} {:.2f}/s'.format(i+1, duration_str, speed))
class job_wrapper(object):
def __init__(self, job):
self.job = job
def __call__(self, args):
i, task = args
return i, self.job(task)
def progress_parallel(job, tasks, total=None, processes=None, **kwargs):
if processes == 1:
return [job(task) for task in progress(tasks)]
results = []
if total is None and hasattr(tasks, '__len__'):
total = len(tasks)
if processes is None:
processes = cpu_count()
try:
with Pool(processes) as pool:
results = list(progress(pool.imap_unordered(job_wrapper(job), enumerate(tasks)),
total=total, **kwargs))
results.sort()
return [x for i,x in results]
except KeyboardInterrupt:
pass
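# A minimal, illustrative self-test (not part of the original module). It uses
# ``processes=1`` so no worker pool is spawned; note the module also requires
# IPython to be installed because of the ``clear_output`` import above.
if __name__ == "__main__":
    def _slow_square(x):
        time.sleep(0.01)
        return x * x

    demo_results = progress_parallel(_slow_square, range(50), processes=1)
    print("progress_parallel demo, sum of squares:", sum(demo_results))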
avg_line_length: 35.157143 | max_line_length: 122 | alphanum_fraction: 0.571312
hexsha: 79492002366036b14674a4bb3ac7af5ccbddd76a | size: 482 | ext: py | lang: Python
max_stars: gotypes.py | Soycid/dlgo | c3189e5867ce67875a4f53c93af3fb11bbe18ad8 | ["MIT"] | count null | null to null
max_issues: gotypes.py | Soycid/dlgo | c3189e5867ce67875a4f53c93af3fb11bbe18ad8 | ["MIT"] | count null | null to null
max_forks: gotypes.py | Soycid/dlgo | c3189e5867ce67875a4f53c93af3fb11bbe18ad8 | ["MIT"] | count null | null to null
content:
import enum
class Player(enum.Enum):
black = 1
white = 2
@property
def other(self):
return Player.black if self == Player.white else Player.white
from collections import namedtuple
class Point(namedtuple('Point', 'row col')):
def neighbors(self):
return [
Point(self.row - 1, self.col),
Point(self.row + 1, self.col),
Point(self.row, self.col - 1),
Point(self.row, self.col + 1),
]
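# A minimal, illustrative self-test (not part of the original file).
if __name__ == "__main__":
    p = Point(row=3, col=3)
    assert Point(2, 3) in p.neighbors()
    assert Player.black.other == Player.white
    print("gotypes demo OK")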
avg_line_length: 19.28 | max_line_length: 69 | alphanum_fraction: 0.568465
hexsha: 794920b9936db772e220fea4dc01f62ceab02fdb | size: 8,865 | ext: py | lang: Python
max_stars: Lib/site-packages/qwt/null_paintdevice.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | count null | null to null
max_issues: Lib/site-packages/qwt/null_paintdevice.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | count 20 | 2021-05-03T18:02:23.000Z to 2022-03-12T12:01:04.000Z
max_forks: Lib/site-packages/qwt/null_paintdevice.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | count null | null to null
content:
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
"""
QwtNullPaintDevice
------------------
.. autoclass:: QwtNullPaintDevice
:members:
"""
from qtpy.QtGui import QPaintEngine, QPainterPath, QPaintDevice
from qtpy import PYSIDE2
class QwtNullPaintDevice_PrivateData(object):
def __init__(self):
self.mode = QwtNullPaintDevice.NormalMode
class QwtNullPaintDevice_PaintEngine(QPaintEngine):
def __init__(self, paintdevice):
super(QwtNullPaintDevice_PaintEngine, self).__init__(QPaintEngine.AllFeatures)
self.__paintdevice = paintdevice
def begin(self, paintdevice):
self.setActive(True)
return True
def end(self):
self.setActive(False)
return True
def type(self):
return QPaintEngine.User
def drawRects(self, rects, rectCount=None):
if rectCount is None:
rectCount = len(rects)
device = self.nullDevice()
if device is None:
return
if device.mode() != QwtNullPaintDevice.NormalMode:
try:
QPaintEngine.drawRects(self, rects, rectCount)
except TypeError:
# PyQt <=4.9
QPaintEngine.drawRects(self, rects)
return
device.drawRects(rects, rectCount)
def drawLines(self, lines, lineCount=None):
if lineCount is None:
lineCount = len(lines)
device = self.nullDevice()
if device is None:
return
if device.mode() != QwtNullPaintDevice.NormalMode and not PYSIDE2:
try:
                QPaintEngine.drawLines(self, lines, lineCount)
except TypeError:
# PyQt <=4.9
QPaintEngine.drawLines(self, lines)
return
device.drawLines(lines, lineCount)
def drawEllipse(self, rect):
device = self.nullDevice()
if device is None:
return
if device.mode() != QwtNullPaintDevice.NormalMode:
            QPaintEngine.drawEllipse(self, rect)
return
device.drawEllipse(rect)
def drawPath(self, path):
device = self.nullDevice()
if device is None:
return
device.drawPath(path)
def drawPoints(self, points, pointCount=None):
if pointCount is None:
pointCount = len(points)
device = self.nullDevice()
if device is None:
return
if device.mode() != QwtNullPaintDevice.NormalMode:
try:
                QPaintEngine.drawPoints(self, points, pointCount)
except TypeError:
# PyQt <=4.9
QPaintEngine.drawPoints(self, points)
return
device.drawPoints(points, pointCount)
def drawPolygon(self, *args):
if len(args) == 3:
points, pointCount, mode = args
elif len(args) == 2:
points, mode = args
pointCount = len(points)
else:
raise TypeError("Unexpected arguments")
device = self.nullDevice()
if device is None:
return
if device.mode() == QwtNullPaintDevice.PathMode:
path = QPainterPath()
if pointCount > 0:
path.moveTo(points[0])
for i in range(1, pointCount):
path.lineTo(points[i])
if mode != QPaintEngine.PolylineMode:
path.closeSubpath()
device.drawPath(path)
return
device.drawPolygon(points, pointCount, mode)
def drawPixmap(self, rect, pm, subRect):
device = self.nullDevice()
if device is None:
return
device.drawPixmap(rect, pm, subRect)
def drawTextItem(self, pos, textItem):
device = self.nullDevice()
if device is None:
return
if device.mode() != QwtNullPaintDevice.NormalMode:
            QPaintEngine.drawTextItem(self, pos, textItem)
return
device.drawTextItem(pos, textItem)
def drawTiledPixmap(self, rect, pixmap, subRect):
device = self.nullDevice()
if device is None:
return
if device.mode() != QwtNullPaintDevice.NormalMode:
            QPaintEngine.drawTiledPixmap(self, rect, pixmap, subRect)
return
device.drawTiledPixmap(rect, pixmap, subRect)
def drawImage(self, rect, image, subRect, flags):
device = self.nullDevice()
if device is None:
return
device.drawImage(rect, image, subRect, flags)
def updateState(self, state):
device = self.nullDevice()
if device is None:
return
device.updateState(state)
def nullDevice(self):
if not self.isActive():
return
return self.__paintdevice
class QwtNullPaintDevice(QPaintDevice):
"""
A null paint device doing nothing
Sometimes important layout/rendering geometries are not
available or changeable from the public Qt class interface.
    (e.g. hidden in the style implementation).
`QwtNullPaintDevice` can be used to manipulate or filter out
this information by analyzing the stream of paint primitives.
    For example, `QwtNullPaintDevice` is used by `QwtPlotCanvas` to identify
styled backgrounds with rounded corners.
Modes:
* `NormalMode`:
All vector graphic primitives are painted by
the corresponding draw methods
* `PolygonPathMode`:
Vector graphic primitives ( beside polygons ) are mapped to a
`QPainterPath` and are painted by `drawPath`. In `PolygonPathMode`
mode only a few draw methods are called:
- `drawPath()`
- `drawPixmap()`
- `drawImage()`
- `drawPolygon()`
* `PathMode`:
Vector graphic primitives are mapped to a `QPainterPath`
and are painted by `drawPath`. In `PathMode` mode
only a few draw methods are called:
- `drawPath()`
- `drawPixmap()`
- `drawImage()`
"""
# enum Mode
NormalMode, PolygonPathMode, PathMode = list(range(3))
def __init__(self):
super(QwtNullPaintDevice, self).__init__()
self.__engine = None
self.__data = QwtNullPaintDevice_PrivateData()
def setMode(self, mode):
"""
Set the render mode
:param int mode: New mode
.. seealso::
:py:meth:`mode()`
"""
self.__data.mode = mode
def mode(self):
"""
:return: Render mode
.. seealso::
:py:meth:`setMode()`
"""
return self.__data.mode
def paintEngine(self):
if self.__engine is None:
self.__engine = QwtNullPaintDevice_PaintEngine(self)
return self.__engine
def metric(self, deviceMetric):
if deviceMetric == QPaintDevice.PdmWidth:
value = self.sizeMetrics().width()
elif deviceMetric == QPaintDevice.PdmHeight:
value = self.sizeMetrics().height()
elif deviceMetric == QPaintDevice.PdmNumColors:
value = 0xFFFFFFFF
elif deviceMetric == QPaintDevice.PdmDepth:
value = 32
elif deviceMetric in (
QPaintDevice.PdmPhysicalDpiX,
QPaintDevice.PdmPhysicalDpiY,
QPaintDevice.PdmDpiY,
QPaintDevice.PdmDpiX,
):
value = 72
elif deviceMetric == QPaintDevice.PdmWidthMM:
value = round(
self.metric(QPaintDevice.PdmWidth)
* 25.4
/ self.metric(QPaintDevice.PdmDpiX)
)
elif deviceMetric == QPaintDevice.PdmHeightMM:
value = round(
self.metric(QPaintDevice.PdmHeight)
* 25.4
/ self.metric(QPaintDevice.PdmDpiY)
)
else:
value = 0
return value
def drawRects(self, rects, rectCount):
pass
def drawLines(self, lines, lineCount):
pass
def drawEllipse(self, rect):
pass
def drawPath(self, path):
pass
def drawPoints(self, points, pointCount):
pass
def drawPolygon(self, points, pointCount, mode):
pass
def drawPixmap(self, rect, pm, subRect):
pass
def drawTextItem(self, pos, textItem):
pass
def drawTiledPixmap(self, rect, pm, subRect):
pass
def drawImage(self, rect, image, subRect, flags):
pass
def updateState(self, state):
pass
avg_line_length: 28.68932 | max_line_length: 86 | alphanum_fraction: 0.57665
hexsha: 7949217b1e3247521314699126ab2a3d77801479 | size: 153 | ext: py | lang: Python
max_stars: projects/skel/src/manage.py | d2emon/project-constructor | 217227acb14ee44affdd91b8d2b544442763c358 | ["WTFPL"] | count 48 | 2015-04-25T21:09:50.000Z to 2021-12-13T01:34:07.000Z
max_issues: manage.py | Depado/experiments | ad8c6d7d82ed015e01aea5476c05a0ac7de5bcf4 | ["WTFPL"] | count 1 | 2015-04-01T07:27:53.000Z to 2015-04-01T07:27:53.000Z
max_forks: manage.py | Depado/experiments | ad8c6d7d82ed015e01aea5476c05a0ac7de5bcf4 | ["WTFPL"] | count 15 | 2015-02-25T19:35:07.000Z to 2020-07-16T10:41:13.000Z
content:
# -*- coding: utf-8 -*-
from flask.ext.script import Manager
from app import app
manager = Manager(app)
if __name__ == '__main__':
manager.run()
avg_line_length: 13.909091 | max_line_length: 36 | alphanum_fraction: 0.666667
hexsha: 7949217e4455564fe2a85cfbff08033d3e61b2b1 | size: 16,187 | ext: py | lang: Python
max_stars: fast-reid/fastreid/modeling/backbones/osnet.py | icicle4/TranSTAM | 3dfda76cc507b3a3a4b25fbecab7df65fd8cab4d | ["Apache-2.0"] | count 71 | 2021-03-12T07:43:43.000Z to 2022-03-30T03:28:16.000Z
max_issues: fast-reid/fastreid/modeling/backbones/osnet.py | icicle4/TranSTAM | 3dfda76cc507b3a3a4b25fbecab7df65fd8cab4d | ["Apache-2.0"] | count 8 | 2021-04-06T03:02:58.000Z to 2022-02-16T14:05:47.000Z
max_forks: fast-reid/fastreid/modeling/backbones/osnet.py | icicle4/TranSTAM | 3dfda76cc507b3a3a4b25fbecab7df65fd8cab4d | ["Apache-2.0"] | count 7 | 2021-04-19T02:55:58.000Z to 2021-11-11T12:39:09.000Z
content:
# encoding: utf-8
"""
@author: xingyu liao
@contact: sherlockliao01@gmail.com
"""
# based on:
# https://github.com/KaiyangZhou/deep-person-reid/blob/master/torchreid/models/osnet.py
import logging
import torch
from torch import nn
from fastreid.layers import get_norm
from fastreid.utils import comm
from .build import BACKBONE_REGISTRY
logger = logging.getLogger(__name__)
model_urls = {
'osnet_x1_0':
'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
'osnet_x0_75':
'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
'osnet_x0_5':
'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
'osnet_x0_25':
'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
'osnet_ibn_x1_0':
'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
bn_norm,
num_splits,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = get_norm(bn_norm, out_channels, num_splits)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, bn_norm, num_splits, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = get_norm(bn_norm, out_channels, num_splits)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, bn_norm, num_splits, stride=1):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = get_norm(bn_norm, out_channels, num_splits)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, bn_norm, num_splits, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = get_norm(bn_norm, out_channels, num_splits)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels, bn_norm, num_splits):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = get_norm(bn_norm, out_channels, num_splits)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
x = self.relu(x)
return x
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None: num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm: self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU(inplace=True)
elif gate_activation == 'linear':
self.gate_activation = nn.Identity()
else:
raise RuntimeError(
"Unknown gate activation: {}".format(gate_activation)
)
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None: x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.gate_activation(x)
if self.return_gates: return x
return input * x
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(
self,
in_channels,
out_channels,
bn_norm,
num_splits,
IN=False,
bottleneck_reduction=4,
**kwargs
):
super(OSBlock, self).__init__()
mid_channels = out_channels // bottleneck_reduction
self.conv1 = Conv1x1(in_channels, mid_channels, bn_norm, num_splits)
self.conv2a = LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits)
self.conv2b = nn.Sequential(
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
)
self.conv2c = nn.Sequential(
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
)
self.conv2d = nn.Sequential(
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
LightConv3x3(mid_channels, mid_channels, bn_norm, num_splits),
)
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn_norm, num_splits)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels, bn_norm, num_splits)
self.IN = None
if IN: self.IN = nn.InstanceNorm2d(out_channels, affine=True)
self.relu = nn.ReLU(True)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2a = self.conv2a(x1)
x2b = self.conv2b(x1)
x2c = self.conv2c(x1)
x2d = self.conv2d(x1)
x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)
x3 = self.conv3(x2)
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
if self.IN is not None:
out = self.IN(out)
return self.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. arXiv preprint, 2019.
"""
def __init__(
self,
blocks,
layers,
channels,
bn_norm,
num_splits,
IN=False,
**kwargs
):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
# convolutional backbone
self.conv1 = ConvLayer(3, channels[0], 7, bn_norm, num_splits, stride=2, padding=3, IN=IN)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(
blocks[0],
layers[0],
channels[0],
channels[1],
bn_norm,
num_splits,
reduce_spatial_size=True,
IN=IN
)
self.conv3 = self._make_layer(
blocks[1],
layers[1],
channels[1],
channels[2],
bn_norm,
num_splits,
reduce_spatial_size=True
)
self.conv4 = self._make_layer(
blocks[2],
layers[2],
channels[2],
channels[3],
bn_norm,
num_splits,
reduce_spatial_size=False
)
self.conv5 = Conv1x1(channels[3], channels[3], bn_norm, num_splits)
self._init_params()
def _make_layer(
self,
block,
layer,
in_channels,
out_channels,
bn_norm,
num_splits,
reduce_spatial_size,
IN=False
):
layers = []
layers.append(block(in_channels, out_channels, bn_norm, num_splits, IN=IN))
for i in range(1, layer):
layers.append(block(out_channels, out_channels, bn_norm, num_splits, IN=IN))
if reduce_spatial_size:
layers.append(
nn.Sequential(
Conv1x1(out_channels, out_channels, bn_norm, num_splits),
nn.AvgPool2d(2, stride=2),
)
)
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
from collections import OrderedDict
import warnings
import logging
logger = logging.getLogger(__name__)
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
if comm.is_main_process():
gdown.download(model_urls[key], cached_file, quiet=False)
comm.synchronize()
state_dict = torch.load(cached_file, map_location=torch.device('cpu'))
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights from "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(cached_file)
)
else:
logger.info(
'Successfully loaded imagenet pretrained weights from "{}"'.
format(cached_file)
)
if len(discarded_layers) > 0:
logger.info(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
@BACKBONE_REGISTRY.register()
def build_osnet_backbone(cfg):
"""
Create a OSNet instance from config.
Returns:
OSNet: a :class:`OSNet` instance
"""
# fmt: off
pretrain = cfg.MODEL.BACKBONE.PRETRAIN
pretrain_path = cfg.MODEL.BACKBONE.PRETRAIN_PATH
with_ibn = cfg.MODEL.BACKBONE.WITH_IBN
bn_norm = cfg.MODEL.BACKBONE.NORM
num_splits = cfg.MODEL.BACKBONE.NORM_SPLIT
depth = cfg.MODEL.BACKBONE.DEPTH
num_blocks_per_stage = [2, 2, 2]
num_channels_per_stage = {"x1_0": [64, 256, 384, 512], "x0_75": [48, 192, 288, 384], "x0_5": [32, 128, 192, 256],
"x0_25": [16, 64, 96, 128]}[depth]
model = OSNet([OSBlock, OSBlock, OSBlock], num_blocks_per_stage, num_channels_per_stage,
bn_norm, num_splits, IN=with_ibn)
if pretrain:
        # Load pretrained weights from pretrain_path if it is specified
if pretrain_path:
try:
state_dict = torch.load(pretrain_path, map_location=torch.device('cpu'))
logger.info(f"Loading pretrained model from {pretrain_path}")
model.load_state_dict(state_dict)
except FileNotFoundError as e:
logger.info(f'{pretrain_path} is not found! Please check this path.')
raise e
except KeyError as e:
logger.info("State dict keys error! Please check the state dict.")
raise e
else:
if with_ibn:
pretrain_key = "osnet_ibn_" + depth
else:
pretrain_key = "osnet_" + depth
init_pretrained_weights(model, pretrain_key)
return model
avg_line_length: 30.143389 | max_line_length: 117 | alphanum_fraction: 0.563044
hexsha: 794921a5ae0caed3631fb1de596f2943856dc3dd | size: 593 | ext: py | lang: Python
max_stars: Python Programs/Hackerrank Solutions/betweenTwoSets.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | ["MIT"] | count 77 | 2020-10-01T10:06:59.000Z to 2021-11-08T08:57:18.000Z
max_issues: Python Programs/Hackerrank Solutions/betweenTwoSets.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | ["MIT"] | count 46 | 2020-09-27T04:55:36.000Z to 2021-05-14T18:49:06.000Z
max_forks: Python Programs/Hackerrank Solutions/betweenTwoSets.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | ["MIT"] | count 327 | 2020-09-26T17:06:03.000Z to 2021-10-09T06:04:39.000Z
content:
def getTotalX(a, b):
aray=[]
possible = []
aray = [i for i in range(a[-1],b[0]+1)]
index = 0
possible = aray.copy()
for i in a:
for j in range(len(aray)):
if (aray[j]%i != 0):
if aray[j] in possible:
index = possible.index(aray[j])
possible.pop(index)
for j in b:
for i in range(len(aray)):
if (j%aray[i] != 0):
if aray[i] in possible:
index = possible.index(aray[i])
possible.pop(index)
return len(possible)
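# Illustrative check with a classic sample input (not part of the original file).
# Note the function assumes ``a`` is sorted ascending, since it uses ``a[-1]`` as
# the largest element of ``a``.
if __name__ == "__main__":
    print(getTotalX([2, 4], [16, 32, 96]))  # expected: 3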
avg_line_length: 29.65 | max_line_length: 51 | alphanum_fraction: 0.448567
hexsha: 79492435a02d6ac926e2d6c30cfd0232a203bd96 | size: 169 | ext: py | lang: Python
max_stars: cse/writer/__init__.py | CodeLionX/CommentSearchEngine | 9ebab14b93865a3e18db32aaa0bdb9579163da4a | ["MIT"] | count 2 | 2017-11-18T22:40:56.000Z to 2018-12-21T15:09:24.000Z
max_issues: cse/writer/__init__.py | CodeLionX/CommentSearchEngine | 9ebab14b93865a3e18db32aaa0bdb9579163da4a | ["MIT"] | count 49 | 2017-10-26T14:58:58.000Z to 2018-02-11T23:50:50.000Z
max_forks: cse/writer/__init__.py | CodeLionX/CommentSearchEngine | 9ebab14b93865a3e18db32aaa0bdb9579163da4a | ["MIT"] | count null | null to null
content:
"""
CSE - a web crawling and web searching application for news paper
comments written in Python
writer classes
"""
from cse.writer.CommentWriter import CommentWriter
avg_line_length: 18.777778 | max_line_length: 65 | alphanum_fraction: 0.792899
hexsha: 794924bc3b7c7b3b82e534753d96b7ff08371b34 | size: 106 | ext: py | lang: Python
max_stars: 32_app_config_in_excel/index.py | nagasudhirpulla/python_wrldc_training | c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6 | ["MIT"] | count null | null to null
max_issues: 32_app_config_in_excel/index.py | nagasudhirpulla/python_wrldc_training | c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6 | ["MIT"] | count null | null to null
max_forks: 32_app_config_in_excel/index.py | nagasudhirpulla/python_wrldc_training | c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6 | ["MIT"] | count 2 | 2020-09-30T16:32:18.000Z to 2020-10-23T01:13:51.000Z
content:
# %%
from appConfig import getConfig
# test the function for usage
print(getConfig('angleFolder'))
# %%
avg_line_length: 13.25 | max_line_length: 31 | alphanum_fraction: 0.716981
hexsha: 794924fd8bbe704f9aeebf7870b4850ab25602b1 | size: 904 | ext: py | lang: Python
max_stars: image_hosting_service/jd.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | ["BSD-3-Clause"] | count null | null to null
max_issues: image_hosting_service/jd.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | ["BSD-3-Clause"] | count null | null to null
max_forks: image_hosting_service/jd.py | fujiawei-dev/tookit-py | 5ab3a18a41885f6166150cc27183621b96f8f991 | ["BSD-3-Clause"] | count null | null to null
content:
"""
Date: 2022.02.02 19:56
Description: Omit
LastEditors: Rustle Karl
LastEditTime: 2022.02.02 19:56
"""
import base64
import json
import lxml.html
import requests
from .common import UploadResult, headers
session = requests.Session()
session.headers = headers
def upload(path) -> UploadResult:
result = UploadResult()
response = session.post(
"https://imio.jd.com/uploadfile/file/post.do",
{
"appId": "im.customer",
"clientType": "comet",
"s": base64.b64encode(open(path, "rb").read()),
},
)
if response.status_code == 200:
html: lxml.html.HtmlElement = lxml.html.fromstring(response.content)
body = json.loads(html.find("body").text)
if body["code"] == 0:
result.success = True
result.remote_url = body["path"]
result.message = body["desc"]
return result
avg_line_length: 21.023256 | max_line_length: 76 | alphanum_fraction: 0.612832
hexsha: 794927fe6e2b7b173e2270dc9585e466771fc29b | size: 7,155 | ext: py | lang: Python
max_stars: src/tools/deep_memory_profiler/tests/dmprof_test.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | ["Apache-2.0"] | count 9 | 2018-09-21T05:36:12.000Z to 2021-11-15T15:14:36.000Z
max_issues: tools/deep_memory_profiler/tests/dmprof_test.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | count null | null to null
max_forks: tools/deep_memory_profiler/tests/dmprof_test.py | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | count 3 | 2018-11-28T14:54:13.000Z to 2020-07-02T07:36:07.000Z
content:
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import logging
import os
import sys
import textwrap
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
try:
from collections import OrderedDict # pylint: disable=E0611
except ImportError:
SIMPLEJSON_PATH = os.path.join(ROOT_DIR, os.pardir, os.pardir, 'third_party')
sys.path.insert(0, SIMPLEJSON_PATH)
from simplejson import OrderedDict
import dmprof
from find_runtime_symbols import FUNCTION_SYMBOLS
from find_runtime_symbols import SOURCEFILE_SYMBOLS
from find_runtime_symbols import TYPEINFO_SYMBOLS
class SymbolMappingCacheTest(unittest.TestCase):
class MockBucketSet(object):
def __init__(self, addresses):
self._addresses = addresses
def iter_addresses(self, symbol_type): # pylint: disable=W0613
for address in self._addresses:
yield address
class MockSymbolFinder(object):
def __init__(self, mapping):
self._mapping = mapping
def find(self, address_list):
result = OrderedDict()
for address in address_list:
result[address] = self._mapping[address]
return result
_TEST_FUNCTION_CACHE = textwrap.dedent("""\
1 0x0000000000000001
7fc33eebcaa4 __gnu_cxx::new_allocator::allocate
7fc33ef69242 void DispatchToMethod
""")
_EXPECTED_TEST_FUNCTION_CACHE = textwrap.dedent("""\
1 0x0000000000000001
7fc33eebcaa4 __gnu_cxx::new_allocator::allocate
7fc33ef69242 void DispatchToMethod
2 0x0000000000000002
7fc33ef7bc3e std::map::operator[]
7fc34411f9d5 WTF::RefCounted::operator new
""")
_TEST_FUNCTION_ADDRESS_LIST1 = [
0x1, 0x7fc33eebcaa4, 0x7fc33ef69242]
_TEST_FUNCTION_ADDRESS_LIST2 = [
0x1, 0x2, 0x7fc33eebcaa4, 0x7fc33ef69242, 0x7fc33ef7bc3e, 0x7fc34411f9d5]
_TEST_FUNCTION_DICT = {
0x1: '0x0000000000000001',
0x2: '0x0000000000000002',
0x7fc33eebcaa4: '__gnu_cxx::new_allocator::allocate',
0x7fc33ef69242: 'void DispatchToMethod',
0x7fc33ef7bc3e: 'std::map::operator[]',
0x7fc34411f9d5: 'WTF::RefCounted::operator new',
}
def test_update(self):
symbol_mapping_cache = dmprof.SymbolMappingCache()
cache_f = cStringIO.StringIO()
cache_f.write(self._TEST_FUNCTION_CACHE)
# No update from self._TEST_FUNCTION_CACHE
symbol_mapping_cache.update(
FUNCTION_SYMBOLS,
self.MockBucketSet(self._TEST_FUNCTION_ADDRESS_LIST1),
self.MockSymbolFinder(self._TEST_FUNCTION_DICT), cache_f)
for address in self._TEST_FUNCTION_ADDRESS_LIST1:
self.assertEqual(self._TEST_FUNCTION_DICT[address],
symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address))
self.assertEqual(self._TEST_FUNCTION_CACHE, cache_f.getvalue())
# Update to self._TEST_FUNCTION_ADDRESS_LIST2
symbol_mapping_cache.update(
FUNCTION_SYMBOLS,
self.MockBucketSet(self._TEST_FUNCTION_ADDRESS_LIST2),
self.MockSymbolFinder(self._TEST_FUNCTION_DICT), cache_f)
for address in self._TEST_FUNCTION_ADDRESS_LIST2:
self.assertEqual(self._TEST_FUNCTION_DICT[address],
symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address))
self.assertEqual(self._EXPECTED_TEST_FUNCTION_CACHE, cache_f.getvalue())
class PolicyTest(unittest.TestCase):
class MockSymbolMappingCache(object):
def __init__(self):
self._symbol_caches = {
FUNCTION_SYMBOLS: {},
SOURCEFILE_SYMBOLS: {},
TYPEINFO_SYMBOLS: {},
}
def add(self, symbol_type, address, symbol):
self._symbol_caches[symbol_type][address] = symbol
def lookup(self, symbol_type, address):
symbol = self._symbol_caches[symbol_type].get(address)
return symbol if symbol else '0x%016x' % address
_TEST_POLICY = textwrap.dedent("""\
{
"components": [
"second",
"mmap-v8",
"malloc-v8",
"malloc-WebKit",
"mmap-catch-all",
"malloc-catch-all"
],
"rules": [
{
"name": "second",
"stacktrace": "optional",
"allocator": "optional"
},
{
"name": "mmap-v8",
"stacktrace": ".*v8::.*",
"allocator": "mmap"
},
{
"name": "malloc-v8",
"stacktrace": ".*v8::.*",
"allocator": "malloc"
},
{
"name": "malloc-WebKit",
"stacktrace": ".*WebKit::.*",
"allocator": "malloc"
},
{
"name": "mmap-catch-all",
"stacktrace": ".*",
"allocator": "mmap"
},
{
"name": "malloc-catch-all",
"stacktrace": ".*",
"allocator": "malloc"
}
],
"version": "POLICY_DEEP_3"
}
""")
def test_load(self):
policy = dmprof.Policy.parse(cStringIO.StringIO(self._TEST_POLICY), 'json')
self.assertTrue(policy)
self.assertEqual('POLICY_DEEP_3', policy.version)
def test_find(self):
policy = dmprof.Policy.parse(cStringIO.StringIO(self._TEST_POLICY), 'json')
self.assertTrue(policy)
symbol_mapping_cache = self.MockSymbolMappingCache()
symbol_mapping_cache.add(FUNCTION_SYMBOLS, 0x1212, 'v8::create')
symbol_mapping_cache.add(FUNCTION_SYMBOLS, 0x1381, 'WebKit::create')
bucket1 = dmprof.Bucket([0x1212, 0x013], False, 0x29492, '_Z')
bucket1.symbolize(symbol_mapping_cache)
bucket2 = dmprof.Bucket([0x18242, 0x1381], False, 0x9492, '_Z')
bucket2.symbolize(symbol_mapping_cache)
bucket3 = dmprof.Bucket([0x18242, 0x181], False, 0x949, '_Z')
bucket3.symbolize(symbol_mapping_cache)
self.assertEqual('malloc-v8', policy.find(bucket1))
self.assertEqual('malloc-WebKit', policy.find(bucket2))
self.assertEqual('malloc-catch-all', policy.find(bucket3))
class BucketsCommandTest(unittest.TestCase):
def test(self):
with open(os.path.join(ROOT_DIR, 'tests', 'output', 'buckets')) as output_f:
expected = output_f.read()
out = cStringIO.StringIO()
command = dmprof.BucketsCommand()
returncode = command.do([
'buckets',
os.path.join(ROOT_DIR, 'tests', 'data', 'heap.01234.0001.heap')], out)
self.assertEqual(0, returncode)
self.assertEqual(expected, out.getvalue())
class UploadCommandTest(unittest.TestCase):
def test(self):
command = dmprof.UploadCommand()
returncode = command.do([
'upload',
'--gsutil',
os.path.join(ROOT_DIR, 'tests', 'mock_gsutil.py'),
os.path.join(ROOT_DIR, 'tests', 'data', 'heap.01234.0001.heap'),
'gs://test-storage/'])
self.assertEqual(0, returncode)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main()
avg_line_length: 32.085202 | max_line_length: 80 | alphanum_fraction: 0.658281
hexsha: 794928155fb9e07130d09974c0a5ad72450d89f7 | size: 1,236 | ext: py | lang: Python
max_stars: 994/Rotting Oranges.py | cccccccccccccc/Myleetcode | fb3fa6df7c77feb2d252feea7f3507569e057c70 | ["Apache-2.0"] | count null | null to null
max_issues: 994/Rotting Oranges.py | cccccccccccccc/Myleetcode | fb3fa6df7c77feb2d252feea7f3507569e057c70 | ["Apache-2.0"] | count null | null to null
max_forks: 994/Rotting Oranges.py | cccccccccccccc/Myleetcode | fb3fa6df7c77feb2d252feea7f3507569e057c70 | ["Apache-2.0"] | count null | null to null
content:
from typing import List
from collections import deque
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
m = len(grid)
n = len(grid[0])
rotdeque = deque()
count_fresh = 0
count = 0
move = [[-1,0],[0,1],[0,-1],[1,0]]
for i in range(m):
for j in range(n):
if grid[i][j] == 2:
rotdeque.append((i,j))
elif grid[i][j] == 1:
count_fresh+=1
if count_fresh == 0:
return 0
while len(rotdeque)>0:
count +=1
size = len(rotdeque)
for i in range(size):
rot = rotdeque.popleft()
for c in move:
x = c[0]+rot[0]
y = c[1]+rot[1]
if x < 0 or y < 0 or x >= m or y >= n or grid[x][y] == 0 or grid[x][y] == 2:
continue
if grid[x][y] == 1:
grid[x][y] = 2
rotdeque.append((x,y))
count_fresh-=1
return count-1 if count_fresh == 0 else -1
A = Solution()
grid = [[2,1,1],[1,1,0],[0,1,1]]
print(A.orangesRotting(grid))
avg_line_length: 34.333333 | max_line_length: 96 | alphanum_fraction: 0.411812
hexsha: 7949282e7e611e26446fc8220a41ee3f2103c2d7 | size: 7,730 | ext: py | lang: Python
max_stars: gae/utils.py | MAhsanAkhtar/BDA_TERM_PROJECT | 68b0deb2b126f81924997b7d9f8c6c1ad53bbac0 | ["MIT"] | count 29 | 2018-01-08T21:31:12.000Z to 2022-02-14T01:11:01.000Z
max_issues: gae/utils.py | MAhsanAkhtar/BDA_TERM_PROJECT | 68b0deb2b126f81924997b7d9f8c6c1ad53bbac0 | ["MIT"] | count null | null to null
max_forks: gae/utils.py | MAhsanAkhtar/BDA_TERM_PROJECT | 68b0deb2b126f81924997b7d9f8c6c1ad53bbac0 | ["MIT"] | count 9 | 2018-01-09T21:04:16.000Z to 2021-10-18T15:50:35.000Z
content:
#MIT License
#
#Copyright (c) 2017 Willian Fuks
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""General functions to be used throught the services modules"""
import heapq
import datetime
import uuid
from collections import Counter
import time
from google.appengine.ext import ndb
from config import config
def get_yesterday_date():
"""Returns datetime for yesterday value
:rtype: `datetime.datetime`
:returns: yesterday's datetime
"""
return (datetime.datetime.now() +
datetime.timedelta(days=-1))
def load_query_job_body(date, **kwargs):
"""Returns the body to be used in a query job.
:type date: str
:param date: date to set filtering in query results.
:type kwargs:
    :type source.query_path: str
    :param source.query_path: path of the query file to run against BQ.
    :type source.project_id: str
    :param source.project_id: project where to run query from.
:type source.dataset_id: str
:param source.dataset_id: dataset where to run query from.
:type source.table_id: str
:param source.table_id: table where to run query from.
:type destination.table_id: str
:param destination.table_id: table_id where results should be saved.
:type destination.dataset_id: str
:param destination.dataset_id: dataset_id where results should be saved.
:type destination.project_id: str
:param destination.project_id: project_id where results should be saved.
:rtype: dict
:returns: dict containing body to setup job execution.
"""
query = load_query(date, **kwargs)
dest = kwargs['jobs']['export_customers']['query_job']['destination']
return {'jobReference': {
'projectId': dest['project_id'],
'jobId': str(uuid.uuid4())
},
'configuration': {
'query': {
'destinationTable': {
'datasetId': dest['dataset_id'],
'tableId': dest['table_id'],
'projectId': dest['project_id']
},
'maximumBytesBilled': 100000000000,
'query': query,
'useLegacySql': False,
'writeDisposition': 'WRITE_TRUNCATE'
}
}
}
def load_query(date=None, **kwargs):
"""Reads a query from a source file.
:type date: str
:param date: which date to filter on query results.
:param kwargs:
:type query_path: str
:param query_path: location where to find the query file.
:type source.project_id: str
:param source.project_id: project id to format query.
:type source.dataset_id: str
:param source.dataset_id: dataset_id to format query.
:type source.table_id: str
:param source.table_id: table_id to format query.
"""
source = kwargs['jobs']['export_customers']['query_job']['source']
date_str = get_yesterday_date().strftime("%Y%m%d") if not date else date
result = open(source['query_path']).read().format(
project_id=source['project_id'], dataset_id=source['dataset_id'],
table_id=source['table_id'], date=date_str).strip()
return result
def load_extract_job_body(date=None, **kwargs):
"""Returns json config to run extract jobs in BigQuery.
:type date: str
    :param date: used to set up the output path in GCS. If None then defaults to
                 yesterday's date. The format expected is "%Y%m%d" and it is
                 cast to "%Y-%m-%d".
:param kwargs:
:type project_id: str
"""
value = kwargs['jobs']['export_customers']['extract_job']
date = (get_yesterday_date().strftime("%Y-%m-%d") if not date else
format_date(date))
output = value['output'].format(date=date)
return {
'jobReference': {
'projectId': value['project_id'],
'jobId': str(uuid.uuid4())
},
'configuration': {
'extract': {
'sourceTable': {
'projectId': value['project_id'],
'datasetId': value['dataset_id'],
'tableId': value['table_id'],
},
'destinationUris': [output],
'destinationFormat': value['format'],
'compression': value['compression']
}
}
}
def format_date(input_date, format="%Y-%m-%d"):
"""Changes input date to a new format.
:type input_date: str
:param input_date: date string of format "%Y%m%d".
:type format: str
:param format: new format to port input date.
:rtype: str
:returns: date string in format `format`.
"""
return datetime.datetime.strptime(input_date, "%Y%m%d").strftime(
format)
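# Illustrative examples (not in the original module):
#   format_date("20200115")              -> "2020-01-15"
#   format_date("20200115", "%d/%m/%Y")  -> "15/01/2020"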
def process_url_date(date):
"""Gets the variable ``date`` from URL.
:type date: str
:param date: date to process.
:raises: `ValueError` if ``date`` is not in format "%Y%m%d" and is
not null.
:rtype: str
    :returns: `None` if `date` is empty, or a string representation of the date
"""
# if ``date`` is defined then it was sent as parameter in the URL request
if date:
try:
datetime.datetime.strptime(date, "%Y%m%d")
except ValueError:
raise
return date
class SkuModel(ndb.Model):
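    """Datastore model mapping a sku key to its related items and their scores."""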
@classmethod
def _get_kind(cls):
return config['recos']['kind']
items = ndb.StringProperty(repeated=True)
scores = ndb.FloatProperty(repeated=True)
def process_recommendations(entities, scores, n=10):
"""Process items and scores from entities retrieved from Datastore, combine
them up and sorts top n recommendations.
:type entities: list of `ndb.Model`
:param entities: entities retrieved from Datastore, following expected
pattern of ndb.Model(key=Key('kind', 'sku0'),
items=['sku1', 'sku2'], scores=[0.1, 0.83])
:type scores: dict
:param scores: each key corresponds to a sku and the value is the score
we observed our customer had with given sku, such as
{'sku0': 2.5}.
:type n: int
:param n: returns ``n`` largest scores from list of recommendations.
:rtype: list
:returns: list with top skus to recommend.
"""
t0 = time.time()
r = sum([Counter({e.items[i]: e.scores[i] * scores[e.key.id()]
for i in range(len(e.items))}) for e in entities], Counter()).items()
time_build_recos = time.time() - t0
    # heapq.nlargest works on any iterable, so no heapify pass is needed beforehand
    return {'result': [{"item": k, "score": v} for k, v in heapq.nlargest(
        n, r, key=lambda x: x[1])]}
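# Worked example of the scoring above (hypothetical values, mirroring the docstring):
# an entity keyed 'sku0' with items=['sku1', 'sku2'] and scores=[0.1, 0.83], combined
# with customer scores {'sku0': 2.5}, contributes Counter({'sku2': 2.075, 'sku1': 0.25}),
# so process_recommendations would rank 'sku2' first with score 2.075.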
| 34.052863 | 79 | 0.624062 |
79492b8b8018ea418c603ea8e7b81bbb2b56ae71
| 5,395 |
py
|
Python
|
FNCI/v7/inventories/createWorkflowDetails.py
|
flexera/sca-codeinsight-workflow-v6
|
4d15d2c921ab2d42dbd2cdb62f77ab79f65b6061
|
[
"MIT"
] | 1 |
2021-03-04T20:00:14.000Z
|
2021-03-04T20:00:14.000Z
|
FNCI/v7/inventories/createWorkflowDetails.py
|
flexera/sca-codeinsight-workflow-v6
|
4d15d2c921ab2d42dbd2cdb62f77ab79f65b6061
|
[
"MIT"
] | 1 |
2021-06-02T02:54:23.000Z
|
2021-06-02T02:54:23.000Z
|
FNCI/v7/inventories/createWorkflowDetails.py
|
flexera/sca-codeinsight-workflow-v6
|
4d15d2c921ab2d42dbd2cdb62f77ab79f65b6061
|
[
"MIT"
] | null | null | null |
'''
Copyright 2020 Flexera Software LLC
See LICENSE.TXT for full license text
SPDX-License-Identifier: MIT
Author : sgeary
Created On : Tue Feb 11 2020
File : createWorkflowDetails.py
'''
import logging
import requests
import sys
import config
logger = logging.getLogger(__name__)
#######################################################################
# If the calling app is a flask app then we can use
# the flask abort function to catch exceptions
# so see if its defined in a common config file
try:
FLASKAPP = config.FLASKAPP
except:
FLASKAPP = False
if FLASKAPP:
from flask import abort
#######################################################################
FNCI_API = "FNCI Create Workflow Details API"
ENDPOINT_URL = config.BASEURL + "inventories/"
#------------------------------------------------------------------------------------------#
def update_inventory_workflow_details(inventoryId, UPDATEDETAILS, authToken):
logger.info("Entering update_inventory_workflow_details")
RESTAPI_URL = ENDPOINT_URL + str(inventoryId) + "/workflows"
logger.debug(" RESTAPI_URL: %s" %RESTAPI_URL)
headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + authToken}
updateBody = '''
['''
for key in UPDATEDETAILS:
updateBody += '''
{
"key": "''' + key + '''",
"value": "''' + str(UPDATEDETAILS[key]) + '''"
},'''
updateBody = updateBody[:-1] # Remove last comma
updateBody +='''
]
'''
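    # Illustrative only (the key/value pair is an assumption): for
    # UPDATEDETAILS = {"status": "Approved"} the body built above is the JSON array
    #   [ { "key": "status", "value": "Approved" } ]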
    # Make the REST API call with the workflow details
try:
response = requests.post(RESTAPI_URL, data=updateBody, headers=headers)
response.raise_for_status()
except requests.exceptions.ConnectionError:
# Connection Error - Is the server up and running?
abort_message = FNCI_API + " - Error Connecting to FNCI Server - " + (ENDPOINT_URL).split("codeinsight")[0] # Get rid of everything after codeinsight in url
logger.error(" %s" %(abort_message))
if FLASKAPP:
# Using error code 500 (Internal Server Error) to cover connection errors
# in the flask apps
abort(500, FNCI_API + " - %s" %abort_message)
else:
print(abort_message)
print("Is the FNCI server running?")
print("Exiting script")
sys.exit()
except requests.exceptions.RequestException as e: # Catch the exception for the logs but process below
logger.error(e)
    # We at least received a response from FNCI, so check the status code to see
    # whether there was an error or the expected data
if response.status_code == 200:
logger.debug(" Call to %s was successful." %FNCI_API)
logger.info("Inventory Workflow Details Updated")
elif response.status_code == 400:
# Bad Request
logger.error(" %s - Error: %s - Bad Request." %(FNCI_API, response.status_code ))
if FLASKAPP:
abort(400, FNCI_API + " - Bad Request - Look at debug log for more details")
else:
print("%s - Error: %s - Bad Request." %(FNCI_API, response.status_code ))
print(" Exiting script")
sys.exit()
elif response.status_code == 401:
# Unauthorized Access
logger.error(" %s - Error: %s - Authentication Failed: JWT token is not valid or user does not have correct permissions." %(FNCI_API, response.status_code ))
if FLASKAPP:
abort(401, FNCI_API + " - Authentication Failed: JWT token is not valid or user does not have correct permissions.")
else:
print("%s - Error: %s - Authentication Failed: JWT token is not valid or user does not have correct permissions." %(FNCI_API, response.status_code ))
print(" Exiting script")
sys.exit()
elif response.status_code == 404:
# Not Found
logger.error(" %s - Error: %s - URL endpoint not found: %s" %(FNCI_API, response.status_code, RESTAPI_URL ))
if FLASKAPP:
abort(400, FNCI_API + " - Bad Request - URL endpoint not found")
else:
print(" %s - Error: %s - URL endpoint not found: %s" %(FNCI_API, response.status_code, RESTAPI_URL ))
print(" Exiting script")
sys.exit()
elif response.status_code == 405:
# Method Not Allowed
logger.error(" %s - Error: %s - Method (GET/POST/PUT//DELETE/ETC) Not Allowed." %(FNCI_API, response.status_code ))
if FLASKAPP:
abort(405, FNCI_API + " - Method Not Allowed.")
else:
print(" %s - Error: %s - Method (GET/POST/PUT//DELETE/ETC) Not Allowed." %(FNCI_API, response.status_code ))
print(" Exiting script")
sys.exit()
elif response.status_code == 500:
# Internal Server Error
logger.error(" %s - Error: %s - Internal Server Error." %(FNCI_API, response.status_code ))
if FLASKAPP:
abort(500, FNCI_API + " - Internal Server Error.")
else:
print(" %s - Error: %s - Internal Server Error." %(FNCI_API, response.status_code ))
print(" Exiting script")
sys.exit()
| 40.56391 | 169 | 0.573865 |
79492b90db34a88b583ba424761726e47e00dcb2
| 1,082 |
py
|
Python
|
kubernetes/test/test_v1_preconditions.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_preconditions.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_preconditions.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
    OpenAPI spec version: v1.15.6
    Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_preconditions import V1Preconditions # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1Preconditions(unittest.TestCase):
"""V1Preconditions unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1Preconditions(self):
"""Test V1Preconditions"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_preconditions.V1Preconditions() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.521739 | 124 | 0.698706 |
79492c47d79ccc93adb74a66a8ed4b31d0e6dc1b
| 2,128 |
py
|
Python
|
watermarking.py
|
mlouielu/watermarking
|
a317d26727b09e0b51018ba8f40c549a56ae181f
|
[
"MIT"
] | null | null | null |
watermarking.py
|
mlouielu/watermarking
|
a317d26727b09e0b51018ba8f40c549a56ae181f
|
[
"MIT"
] | null | null | null |
watermarking.py
|
mlouielu/watermarking
|
a317d26727b09e0b51018ba8f40c549a56ae181f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import random
from typing import Tuple
import click
from PIL import Image, ImageFont, ImageDraw
def get_text_im(font: ImageFont, text: str, color: Tuple, rotate: int) -> Image:
# Text
text_im = Image.new("RGBA", font.getsize(text), (255, 255, 255, 0))
text_draw = ImageDraw.Draw(text_im)
text_draw.text((0, 0), text, color, font=font)
# Rotate
rotate_im = text_im.rotate(rotate, expand=True)
return rotate_im
@click.command()
@click.argument("filename")
@click.argument("watermark_text")
@click.argument("outfile")
@click.option(
"--row-density", default=6, type=int, help="Row density of the watermark text"
)
@click.option(
"--col-density", default=8, type=int, help="Col density of the watermark text"
)
@click.option("--rotate", default=25, type=int, help="Rotate text by degree")
@click.option("--font-size", default=24, type=int, help="Watermark text font size")
def watermarking(
filename: str,
watermark_text: str,
outfile: str,
row_density: int = 6,
col_density: int = 8,
rotate: int = 25,
font_size: int = 24,
):
"""Watermarking FILENAME image with WATERMARK_TEXT."""
im = Image.open(filename).convert("RGBA")
draw = ImageDraw.Draw(im)
font = ImageFont.truetype("NotoSerifCJK-Bold.ttc", size=font_size)
# Watermark & Composite
for col in range(-im.size[0] // 2, im.size[0], im.size[0] // col_density):
for row in range(-im.size[1] // 2, im.size[1], im.size[1] // row_density):
color = tuple(
[random.randint(0, 255) for _ in range(3)] + [random.randint(70, 150)]
)
rotate_im = get_text_im(
font, watermark_text, color, rotate + random.randint(-5, 5)
)
watermark_im = Image.new("RGBA", im.size, (255, 255, 255, 0))
watermark_im.paste(
rotate_im,
(col + random.randrange(-80, 80), row + random.randrange(-40, 40)),
)
im = Image.alpha_composite(im, watermark_im)
im.save(outfile)
if __name__ == "__main__":
watermarking()
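# Example invocation (illustrative; the file names are assumptions):
#   python watermarking.py photo.png "DRAFT" photo_marked.png --rotate 30 --font-size 32
# Note that the script loads the font file "NotoSerifCJK-Bold.ttc", which must be
# resolvable by Pillow on the host system.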
| 31.294118 | 86 | 0.620301 |
79492cade74844c8535c1bb5848e831ddfedc302
| 10,645 |
py
|
Python
|
nlp_architect/data/ptb.py
|
ikuyamada/nlp-architect
|
2769bbf948b2509b4ac7dc287fddf907046bf283
|
[
"Apache-2.0"
] | 1 |
2020-07-18T08:35:52.000Z
|
2020-07-18T08:35:52.000Z
|
nlp_architect/data/ptb.py
|
SIVASHANKAR-S/nlp-architect
|
b9d7df0afde39b62b2c23e24211e368b82623abc
|
[
"Apache-2.0"
] | null | null | null |
nlp_architect/data/ptb.py
|
SIVASHANKAR-S/nlp-architect
|
b9d7df0afde39b62b2c23e24211e368b82623abc
|
[
"Apache-2.0"
] | 1 |
2020-09-30T17:29:26.000Z
|
2020-09-30T17:29:26.000Z
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Data loader for penn tree bank dataset
"""
import os
import sys
import numpy as np
import urllib.request
LICENSE_URL = {
"PTB": "http://www.fit.vutbr.cz/~imikolov/rnnlm/",
"WikiText-103": "https://einstein.ai/research/the-wikitext-long-term-dependency-"
"language-modeling-dataset",
}
SOURCE_URL = {
"PTB": "http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz",
"WikiText-103": "https://s3.amazonaws.com/research.metamind.io/wikitext/"
+ "wikitext-103-v1.zip",
}
FILENAME = {"PTB": "simple-examples", "WikiText-103": "wikitext-103"}
EXTENSION = {"PTB": "tgz", "WikiText-103": "zip"}
FILES = {
"PTB": lambda x: "data/ptb." + x + ".txt",
"WikiText-103": lambda x: "wiki." + x + ".tokens",
}
class PTBDictionary:
"""
Class for generating a dictionary of all words in the PTB corpus
"""
def __init__(self, data_dir=os.path.expanduser("~/data"), dataset="WikiText-103"):
"""
Initialize class
Args:
data_dir: str, location of data
dataset: str, name of data corpus
"""
self.data_dir = data_dir
self.dataset = dataset
self.filepath = os.path.join(data_dir, FILENAME[self.dataset])
self._maybe_download(data_dir)
self.word2idx = {}
self.idx2word = []
self.load_dictionary()
print("Loaded dictionary of words of size {}".format(len(self.idx2word)))
self.sos_symbol = self.word2idx["<sos>"]
self.eos_symbol = self.word2idx["<eos>"]
self.save_dictionary()
def add_word(self, word):
"""
Method for adding a single word to the dictionary
Args:
word: str, word to be added
Returns:
None
"""
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def load_dictionary(self):
"""
Populate the corpus with words from train, test and valid splits of data
Returns:
None
"""
for split_type in ["train", "test", "valid"]:
path = os.path.join(
self.data_dir, FILENAME[self.dataset], FILES[self.dataset](split_type)
)
# Add words to the dictionary
with open(path, "r") as fp:
tokens = 0
for line in fp:
words = ["<sos>"] + line.split() + ["<eos>"]
tokens += len(words)
for word in words:
self.add_word(word)
def save_dictionary(self):
"""
Save dictionary to file
Returns:
None
"""
with open(os.path.join(self.data_dir, "dictionary.txt"), "w") as fp:
for k in self.word2idx:
fp.write("%s,%d\n" % (k, self.word2idx[k]))
def _maybe_download(self, work_directory):
"""
This function downloads the corpus if its not already present
Args:
work_directory: str, location to download data to
Returns:
None
"""
if not os.path.exists(self.filepath):
print(
"{} was not found in the directory: {}, looking for compressed version".format(
FILENAME[self.dataset], self.filepath
)
)
full_filepath = os.path.join(
work_directory, FILENAME[self.dataset] + "." + EXTENSION[self.dataset]
)
if not os.path.exists(full_filepath):
print("Did not find data")
print(
"PTB can be downloaded from http://www.fit.vutbr.cz/~imikolov/rnnlm/ \n"
"wikitext can be downloaded from"
" https://einstein.ai/research/the-wikitext-long-term-dependency-language"
"-modeling-dataset"
)
print(
"\nThe terms and conditions of the data set license apply. Intel does not "
"grant any rights to the data files or database\n"
)
response = input(
"\nTo download data from {}, please enter YES: ".format(
LICENSE_URL[self.dataset]
)
)
res = response.lower().strip()
if res == "yes" or (len(res) == 1 and res == "y"):
print("Downloading...")
self._download_data(work_directory)
self._uncompress_data(work_directory)
else:
print("Download declined. Response received {} != YES|Y. ".format(res))
print(
"Please download the model manually from the links above "
"and place in directory: {}".format(work_directory)
)
sys.exit()
else:
self._uncompress_data(work_directory)
def _download_data(self, work_directory):
"""
This function downloads the corpus
Args:
work_directory: str, location to download data to
Returns:
None
"""
work_directory = os.path.abspath(work_directory)
if not os.path.exists(work_directory):
os.mkdir(work_directory)
headers = {"User-Agent": "Mozilla/5.0"}
full_filepath = os.path.join(
work_directory, FILENAME[self.dataset] + "." + EXTENSION[self.dataset]
)
req = urllib.request.Request(SOURCE_URL[self.dataset], headers=headers)
data_handle = urllib.request.urlopen(req)
with open(full_filepath, "wb") as fp:
fp.write(data_handle.read())
print("Successfully downloaded data to {}".format(full_filepath))
def _uncompress_data(self, work_directory):
full_filepath = os.path.join(
work_directory, FILENAME[self.dataset] + "." + EXTENSION[self.dataset]
)
if EXTENSION[self.dataset] == "tgz":
import tarfile
with tarfile.open(full_filepath, "r:gz") as tar:
tar.extractall(path=work_directory)
if EXTENSION[self.dataset] == "zip":
import zipfile
with zipfile.ZipFile(full_filepath, "r") as zip_handle:
zip_handle.extractall(work_directory)
print(
"Successfully unzipped data to {}".format(
os.path.join(work_directory, FILENAME[self.dataset])
)
)
class PTBDataLoader:
"""
Class that defines data loader
"""
def __init__(
self,
word_dict,
seq_len=100,
data_dir=os.path.expanduser("~/data"),
dataset="WikiText-103",
batch_size=32,
skip=30,
split_type="train",
loop=True,
):
"""
Initialize class
Args:
word_dict: PTBDictionary object
seq_len: int, sequence length of data
data_dir: str, location of corpus data
dataset: str, name of corpus
batch_size: int, batch size
skip: int, number of words to skip over while generating batches
split_type: str, train/test/valid
loop: boolean, whether or not to loop over data when it runs out
"""
self.seq_len = seq_len
self.dataset = dataset
self.loop = loop
self.skip = skip
self.word2idx = word_dict.word2idx
self.idx2word = word_dict.idx2word
self.data = self.load_series(
os.path.join(data_dir, FILENAME[self.dataset], FILES[self.dataset](split_type))
)
self.random_index = np.random.permutation(
np.arange(0, self.data.shape[0] - self.seq_len, self.skip)
)
self.n_train = self.random_index.shape[0]
self.batch_size = batch_size
self.sample_count = 0
def __iter__(self):
return self
def __next__(self):
return self.get_batch()
def reset(self):
"""
Resets the sample count to zero, re-shuffles data
Returns:
None
"""
self.sample_count = 0
self.random_index = np.random.permutation(
np.arange(0, self.data.shape[0] - self.seq_len, self.skip)
)
def get_batch(self):
"""
Get one batch of the data
Returns:
None
"""
if self.sample_count + self.batch_size > self.n_train:
if self.loop:
self.reset()
else:
raise StopIteration("Ran out of data")
batch_x = []
batch_y = []
for _ in range(self.batch_size):
c_i = int(self.random_index[self.sample_count])
batch_x.append(self.data[c_i : c_i + self.seq_len])
batch_y.append(self.data[c_i + 1 : c_i + self.seq_len + 1])
self.sample_count += 1
batch = (np.array(batch_x), np.array(batch_y))
return batch
def load_series(self, path):
"""
Load all the data into an array
Args:
path: str, location of the input data file
        Returns:
            numpy array of word indexes for the whole file
        """
# Tokenize file content
with open(path, "r") as fp:
ids = []
for line in fp:
words = line.split() + ["<eos>"]
for word in words:
ids.append(self.word2idx[word])
data = np.array(ids)
return data
def decode_line(self, tokens):
"""
Decode a given line from index to word
Args:
tokens: List of indexes
Returns:
str, a sentence
"""
return " ".join([self.idx2word[t] for t in tokens])
| 32.753846 | 95 | 0.541381 |
79492cb876b3f8b211e329fc310cd1ac2d8ee45a
| 7,603 |
py
|
Python
|
tools/reader_base.py
|
bjlittle/mint
|
fe4862951e0c570caef6e86440b090650dca000b
|
[
"0BSD"
] | null | null | null |
tools/reader_base.py
|
bjlittle/mint
|
fe4862951e0c570caef6e86440b090650dca000b
|
[
"0BSD"
] | null | null | null |
tools/reader_base.py
|
bjlittle/mint
|
fe4862951e0c570caef6e86440b090650dca000b
|
[
"0BSD"
] | null | null | null |
import netCDF4
import numpy
import vtk
class ReaderBase(object):
PERIODICITY_LENGTH = 360.
def __init__(self):
"""
Constructor
No args
"""
self.vtk = {
'pointArray': [],
'pointData': vtk.vtkDoubleArray(),
'points': vtk.vtkPoints(),
'grid': vtk.vtkUnstructuredGrid(),
}
# 2
# 3--->----2
# | |
# 3 ^ ^ 1
# | |
# 0--->----1
# 0
self.edgeIndex2PointInds = {
0: (0, 1),
1: (1, 2),
2: (3, 2),
3: (0, 3),
}
def getLonLatPoints(self):
"""
Get the longitudes and latitudes in radian at the cell vertices
@return array
"""
return self.vtk['pointArray'][:, :2]
def printCellPoints(self):
"""
Print cells points
"""
grd = self.vtk['grid']
pts = self.vtk['points']
ncells = grd.GetNumberOfCells()
ptIds = vtk.vtkIdList()
for icell in range(ncells):
msg = 'cell {:10d} points '.format(icell)
grd.GetCellPoints(icell, ptIds)
npts = ptIds.GetNumberOfIds()
xs, ys = [], []
for i in range(npts):
ptId = ptIds.GetId(i)
x, y, _ = pts.GetPoint(ptId)
xs.append(x)
ys.append(y)
msg += '({:7.2f}, {:7.2f}) '.format(x, y)
area013, area231 = self.getQuadTriangleAreas(xs, ys)
msg += ' triangle areas {:10.2g} {:10.2g}'.format(area013, area231)
print(msg)
def loadFromVtkFile(self, filename):
"""
Load the grid and gfields form a VTK file
@param filename VTK file
"""
self.reader = vtk.vtkUnstructuredGridReader()
self.reader.SetFileName(filename)
self.reader.Update()
        self.vtk['grid'] = self.reader.GetOutput()
def saveToVtkFile(self, filename, binary=True):
"""
Save the grid to a VTK file
@param filename VTK file
"""
writer = vtk.vtkUnstructuredGridWriter()
if binary:
writer.SetFileTypeToBinary()
writer.SetFileName(filename)
writer.SetInputData(self.vtk['grid'])
writer.Update()
def getUnstructuredGrid(self):
"""
Get the unstructured grid
@return vtkUnstructuredGrid instance
"""
return self.vtk['grid']
def getNumberOfCells(self):
"""
Get the number of cells
@return number
"""
return self.vtk['grid'].GetNumberOfCells()
def getLonLat(self):
"""
Get the longitudes and latitudes as separate arrays
@return lon and lat arrays of size (numCells, 4)
"""
xy = self.vtk['pointArray'].reshape((self.getNumberOfCells(), 4, 3))
return xy[..., 0].copy(), xy[..., 1].copy()
def setLonLat(self, x, y):
"""
Set the longitudes and latitudes from numpy arrays
@param x longitudes, array of size numCells*4
@param y latitudes, array of size numCells*4
"""
self.vtk['pointArray'][:, 0] = x
self.vtk['pointArray'][:, 1] = y
def setEdgeField(self, name, data):
"""
Set edge field
@param name name of the field
@param data array of size (numCells, 4)
"""
self.edgeArray = data
self.edgeData = vtk.vtkDoubleArray()
self.edgeData.SetName(name)
self.edgeData.SetNumberOfComponents(4)
numCells = self.getNumberOfCells()
self.edgeData.SetNumberOfTuples(numCells)
self.edgeData.SetVoidArray(self.edgeArray, numCells*4, 1)
self.vtk['grid'].GetCellData().AddArray(self.edgeData)
def setPointField(self, name, data):
"""
Set point field
@param name name of the field
@param data array of size (numCells, 4)
"""
nComps = 1
# get the number of components from the third index, if present
if len(data.shape) > 2:
nComps = data.shape[2]
self.pointArray = data
self.pointData = vtk.vtkDoubleArray()
self.pointData.SetName(name)
self.pointData.SetNumberOfComponents(nComps)
numCells = self.getNumberOfCells()
self.pointData.SetNumberOfTuples(numCells*4)
self.pointData.SetVoidArray(self.pointArray, numCells*4*nComps, 1)
self.vtk['grid'].GetPointData().AddArray(self.pointData)
def setPointVectorField(self, name, uData, vData):
"""
Set vector field on cell points
@param name name of the field
        @param uData u component array of size (numCells, 4)
        @param vData v component array of size (numCells, 4)
"""
numCells = self.getNumberOfCells()
self.pointVectorArray = numpy.zeros((numCells*4, 3), numpy.float64)
self.pointVectorArray[:, 0] = uData.flat
self.pointVectorArray[:, 1] = vData.flat
self.pointVectorData = vtk.vtkDoubleArray()
self.pointVectorData.SetName(name)
# 4 points per cell, 3 components
self.pointVectorData.SetNumberOfComponents(3)
self.pointVectorData.SetNumberOfTuples(numCells*4)
self.pointVectorData.SetVoidArray(self.pointVectorArray, numCells*4*3, 1)
self.vtk['grid'].GetPointData().AddArray(self.pointVectorData)
def getEdgeFieldFromStreamData(self, streamFuncData):
"""
Get the edge integrated values from the nodal stream function data
@param streamFuncData stream function data
"""
numCells = self.getNumberOfCells()
edgeArray = numpy.zeros((numCells, 4), numpy.float64)
for ie in range(4):
i0, i1 = self.edgeIndex2PointInds[ie]
edgeArray[:, ie] = 0.5 * (streamFuncData[:, i1] + streamFuncData[:, i0]) #streamFuncData[:, i1] - streamFuncData[:, i0]
return edgeArray
def getLoopIntegralsFromStreamData(self, streamFuncData):
"""
Get the cell loop integral from the nodal stream function data
@param streamFuncData stream function data
"""
numCells = self.getNumberOfCells()
cellArray = numpy.zeros((numCells,), numpy.float64)
# going counterclockwise around the cell
for i0 in range(4):
i1 = (i0 + 1) % 4
cellArray[:] += streamFuncData[:, i1] - streamFuncData[:, i0]
return cellArray
def setLoopIntegrals(self, name, data):
"""
Set cell loop integral field
@param name name of the field
@param data array of size (numCells,)
"""
self.cellArray = data
self.cellData = vtk.vtkDoubleArray()
self.cellData.SetName(name)
self.cellData.SetNumberOfComponents(1)
numCells = self.getNumberOfCells()
self.cellData.SetNumberOfTuples(numCells)
self.cellData.SetVoidArray(self.cellArray, numCells*1, 1)
self.vtk['grid'].GetCellData().AddArray(self.cellData)
def getQuadTriangleAreas(self, lons, lats):
"""
Compute the two triangle cell areas of a quad
@param lons longitudes
@param lats latitudes
@return two areas
"""
lon00, lon10, lon11, lon01 = lons
lat00, lat10, lat11, lat01 = lats
area013 = 0.5*( (lon10 - lon00)*(lat01 - lat00) - (lat10 - lat00)*(lon01 - lon00) )
area231 = 0.5*( (lon01 - lon11)*(lat10 - lat11) - (lat01 - lat11)*(lon10 - lon11) )
return area013, area231
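    # Worked example (illustrative): for a unit square listed counterclockwise,
    # lons = (0., 1., 1., 0.) and lats = (0., 0., 1., 1.), the two half cross
    # products give area013 = 0.5 and area231 = 0.5, i.e. a total quad area of 1.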
| 32.080169 | 131 | 0.56846 |
79492cdc0024a2815b59470d3dc531eaab66d170
| 3,124 |
py
|
Python
|
app/app/settings.py
|
Hasib404/recipe-app
|
0f85dc0dcedefa43488439078b71b3aff6e4223e
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
Hasib404/recipe-app
|
0f85dc0dcedefa43488439078b71b3aff6e4223e
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
Hasib404/recipe-app
|
0f85dc0dcedefa43488439078b71b3aff6e4223e
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@66a3w@n2xwcarp+b9yh8v!p5vmyvrisc(1hutub$b&z56r9h8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| 25.193548 | 91 | 0.695583 |
79492d9d8667e4900afc068be394d3999978217d
| 676 |
py
|
Python
|
mint/util/prev_transaction_block.py
|
sai-genesis/rc1-test
|
56e565952b283450c8589296f87c31b1c67b8502
|
[
"Apache-2.0"
] | 12 |
2021-08-18T20:53:31.000Z
|
2022-03-15T21:45:13.000Z
|
mint/util/prev_transaction_block.py
|
sai-genesis/rc1-test
|
56e565952b283450c8589296f87c31b1c67b8502
|
[
"Apache-2.0"
] | 34 |
2021-08-18T19:12:11.000Z
|
2022-01-06T17:15:34.000Z
|
mint/util/prev_transaction_block.py
|
sai-genesis/rc1-test
|
56e565952b283450c8589296f87c31b1c67b8502
|
[
"Apache-2.0"
] | 7 |
2021-08-18T20:53:34.000Z
|
2022-03-15T08:37:40.000Z
|
from typing import Tuple
from mint.consensus.block_record import BlockRecord
from mint.consensus.blockchain_interface import BlockchainInterface
from mint.util.ints import uint128
def get_prev_transaction_block(
curr: BlockRecord,
blocks: BlockchainInterface,
total_iters_sp: uint128,
) -> Tuple[bool, BlockRecord]:
prev_transaction_block = curr
while not curr.is_transaction_block:
curr = blocks.block_record(curr.prev_hash)
if total_iters_sp > curr.total_iters:
prev_transaction_block = curr
is_transaction_block = True
else:
is_transaction_block = False
return is_transaction_block, prev_transaction_block
| 30.727273 | 67 | 0.767751 |
79492dbee81fde041cc3fe88cc97ca01b9d402f0
| 17,404 |
py
|
Python
|
mcbv/edit.py
|
akulakov/django
|
68970eae1733b2b252265d6d1384946f80e23ed8
|
[
"BSD-3-Clause"
] | 150 |
2015-01-13T02:25:41.000Z
|
2018-05-25T09:05:36.000Z
|
mcbv/edit.py
|
akulakov/django
|
68970eae1733b2b252265d6d1384946f80e23ed8
|
[
"BSD-3-Clause"
] | 9 |
2015-01-12T02:03:07.000Z
|
2018-04-12T20:45:09.000Z
|
mcbv/edit.py
|
akulakov/django-mcbv
|
68970eae1733b2b252265d6d1384946f80e23ed8
|
[
"BSD-3-Clause"
] | 164 |
2015-01-17T07:43:28.000Z
|
2018-05-26T06:26:55.000Z
|
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.db import models
from django.contrib import messages
from django.utils.functional import curry
from django.forms.formsets import formset_factory, BaseFormSet, all_valid
from django.forms.models import modelformset_factory
from base import TemplateResponseMixin, ContextMixin, View
from detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, BaseDetailView, DetailView
from list import MultipleObjectMixin, ListView
class FormMixin(ContextMixin):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
form_kwarg_user = False # provide request user to form
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial.copy()
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class=None):
"""
Returns an instance of the form to be used in this view.
"""
form_class = form_class or self.get_form_class()
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.form_kwarg_user:
kwargs['user'] = self.request.user
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_success_url(self):
"""
Returns the supplied success URL.
"""
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
"""
If the form or modelform are invalid, re-render the context data with the
data-filled form and errors.
"""
return self.get_context_data(form=form)
class FormSetMixin(FormMixin):
"""A mixin that provides a way to show and handle a formset in a request."""
formset_form_class = None
formset_initial = {}
formset_class = BaseFormSet
extra = 0
can_delete = False
# ignore_get_args = ("page", ) # TODO this may be better moved to the form class?
formset_kwarg_user = False # provide request user to form
success_url = None
def get_formset_initial(self):
return self.formset_initial.copy()
def get_formset_class(self):
return self.formset_class
def get_formset_form_class(self):
return self.formset_form_class
def get_formset(self, form_class=None):
form_class = form_class or self.formset_form_class
kwargs = dict()
Formset = formset_factory(form_class, extra=self.extra, can_delete=self.can_delete)
if self.form_kwarg_user:
kwargs["user"] = self.user
Formset.form = staticmethod(curry(form_class, **kwargs))
return Formset(**self.get_formset_kwargs())
def get_formset_kwargs(self):
kwargs = dict(initial=self.get_formset_initial())
if self.formset_kwarg_user:
kwargs["user"] = self.request.user
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_success_url(self):
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def formset_valid(self, formset):
for form in formset:
if form.has_changed():
if form.cleaned_data.get("DELETE"):
self.process_delete(form)
else:
self.process_form(form)
return HttpResponseRedirect(self.get_success_url())
def process_form(self, form):
form.save()
def process_delete(self, form):
"""Process checked 'delete' box."""
pass
def formset_invalid(self, formset):
return self.get_context_data(formset=formset)
class ModelFormSetMixin(FormSetMixin):
formset_model = None
formset_queryset = None
def get_formset_queryset(self):
if self.formset_queryset is not None:
queryset = self.formset_queryset
if hasattr(queryset, '_clone'):
queryset = queryset._clone()
elif self.formset_model is not None:
queryset = self.formset_model._default_manager.all()
else:
raise ImproperlyConfigured("'%s' must define 'formset_queryset' or 'formset_model'"
% self.__class__.__name__)
return queryset
def get_formset(self, form_class=None):
form_class = form_class or self.formset_form_class
kwargs = dict()
Formset = modelformset_factory(self.formset_model, extra=self.extra, can_delete=self.can_delete)
if self.form_kwarg_user:
kwargs["user"] = self.user
Formset.form = staticmethod(curry(form_class, **kwargs))
return Formset(**self.get_formset_kwargs())
def get_formset_kwargs(self):
kwargs = {
'initial' : self.get_formset_initial(),
'queryset' : self.get_formset_queryset(),
}
if self.formset_kwarg_user:
kwargs["user"] = self.request.user
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def process_delete(self, form):
"""Process checked 'delete' box."""
form.instance.delete()
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
form_model = None
modelform_class = None
modelform_queryset = None
modelform_context_object_name = None
modelform_pk_url_kwarg = 'mfpk'
modelform_valid_msg = None
def get_modelform_class(self):
"""Returns the form class to use in this view."""
if self.modelform_class:
return self.modelform_class
else:
if self.form_model is not None:
# If a model has been explicitly provided, use it
model = self.form_model
elif hasattr(self, 'modelform_object') and self.modelform_object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.modelform_object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_modelform_queryset().model
return model_forms.modelform_factory(model)
def get_modelform(self, form_class=None):
form_class = form_class or self.get_modelform_class()
return form_class(**self.get_modelform_kwargs())
def get_modelform_kwargs(self):
"""Returns the keyword arguments for instantiating the form."""
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.modelform_object})
return kwargs
def get_success_url(self):
"""Returns the supplied URL."""
if self.success_url:
url = self.success_url % self.modelform_object.__dict__
else:
try:
url = self.modelform_object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def modelform_valid(self, modelform):
self.modelform_object = modelform.save()
if self.modelform_valid_msg:
messages.info(self.request, self.modelform_valid_msg)
return HttpResponseRedirect(self.get_success_url())
def modelform_invalid(self, modelform):
return self.get_context_data(modelform=modelform)
def get_modelform_context_data(self, **kwargs):
"""
If an object has been supplied, inject it into the context with the
supplied modelform_context_object_name name.
"""
context = {}
obj = self.modelform_object
if obj:
context['modelform_object'] = obj
if self.modelform_context_object_name:
context[self.modelform_context_object_name] = obj
elif isinstance(obj, models.Model):
context[obj._meta.object_name.lower()] = obj
context.update(kwargs)
return context
def get_modelform_object(self, queryset=None):
return self.get_object( queryset or self.get_modelform_queryset(), self.modelform_pk_url_kwarg )
def get_modelform_queryset(self):
if self.modelform_queryset:
return self.modelform_queryset._clone()
else:
return self.get_queryset(self.form_model)
class ProcessFormView(View):
"""
A mixin that renders a form on GET and processes it on POST.
"""
def form_get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates a blank version of the form.
"""
return self.get_context_data( form=self.get_form() )
def formset_get(self, request, *args, **kwargs):
return self.get_context_data( formset=self.get_formset() )
def modelform_get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates a blank version of the form.
"""
return self.get_modelform_context_data( modelform=self.get_modelform() )
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
form = formset = modelform = None
if isinstance(self, DetailView):
self.detail_object = self.get_detail_object()
if isinstance(self, ListView):
self.object_list = self.get_list_queryset()
if isinstance(self, FormView):
form = self.get_form()
if isinstance(self, (FormSetView, ModelFormSetView)):
formset = self.get_formset()
if isinstance(self, UpdateView):
self.update_post(request, *args, **kwargs)
modelform = self.get_modelform()
if isinstance(self, CreateView):
self.create_post(request, *args, **kwargs)
modelform = self.get_modelform()
if (not form or form and form.is_valid()) and \
(not modelform or modelform and modelform.is_valid()) and \
(not formset or formset and formset.is_valid()):
if isinstance(self, FormView) : resp = self.form_valid(form)
if isinstance(self, (FormSetView, ModelFormSetView)) : resp = self.formset_valid(formset)
if isinstance(self, (UpdateView, CreateView)) : resp = self.modelform_valid(modelform)
return resp
else:
context = self.get_context_data()
update = context.update
if isinstance(self, FormView) : update(self.form_invalid(form))
if isinstance(self, (FormSetView, ModelFormSetView)) : update(self.formset_invalid(formset))
if isinstance(self, (UpdateView, CreateView)) : update(self.modelform_invalid(modelform))
return self.render_to_response(context)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
""" A base view for displaying a form """
class FormView(TemplateResponseMixin, BaseFormView):
""" A view for displaying a form, and rendering a template response. """
class BaseFormSetView(FormSetMixin, ProcessFormView):
"""A base view for displaying a formset."""
class FormSetView(TemplateResponseMixin, BaseFormSetView):
"""A view for displaying a formset, and rendering a template response."""
class BaseModelFormSetView(ModelFormSetMixin, ProcessFormView):
"""A base view for displaying a modelformset."""
class ModelFormSetView(TemplateResponseMixin, BaseModelFormSetView):
"""A view for displaying a modelformset, and rendering a template response."""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
Base view for creating an new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def create_get(self, request, *args, **kwargs):
self.modelform_object = None
return self.modelform_get(request, *args, **kwargs)
def create_post(self, request, *args, **kwargs):
self.modelform_object = None
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_modelform'
def get_template_names(self):
return self._get_template_names(self.modelform_object, self.form_model)
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def update_get(self, request, *args, **kwargs):
self.modelform_object = self.get_modelform_object()
return self.modelform_get(request, *args, **kwargs)
def update_post(self, request, *args, **kwargs):
self.modelform_object = self.get_modelform_object()
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
with a response rendered by template.
"""
template_name_suffix = '_modelform'
def get_template_names(self):
return self._get_template_names(self.modelform_object, self.form_model)
class CreateUpdateView(CreateView):
"""Update object if modelform_pk_url_kwarg is in kwargs, otherwise create it."""
modelform_create_class = None
def get_modelform_class(self):
if self.modelform_pk_url_kwarg in self.kwargs:
return self.modelform_class
else:
return self.modelform_create_class
def create_get(self, request, *args, **kwargs):
if self.modelform_pk_url_kwarg in self.kwargs:
self.modelform_object = self.get_modelform_object()
return self.modelform_get(request, *args, **kwargs)
else:
return super(CreateUpdateView, self).create_get(request, *args, **kwargs)
def create_post(self, request, *args, **kwargs):
if self.modelform_pk_url_kwarg in self.kwargs:
self.modelform_object = self.get_modelform_object()
else:
super(CreateUpdateView, self).create_post(request, *args, **kwargs)
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL.
"""
self.modelform_object = self.get_modelform_object()
self.modelform_object.delete()
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
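# Minimal usage sketch (illustrative; the Article model and form are assumptions, not part
# of this module):
#
#   class ArticleUpdateView(UpdateView):
#       form_model = Article
#       modelform_class = ArticleForm
#       success_url = "/articles/"
#
# ProcessFormView.post() above dispatches to whichever of form / formset / modelform the
# view mixes in, so a single view can combine detail, list and form handling.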
| 34.669323 | 108 | 0.639623 |
79492e0f436f93d1117bd5261c6798d6b0c3c054
| 745 |
py
|
Python
|
truefalsepython/useful_funcs.py
|
PasaOpasen/true-false-python
|
a26ddd7c61471dd4d60d8ece00c6f03750bb7dca
|
[
"MIT"
] | 1 |
2021-01-16T18:46:45.000Z
|
2021-01-16T18:46:45.000Z
|
truefalsepython/useful_funcs.py
|
PasaOpasen/true-false-python
|
a26ddd7c61471dd4d60d8ece00c6f03750bb7dca
|
[
"MIT"
] | null | null | null |
truefalsepython/useful_funcs.py
|
PasaOpasen/true-false-python
|
a26ddd7c61471dd4d60d8ece00c6f03750bb7dca
|
[
"MIT"
] | null | null | null |
from typing import Sequence, Any
import sys
import random
def randomTrue(prob: float = 0.5):
return True if random.random() < prob else False
def fast_sample(objects: Sequence[Any], probs: Sequence[float]):
"""
    Returns one random object from objects, chosen according to the probs probabilities.
"""
x = random.random()
cum = 0
for i, p in enumerate(probs):
cum += p
if x < cum:
return objects[i]
return objects[-1]
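# Illustrative example (not in the original module):
#   fast_sample(['a', 'b', 'c'], [0.2, 0.5, 0.3]) returns 'a' about 20% of the time,
#   'b' about 50% and 'c' about 30%; if the probabilities sum to less than 1, the
#   last object soaks up the remaining mass.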
def set_trace():
from IPython.core.debugger import Pdb
Pdb(color_scheme = 'Linux').set_trace(sys._getframe().f_back)
def debug(f, *args, **kwargs):
from IPython.core.debugger import Pdb
pdb = Pdb(color_scheme = 'Linux')
return pdb.runcall(f, *args, **kwargs)
| 20.694444 | 66 | 0.644295 |
79492e3349ec373298de91d30b770fd9c142c15f
| 14,559 |
py
|
Python
|
codes/compressai/zoo/image.py
|
WestCityInstitute/InvCompress
|
17deb0f0e2f28432e6d5b38d0898ead58ccbcde4
|
[
"Apache-2.0"
] | 62 |
2021-08-10T01:25:44.000Z
|
2022-03-31T07:53:35.000Z
|
codes/compressai/zoo/image.py
|
WestCityInstitute/InvCompress
|
17deb0f0e2f28432e6d5b38d0898ead58ccbcde4
|
[
"Apache-2.0"
] | 6 |
2021-08-12T06:40:55.000Z
|
2022-03-09T15:34:50.000Z
|
codes/compressai/zoo/image.py
|
WestCityInstitute/InvCompress
|
17deb0f0e2f28432e6d5b38d0898ead58ccbcde4
|
[
"Apache-2.0"
] | 8 |
2021-08-06T02:03:12.000Z
|
2022-03-22T19:33:08.000Z
|
# Copyright 2020 InterDigital Communications, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch.hub import load_state_dict_from_url
from compressai.models import (
InvCompress,
Cheng2020Anchor,
Cheng2020Attention,
FactorizedPrior,
JointAutoregressiveHierarchicalPriors,
MeanScaleHyperprior,
ScaleHyperprior,
)
from .pretrained import load_pretrained
__all__ = [
"bmshj2018_factorized",
"bmshj2018_hyperprior",
"mbt2018",
"mbt2018_mean",
"cheng2020_anchor",
"cheng2020_attn",
"invcompress",
]
model_architectures = {
"bmshj2018-factorized": FactorizedPrior,
"bmshj2018-hyperprior": ScaleHyperprior,
"mbt2018-mean": MeanScaleHyperprior,
"mbt2018": JointAutoregressiveHierarchicalPriors,
"cheng2020-anchor": Cheng2020Anchor,
"cheng2020-attn": Cheng2020Attention,
"invcompress": InvCompress,
}
root_url = "https://compressai.s3.amazonaws.com/models/v1"
model_urls = {
"bmshj2018-factorized": {
"mse": {
1: f"{root_url}/bmshj2018-factorized-prior-1-446d5c7f.pth.tar",
2: f"{root_url}/bmshj2018-factorized-prior-2-87279a02.pth.tar",
3: f"{root_url}/bmshj2018-factorized-prior-3-5c6f152b.pth.tar",
4: f"{root_url}/bmshj2018-factorized-prior-4-1ed4405a.pth.tar",
5: f"{root_url}/bmshj2018-factorized-prior-5-866ba797.pth.tar",
6: f"{root_url}/bmshj2018-factorized-prior-6-9b02ea3a.pth.tar",
7: f"{root_url}/bmshj2018-factorized-prior-7-6dfd6734.pth.tar",
8: f"{root_url}/bmshj2018-factorized-prior-8-5232faa3.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/bmshj2018-factorized-ms-ssim-1-9781d705.pth.tar",
2: f"{root_url}/bmshj2018-factorized-ms-ssim-2-4a584386.pth.tar",
3: f"{root_url}/bmshj2018-factorized-ms-ssim-3-5352f123.pth.tar",
4: f"{root_url}/bmshj2018-factorized-ms-ssim-4-4f91b847.pth.tar",
5: f"{root_url}/bmshj2018-factorized-ms-ssim-5-b3a88897.pth.tar",
6: f"{root_url}/bmshj2018-factorized-ms-ssim-6-ee028763.pth.tar",
7: f"{root_url}/bmshj2018-factorized-ms-ssim-7-8c265a29.pth.tar",
8: f"{root_url}/bmshj2018-factorized-ms-ssim-8-8811bd14.pth.tar",
},
},
"bmshj2018-hyperprior": {
"mse": {
1: f"{root_url}/bmshj2018-hyperprior-1-7eb97409.pth.tar",
2: f"{root_url}/bmshj2018-hyperprior-2-93677231.pth.tar",
3: f"{root_url}/bmshj2018-hyperprior-3-6d87be32.pth.tar",
4: f"{root_url}/bmshj2018-hyperprior-4-de1b779c.pth.tar",
5: f"{root_url}/bmshj2018-hyperprior-5-f8b614e1.pth.tar",
6: f"{root_url}/bmshj2018-hyperprior-6-1ab9c41e.pth.tar",
7: f"{root_url}/bmshj2018-hyperprior-7-3804dcbd.pth.tar",
8: f"{root_url}/bmshj2018-hyperprior-8-a583f0cf.pth.tar",
},
"ms-ssim": {
1: f"{root_url}/bmshj2018-hyperprior-ms-ssim-1-5cf249be.pth.tar",
2: f"{root_url}/bmshj2018-hyperprior-ms-ssim-2-1ff60d1f.pth.tar",
3: f"{root_url}/bmshj2018-hyperprior-ms-ssim-3-92dd7878.pth.tar",
4: f"{root_url}/bmshj2018-hyperprior-ms-ssim-4-4377354e.pth.tar",
5: f"{root_url}/bmshj2018-hyperprior-ms-ssim-5-c34afc8d.pth.tar",
6: f"{root_url}/bmshj2018-hyperprior-ms-ssim-6-3a6d8229.pth.tar",
7: f"{root_url}/bmshj2018-hyperprior-ms-ssim-7-8747d3bc.pth.tar",
8: f"{root_url}/bmshj2018-hyperprior-ms-ssim-8-cc15b5f3.pth.tar",
},
},
"mbt2018-mean": {
"mse": {
1: f"{root_url}/mbt2018-mean-1-e522738d.pth.tar",
2: f"{root_url}/mbt2018-mean-2-e54a039d.pth.tar",
3: f"{root_url}/mbt2018-mean-3-723404a8.pth.tar",
4: f"{root_url}/mbt2018-mean-4-6dba02a3.pth.tar",
5: f"{root_url}/mbt2018-mean-5-d504e8eb.pth.tar",
6: f"{root_url}/mbt2018-mean-6-a19628ab.pth.tar",
7: f"{root_url}/mbt2018-mean-7-d5d441d1.pth.tar",
8: f"{root_url}/mbt2018-mean-8-8089ae3e.pth.tar",
},
},
"mbt2018": {
"mse": {
1: f"{root_url}/mbt2018-1-3f36cd77.pth.tar",
2: f"{root_url}/mbt2018-2-43b70cdd.pth.tar",
3: f"{root_url}/mbt2018-3-22901978.pth.tar",
4: f"{root_url}/mbt2018-4-456e2af9.pth.tar",
5: f"{root_url}/mbt2018-5-b4a046dd.pth.tar",
6: f"{root_url}/mbt2018-6-7052e5ea.pth.tar",
7: f"{root_url}/mbt2018-7-8ba2bf82.pth.tar",
8: f"{root_url}/mbt2018-8-dd0097aa.pth.tar",
},
},
"cheng2020-anchor": {
"mse": {
1: f"{root_url}/cheng2020-anchor-1-dad2ebff.pth.tar",
2: f"{root_url}/cheng2020-anchor-2-a29008eb.pth.tar",
3: f"{root_url}/cheng2020-anchor-3-e49be189.pth.tar",
4: f"{root_url}/cheng2020-anchor-4-98b0b468.pth.tar",
5: f"{root_url}/cheng2020-anchor-5-23852949.pth.tar",
6: f"{root_url}/cheng2020-anchor-6-4c052b1a.pth.tar",
},
},
"cheng2020-attn": {
"mse": {},
},
}
cfgs = {
"bmshj2018-factorized": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (128, 192),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"bmshj2018-hyperprior": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (128, 192),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"mbt2018-mean": {
1: (128, 192),
2: (128, 192),
3: (128, 192),
4: (128, 192),
5: (192, 320),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"mbt2018": {
1: (192, 192),
2: (192, 192),
3: (192, 192),
4: (192, 192),
5: (192, 320),
6: (192, 320),
7: (192, 320),
8: (192, 320),
},
"cheng2020-anchor": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
},
"cheng2020-attn": {
1: (128,),
2: (128,),
3: (128,),
4: (192,),
5: (192,),
6: (192,),
7: (256,),
8: (384,),
},
"invcompress": {
1: (128,),
2: (128,),
3: (128,),
        4: (128,),
5: (192,),
6: (192,),
7: (192,),
8: (192,),
},
}
def _load_model(
architecture, metric, quality, pretrained=False, progress=True, **kwargs
):
if architecture not in model_architectures:
raise ValueError(f'Invalid architecture name "{architecture}"')
if quality not in cfgs[architecture]:
raise ValueError(f'Invalid quality value "{quality}"')
if pretrained:
if (
architecture not in model_urls
or metric not in model_urls[architecture]
or quality not in model_urls[architecture][metric]
):
raise RuntimeError("Pre-trained model not yet available")
url = model_urls[architecture][metric][quality]
state_dict = load_state_dict_from_url(url, progress=progress)
state_dict = load_pretrained(state_dict)
model = model_architectures[architecture].from_state_dict(state_dict)
return model
model = model_architectures[architecture](*cfgs[architecture][quality], **kwargs)
return model
def bmshj2018_factorized(
quality, metric="mse", pretrained=False, progress=True, **kwargs
):
r"""Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_, Int Conf. on Learning Representations
(ICLR), 2018.
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model(
"bmshj2018-factorized", metric, quality, pretrained, progress, **kwargs
)
def bmshj2018_hyperprior(
quality, metric="mse", pretrained=False, progress=True, **kwargs
):
r"""Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,
N. Johnston: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
(ICLR), 2018.
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model(
"bmshj2018-hyperprior", metric, quality, pretrained, progress, **kwargs
)
def mbt2018_mean(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Scale Hyperprior with non zero-mean Gaussian conditionals from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse",):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model("mbt2018-mean", metric, quality, pretrained, progress, **kwargs)
def mbt2018(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Joint Autoregressive Hierarchical Priors model from D.
Minnen, J. Balle, G.D. Toderici: `"Joint Autoregressive and Hierarchical
Priors for Learned Image Compression" <https://arxiv.org/abs/1809.02736>`_,
Adv. in Neural Information Processing Systems 31 (NeurIPS 2018).
Args:
quality (int): Quality levels (1: lowest, highest: 8)
metric (str): Optimized metric, choose from ('mse')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse",):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
return _load_model("mbt2018", metric, quality, pretrained, progress, **kwargs)
def cheng2020_anchor(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Anchor model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
quality (int): Quality levels (1: lowest, highest: 6)
metric (str): Optimized metric, choose from ('mse')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse",):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 6:
raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model(
"cheng2020-anchor", metric, quality, pretrained, progress, **kwargs
)
def cheng2020_attn(quality, metric="mse", pretrained=False, progress=True, **kwargs):
r"""Self-attention model variant from `"Learned Image Compression with
Discretized Gaussian Mixture Likelihoods and Attention Modules"
<https://arxiv.org/abs/2001.01568>`_, by Zhengxue Cheng, Heming Sun, Masaru
Takeuchi, Jiro Katto.
Args:
quality (int): Quality levels (1: lowest, highest: 6)
metric (str): Optimized metric, choose from ('mse')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse",):
raise ValueError(f'Invalid metric "{metric}"')
    if quality < 1 or quality > 6:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 6)')
return _load_model(
"cheng2020-attn", metric, quality, pretrained, progress, **kwargs
)
def invcompress(
quality, metric="mse", pretrained=False, progress=True, **kwargs
):
r"""Our InvCompress model
Args:
quality (int): Quality levels (1: lowest, highest: 8)
        metric (str): Optimized metric, choose from ('mse', 'ms-ssim')
pretrained (bool): If True, returns a pre-trained model
progress (bool): If True, displays a progress bar of the download to stderr
"""
if metric not in ("mse", "ms-ssim"):
raise ValueError(f'Invalid metric "{metric}"')
if quality < 1 or quality > 8:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
    if pretrained:
        raise ValueError(f'Invalid pretrained "{pretrained}", not yet supported')
return _load_model(
"invcompress", metric, quality, pretrained, progress, **kwargs
)
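# A minimal usage sketch of the factory functions above, assuming a CompressAI-style
# forward API (the constructed module returns a dict containing "x_hat" and
# "likelihoods") and that torch is installed; the quality value and tensor shape
# below are illustrative assumptions only.
if __name__ == "__main__":
    import torch
    net = bmshj2018_factorized(quality=3, metric="mse", pretrained=False).eval()
    x = torch.rand(1, 3, 256, 256)  # dummy RGB image batch
    with torch.no_grad():
        out = net(x)
    print(out["x_hat"].shape)  # reconstruction, same shape as the input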
| 37.045802 | 87 | 0.615221 |
79492f18a87359eb66e2044ef93033e382892b05
| 10,147 |
py
|
Python
|
psdash/node.py
|
luoyang-123/psdash-master_new
|
dd3b344a81a7f6a49db0ec3f91f23e0902d6e792
|
[
"CC0-1.0"
] | null | null | null |
psdash/node.py
|
luoyang-123/psdash-master_new
|
dd3b344a81a7f6a49db0ec3f91f23e0902d6e792
|
[
"CC0-1.0"
] | null | null | null |
psdash/node.py
|
luoyang-123/psdash-master_new
|
dd3b344a81a7f6a49db0ec3f91f23e0902d6e792
|
[
"CC0-1.0"
] | null | null | null |
# coding=UTF-8
import logging
import os
import platform
import psutil
import socket
import time
import zerorpc
from psdash.log import Logs
from psdash.helpers import socket_families, socket_types
from psdash.net import get_interface_addresses, NetIOCounters
logger = logging.getLogger("psdash.node")
class Node(object):
def __init__(self):
self._service = None
def get_id(self):
raise NotImplementedError
def _create_service(self):
raise NotImplementedError
def get_service(self):
if not self._service:
self._service = self._create_service()
return self._service
class RemoteNode(Node):
def __init__(self, name, host, port):
super(RemoteNode, self).__init__()
self.name = name
self.host = host
self.port = int(port)
self.last_registered = None
def get_id(self):
return '%s:%s' % (self.host, self.port)
def update_last_registered(self):
self.last_registered = int(time.time())
class LocalNode(Node):
def __init__(self):
super(LocalNode, self).__init__()
self.name = "psDash"
self.net_io_counters = NetIOCounters()
self.logs = Logs()
def get_id(self):
return 'localhost'
def _create_service(self):
return LocalService(self)
class LocalService(object):
def __init__(self, node):
self.node = node
def get_sysinfo(self):
uptime = int(time.time() - psutil.boot_time())
sysinfo = {
'uptime': uptime,
'hostname': socket.gethostname(),
'os': platform.platform(),
'load_avg': os.getloadavg(),
'num_cpus': psutil.cpu_count()
}
return sysinfo
def get_memory(self):
return psutil.virtual_memory()._asdict()
def get_swap_space(self):
sm = psutil.swap_memory()
swap = {
'total': sm.total,
'free': sm.free,
'used': sm.used,
'percent': sm.percent,
'swapped_in': sm.sin,
'swapped_out': sm.sout
}
return swap
def get_cpu(self):
return psutil.cpu_times_percent(0)._asdict()
def get_cpu_cores(self):
return [c._asdict() for c in psutil.cpu_times_percent(0, percpu=True)]
def get_disks(self, all_partitions=False):
disks = []
for dp in psutil.disk_partitions(all_partitions):
usage = psutil.disk_usage(dp.mountpoint)
disk = {
'device': dp.device,
'mountpoint': dp.mountpoint,
'type': dp.fstype,
'options': dp.opts,
'space_total': usage.total,
'space_used': usage.used,
'space_used_percent': usage.percent,
'space_free': usage.free
}
disks.append(disk)
return disks
def get_disks_counters(self, perdisk=True):
return dict((dev, c._asdict()) for dev, c in psutil.disk_io_counters(perdisk=perdisk).iteritems())
def get_users(self):
return [u._asdict() for u in psutil.users()]
def get_network_interfaces(self):
io_counters = self.node.net_io_counters.get()
addresses = get_interface_addresses()
netifs = {}
for addr in addresses:
c = io_counters.get(addr['name'])
if not c:
continue
netifs[addr['name']] = {
'name': addr['name'],
'ip': addr['ip'],
'bytes_sent': c['bytes_sent'],
'bytes_recv': c['bytes_recv'],
'packets_sent': c['packets_sent'],
'packets_recv': c['packets_recv'],
'errors_in': c['errin'],
'errors_out': c['errout'],
'dropped_in': c['dropin'],
'dropped_out': c['dropout'],
'send_rate': c['tx_per_sec'],
'recv_rate': c['rx_per_sec']
}
return netifs
def get_process_list(self):
process_list = []
for p in psutil.process_iter():
mem = p.memory_info()
            # psutil throws a KeyError when the uid of a process is not associated with a user.
try:
username = p.username()
except KeyError:
username = None
proc = {
'pid': p.pid,
'name': p.name(),
'cmdline': ' '.join(p.cmdline()),
'user': username,
'status': p.status(),
'created': p.create_time(),
'mem_rss': mem.rss,
'mem_vms': mem.vms,
'mem_percent': p.memory_percent(),
'cpu_percent': p.cpu_percent(0)
}
process_list.append(proc)
return process_list
def get_process(self, pid):
p = psutil.Process(pid)
mem = p.memory_info_ex()
cpu_times = p.cpu_times()
        # psutil throws a KeyError when the uid of a process is not associated with a user.
try:
username = p.username()
except KeyError:
username = None
return {
'pid': p.pid,
'ppid': p.ppid(),
'parent_name': p.parent().name() if p.parent() else '',
'name': p.name(),
'cmdline': ' '.join(p.cmdline()),
'user': username,
'uid_real': p.uids().real,
'uid_effective': p.uids().effective,
'uid_saved': p.uids().saved,
'gid_real': p.gids().real,
'gid_effective': p.gids().effective,
'gid_saved': p.gids().saved,
'status': p.status(),
'created': p.create_time(),
'terminal': p.terminal(),
'mem_rss': mem.rss,
'mem_vms': mem.vms,
'mem_shared': mem.shared,
'mem_text': mem.text,
'mem_lib': mem.lib,
'mem_data': mem.data,
'mem_dirty': mem.dirty,
'mem_percent': p.memory_percent(),
'cwd': p.cwd(),
'nice': p.nice(),
'io_nice_class': p.ionice()[0],
'io_nice_value': p.ionice()[1],
'cpu_percent': p.cpu_percent(0),
'num_threads': p.num_threads(),
'num_files': len(p.open_files()),
'num_children': len(p.children()),
'num_ctx_switches_invol': p.num_ctx_switches().involuntary,
'num_ctx_switches_vol': p.num_ctx_switches().voluntary,
'cpu_times_user': cpu_times.user,
'cpu_times_system': cpu_times.system,
'cpu_affinity': p.cpu_affinity()
}
def get_process_environment(self, pid):
with open('/proc/%d/environ' % pid) as f:
contents = f.read()
env_vars = dict(row.split('=', 1) for row in contents.split('\0') if '=' in row)
return env_vars
def get_process_threads(self, pid):
threads = []
proc = psutil.Process(pid)
for t in proc.threads():
thread = {
'id': t.id,
'cpu_time_user': t.user_time,
'cpu_time_system': t.system_time,
}
threads.append(thread)
return threads
def get_process_open_files(self, pid):
proc = psutil.Process(pid)
return [f._asdict() for f in proc.open_files()]
def get_process_connections(self, pid):
proc = psutil.Process(pid)
connections = []
for c in proc.connections(kind='all'):
conn = {
'fd': c.fd,
'family': socket_families[c.family],
'type': socket_types[c.type],
'local_addr_host': c.laddr[0] if c.laddr else None,
'local_addr_port': c.laddr[1] if c.laddr else None,
'remote_addr_host': c.raddr[0] if c.raddr else None,
'remote_addr_port': c.raddr[1] if c.raddr else None,
'state': c.status
}
connections.append(conn)
return connections
def get_process_memory_maps(self, pid):
return [m._asdict() for m in psutil.Process(pid).memory_maps()]
def get_process_children(self, pid):
proc = psutil.Process(pid)
children = []
for c in proc.children():
child = {
'pid': c.pid,
'name': c.name(),
'cmdline': ' '.join(c.cmdline()),
'status': c.status()
}
children.append(child)
return children
def get_connections(self, filters=None):
filters = filters or {}
connections = []
for c in psutil.net_connections('all'):
conn = {
'fd': c.fd,
'pid': c.pid,
'family': socket_families[c.family],
'type': socket_types[c.type],
'local_addr_host': c.laddr[0] if c.laddr else None,
'local_addr_port': c.laddr[1] if c.laddr else None,
'remote_addr_host': c.raddr[0] if c.raddr else None,
'remote_addr_port': c.raddr[1] if c.raddr else None,
'state': c.status
}
for k, v in filters.iteritems():
if v and conn.get(k) != v:
break
else:
connections.append(conn)
return connections
def get_logs(self):
available_logs = []
for log in self.node.logs.get_available():
try:
stat = os.stat(log.filename)
available_logs.append({
'path': log.filename.encode("utf-8"),
'size': stat.st_size,
'atime': stat.st_atime,
'mtime': stat.st_mtime
})
except OSError:
logger.info('Could not stat "%s", removing from available logs', log.filename)
self.node.logs.remove_available(log.filename)
return available_logs
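# Minimal local smoke test (a sketch only): assumes a Unix-like system with psutil
# installed and the psdash package importable; it exercises LocalService directly,
# without going through zerorpc.
if __name__ == "__main__":
    local_node = LocalNode()
    service = local_node.get_service()
    print(service.get_sysinfo())
    print(service.get_memory())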
| 30.748485 | 106 | 0.520449 |
79492f62b4e20ce1b041dd0a128d3221344f2a89
| 3,228 |
py
|
Python
|
tool/run/generate_image/main.py
|
yesii4u/covid19fks
|
329f83000d42264354f27d7b6ac3d7b481d99160
|
[
"MIT"
] | 33 |
2020-03-20T03:39:05.000Z
|
2021-02-26T05:05:30.000Z
|
tool/run/generate_image/main.py
|
yesii4u/covid19fks
|
329f83000d42264354f27d7b6ac3d7b481d99160
|
[
"MIT"
] | 80 |
2020-03-18T16:16:14.000Z
|
2022-02-11T08:56:08.000Z
|
tool/run/generate_image/main.py
|
yesii4u/covid19fks
|
329f83000d42264354f27d7b6ac3d7b481d99160
|
[
"MIT"
] | 16 |
2020-03-19T11:22:12.000Z
|
2021-01-20T12:20:29.000Z
|
import time
from flask import Flask, send_file
from selenium import webdriver
import chromedriver_binary # Adds chromedriver binary to path
from PIL import Image
from io import BytesIO
from google.cloud import storage
app = Flask(__name__)
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--hide-scrollbars")
chrome_options.add_argument("--lang=ja")
chrome_options.add_argument("--window-size=1200,1000")
chrome_options.add_argument("--no-sandbox")
# chrome_options.add_argument("--force-device-scale-factor=2")
GCS_BUCKET_ID = 'fukushima-covid19'
BASE_URL = 'https://fukushima-covid19.web.app/'
CARD_URL = BASE_URL + 'cards/{}/?embed=true'
CARD_IDS = [
'attributes-of-confirmed-cases',
'details-of-confirmed-cases',
'number-of-confirmed-cases',
'number-of-reports-to-covid19-consultation-desk',
'number-of-reports-to-covid19-telephone-advisory-center',
'number-of-tested'
]
def upload_png(bucket_name, destination_blob_name, image):
# storage_client = storage.Client.from_service_account_json('./cred.json')
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.cache_control = 'max-age=60'
blob.upload_from_string(image, content_type='image/png')
def get_card_image(browser, id_name):
browser.get(CARD_URL.format(id_name))
time.sleep(5)
remove_share_btn = 'var elements = document.querySelectorAll(".Footer-Right"); for (let i = 0; i < elements.length; i++) {const element = elements[i];element.remove();}'
browser.execute_script(remove_share_btn)
add_prefix = 'var titles = document.querySelectorAll(".DataView-Title"); var prefNamePrefix = "福島:"; for (let i = 0; i < titles.length; i++) { const element = titles[i]; element.innerHTML = prefNamePrefix + element.innerHTML }'
browser.execute_script(add_prefix)
# TODO: Generate high resolution image
# change_origin = 'document.getElementById("app").style.transformOrigin = "top left"'
# browser.execute_script(change_origin)
# zoom_element = 'document.getElementById("app").style.transform = "scale(2)"'
# browser.execute_script(zoom_element)
return browser.find_element_by_id(id_name).screenshot_as_png
@app.route("/images/generate")
def generate_all_images():
# Initialize a new browser
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.get(BASE_URL)
time.sleep(1)
for id_name in CARD_IDS:
image = get_card_image(browser, id_name)
upload_png(GCS_BUCKET_ID, 'images/{}.png'.format(id_name), image)
browser.close()
return 'OK'
@app.route("/image/generate/<id_name>")
def generate_image(id_name):
# Initialize a new browser
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.get(BASE_URL)
time.sleep(1)
image = get_card_image(browser, id_name)
upload_png(GCS_BUCKET_ID, 'images/{}.png'.format(id_name), image)
browser.close()
tmp_file_name = './tmp_image_{}.png'.format(id_name)
with open(tmp_file_name, 'wb') as f:
f.write(image)
return send_file(tmp_file_name)
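# Local debugging entry point (a sketch only): in the deployed setup the Flask app
# is assumed to be served by a WSGI server such as gunicorn, so the host/port below
# are illustrative.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080, debug=False)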
| 38.428571 | 231 | 0.73544 |
794933464587f9f653686326bb9c543f0be0c008
| 5,792 |
py
|
Python
|
optuna/visualization/_slice.py
|
captain-pool/optuna
|
2ae8c17afea54362460320870304c763e91c0596
|
[
"MIT"
] | null | null | null |
optuna/visualization/_slice.py
|
captain-pool/optuna
|
2ae8c17afea54362460320870304c763e91c0596
|
[
"MIT"
] | null | null | null |
optuna/visualization/_slice.py
|
captain-pool/optuna
|
2ae8c17afea54362460320870304c763e91c0596
|
[
"MIT"
] | null | null | null |
from typing import Callable
from typing import cast
from typing import List
from typing import Optional
from optuna.logging import get_logger
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization._plotly_imports import _imports
from optuna.visualization._utils import _check_plot_args
from optuna.visualization._utils import _is_log_scale
from optuna.visualization._utils import COLOR_SCALE
if _imports.is_successful():
from optuna.visualization._plotly_imports import go
from optuna.visualization._plotly_imports import make_subplots
from optuna.visualization._plotly_imports import Scatter
_logger = get_logger(__name__)
def plot_slice(
study: Study,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
"""Plot the parameter relationship as slice plot in a study.
Note that, if a parameter contains missing values, a trial with missing values is not plotted.
Example:
The following code snippet shows how to plot the parameter relationship as slice plot.
.. plotly::
import optuna
def objective(trial):
x = trial.suggest_float("x", -100, 100)
y = trial.suggest_categorical("y", [-1, 0, 1])
return x ** 2 + y
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
fig = optuna.visualization.plot_slice(study, params=["x", "y"])
fig.show()
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
params:
Parameter list to visualize. The default is all parameters.
target:
A function to specify the value to display. If it is :obj:`None` and ``study`` is being
used for single-objective optimization, the objective values are plotted.
.. note::
Specify this argument if ``study`` is being used for multi-objective optimization.
target_name:
Target's name to display on the axis label.
Returns:
A :class:`plotly.graph_objs.Figure` object.
Raises:
:exc:`ValueError`:
If ``target`` is :obj:`None` and ``study`` is being used for multi-objective
optimization.
"""
_imports.check()
_check_plot_args(study, target, target_name)
return _get_slice_plot(study, params, target, target_name)
def _get_slice_plot(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
layout = go.Layout(title="Slice Plot")
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(all_params)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(set(params))
n_params = len(sorted_params)
if n_params == 1:
figure = go.Figure(
data=[_generate_slice_subplot(trials, sorted_params[0], target)], layout=layout
)
figure.update_xaxes(title_text=sorted_params[0])
figure.update_yaxes(title_text=target_name)
if _is_log_scale(trials, sorted_params[0]):
figure.update_xaxes(type="log")
else:
figure = make_subplots(rows=1, cols=len(sorted_params), shared_yaxes=True)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once.
for i, param in enumerate(sorted_params):
trace = _generate_slice_subplot(trials, param, target)
trace.update(marker={"showscale": showscale}) # showscale's default is True.
if showscale:
showscale = False
figure.add_trace(trace, row=1, col=i + 1)
figure.update_xaxes(title_text=param, row=1, col=i + 1)
if i == 0:
figure.update_yaxes(title_text=target_name, row=1, col=1)
if _is_log_scale(trials, param):
figure.update_xaxes(type="log", row=1, col=i + 1)
if n_params > 3:
            # Ensure that each subplot has a minimum width without relying on autosizing.
figure.update_layout(width=300 * n_params)
return figure
def _generate_slice_subplot(
trials: List[FrozenTrial],
param: str,
target: Optional[Callable[[FrozenTrial], float]],
) -> "Scatter":
if target is None:
def _target(t: FrozenTrial) -> float:
return cast(float, t.value)
target = _target
return go.Scatter(
x=[t.params[param] for t in trials if param in t.params],
y=[target(t) for t in trials if param in t.params],
mode="markers",
marker={
"line": {"width": 0.5, "color": "Grey"},
"color": [t.number for t in trials if param in t.params],
"colorscale": COLOR_SCALE,
"colorbar": {
"title": "#Trials",
"x": 1.0, # Offset the colorbar position with a fixed width `xpad`.
"xpad": 40,
},
},
showlegend=False,
)
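# Hedged sketch of the multi-objective case mentioned in the docstring above: the
# `target` callable selects which of a trial's values to plot. The toy study,
# objective and names below are illustrative assumptions, not part of this module.
if __name__ == "__main__":
    import optuna
    def _toy_objective(trial):
        x = trial.suggest_float("x", -10, 10)
        return x ** 2, (x - 2) ** 2
    _study = optuna.create_study(directions=["minimize", "minimize"])
    _study.optimize(_toy_objective, n_trials=30)
    fig = plot_slice(
        _study, params=["x"], target=lambda t: t.values[0], target_name="First Objective"
    )
    fig.show()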
| 34.070588 | 99 | 0.63346 |
7949340c1f676f4b04f1788c566c2281e8f3f1d3
| 14,142 |
py
|
Python
|
aen_CEM.py
|
mattolson93/Contrastive-Explanation-Method
|
62f2660895d42c08ab36f589c0f9ebf9bf25948c
|
[
"Apache-2.0"
] | null | null | null |
aen_CEM.py
|
mattolson93/Contrastive-Explanation-Method
|
62f2660895d42c08ab36f589c0f9ebf9bf25948c
|
[
"Apache-2.0"
] | null | null | null |
aen_CEM.py
|
mattolson93/Contrastive-Explanation-Method
|
62f2660895d42c08ab36f589c0f9ebf9bf25948c
|
[
"Apache-2.0"
] | null | null | null |
## aen_attack.py -- attack a network optimizing elastic-net distance with an en decision rule
## when autoencoder loss is applied
##
## Copyright (C) 2018, IBM Corp
## Chun-Chen Tu <timtu@umich.edu>
## PaiShun Ting <paishun@umich.edu>
## Pin-Yu Chen <Pin-Yu.Chen@ibm.com>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import sys
import tensorflow as tf
import numpy as np
class AEADEN:
def __init__(self, sess, model, mode, AE, batch_size, kappa, init_learning_rate,
binary_search_steps, max_iterations, initial_const, beta, gamma, shape_type = 0):
        image_size, num_channels, num_classes = model.image_size, model.num_channels, model.num_labels
if shape_type == 0:
shape = (batch_size, image_size, image_size, num_channels )
else:
shape = (batch_size, num_channels, image_size, image_size )
self.sess = sess
self.INIT_LEARNING_RATE = init_learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.kappa = kappa
self.init_const = initial_const
self.batch_size = batch_size
self.AE = AE
self.mode = mode
self.beta = beta
self.gamma = gamma
# these are variables to be more efficient in sending data to tf
self.orig_img = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.adv_img = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.adv_img_s = tf.Variable(np.zeros(shape), dtype=tf.float32)
        self.target_lab = tf.Variable(np.zeros((batch_size,num_classes)), dtype=tf.float32)
self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)
self.global_step = tf.Variable(0.0, trainable=False)
# and here's what we use to assign them
self.assign_orig_img = tf.placeholder(tf.float32, shape)
self.assign_adv_img = tf.placeholder(tf.float32, shape)
self.assign_adv_img_s = tf.placeholder(tf.float32, shape)
        self.assign_target_lab = tf.placeholder(tf.float32, (batch_size,num_classes))
self.assign_const = tf.placeholder(tf.float32, [batch_size])
"""Fast Iterative Soft Thresholding"""
"""--------------------------------"""
self.zt = tf.divide(self.global_step, self.global_step+tf.cast(3, tf.float32))
cond1 = tf.cast(tf.greater(tf.subtract(self.adv_img_s, self.orig_img),self.beta), tf.float32)
cond2 = tf.cast(tf.less_equal(tf.abs(tf.subtract(self.adv_img_s,self.orig_img)),self.beta), tf.float32)
cond3 = tf.cast(tf.less(tf.subtract(self.adv_img_s, self.orig_img),tf.negative(self.beta)), tf.float32)
upper = tf.minimum(tf.subtract(self.adv_img_s, self.beta), tf.cast(0.5, tf.float32))
lower = tf.maximum(tf.add(self.adv_img_s, self.beta), tf.cast(-0.5, tf.float32))
self.assign_adv_img = tf.multiply(cond1,upper)+tf.multiply(cond2,self.orig_img)+tf.multiply(cond3,lower)
cond4=tf.cast(tf.greater(tf.subtract( self.assign_adv_img, self.orig_img),0), tf.float32)
cond5=tf.cast(tf.less_equal(tf.subtract( self.assign_adv_img,self.orig_img),0), tf.float32)
if self.mode == "PP":
self.assign_adv_img = tf.multiply(cond5,self.assign_adv_img)+tf.multiply(cond4,self.orig_img)
elif self.mode == "PN":
self.assign_adv_img = tf.multiply(cond4,self.assign_adv_img)+tf.multiply(cond5,self.orig_img)
self.assign_adv_img_s = self.assign_adv_img+tf.multiply(self.zt, self.assign_adv_img-self.adv_img)
cond6=tf.cast(tf.greater(tf.subtract( self.assign_adv_img_s, self.orig_img),0), tf.float32)
cond7=tf.cast(tf.less_equal(tf.subtract( self.assign_adv_img_s,self.orig_img),0), tf.float32)
if self.mode == "PP":
self.assign_adv_img_s = tf.multiply(cond7, self.assign_adv_img_s)+tf.multiply(cond6,self.orig_img)
elif self.mode == "PN":
self.assign_adv_img_s = tf.multiply(cond6, self.assign_adv_img_s)+tf.multiply(cond7,self.orig_img)
self.adv_updater = tf.assign(self.adv_img, self.assign_adv_img)
self.adv_updater_s = tf.assign(self.adv_img_s, self.assign_adv_img_s)
"""--------------------------------"""
# prediction BEFORE-SOFTMAX of the model
self.delta_img = self.orig_img-self.adv_img
self.delta_img_s = self.orig_img-self.adv_img_s
if self.mode == "PP":
self.ImgToEnforceLabel_Score = model.predict(self.delta_img)
self.ImgToEnforceLabel_Score_s = model.predict(self.delta_img_s)
elif self.mode == "PN":
self.ImgToEnforceLabel_Score = model.predict(self.adv_img)
self.ImgToEnforceLabel_Score_s = model.predict(self.adv_img_s)
# distance to the input data
self.L2_dist = tf.reduce_sum(tf.square(self.delta_img),[1,2,3])
self.L2_dist_s = tf.reduce_sum(tf.square(self.delta_img_s),[1,2,3])
self.L1_dist = tf.reduce_sum(tf.abs(self.delta_img),[1,2,3])
self.L1_dist_s = tf.reduce_sum(tf.abs(self.delta_img_s),[1,2,3])
self.EN_dist = self.L2_dist + tf.multiply(self.L1_dist, self.beta)
self.EN_dist_s = self.L2_dist_s + tf.multiply(self.L1_dist_s, self.beta)
# compute the probability of the label class versus the maximum other
self.target_lab_score = tf.reduce_sum((self.target_lab)*self.ImgToEnforceLabel_Score,1)
target_lab_score_s = tf.reduce_sum((self.target_lab)*self.ImgToEnforceLabel_Score_s,1)
self.max_nontarget_lab_score = tf.reduce_max((1-self.target_lab)*self.ImgToEnforceLabel_Score - (self.target_lab*10000),1)
max_nontarget_lab_score_s = tf.reduce_max((1-self.target_lab)*self.ImgToEnforceLabel_Score_s - (self.target_lab*10000),1)
if self.mode == "PP":
Loss_Attack = tf.maximum(0.0, self.max_nontarget_lab_score - self.target_lab_score + self.kappa)
Loss_Attack_s = tf.maximum(0.0, max_nontarget_lab_score_s - target_lab_score_s + self.kappa)
elif self.mode == "PN":
Loss_Attack = tf.maximum(0.0, -self.max_nontarget_lab_score + self.target_lab_score + self.kappa)
Loss_Attack_s = tf.maximum(0.0, -max_nontarget_lab_score_s + target_lab_score_s + self.kappa)
# sum up the losses
self.Loss_L1Dist = tf.reduce_sum(self.L1_dist)
self.Loss_L1Dist_s = tf.reduce_sum(self.L1_dist_s)
self.Loss_L2Dist = tf.reduce_sum(self.L2_dist)
self.Loss_L2Dist_s = tf.reduce_sum(self.L2_dist_s)
self.Loss_Attack = tf.reduce_sum(self.const*Loss_Attack)
self.Loss_Attack_s = tf.reduce_sum(self.const*Loss_Attack_s)
if self.mode == "PP":
self.Loss_AE_Dist = self.gamma*tf.square(tf.norm(self.AE(self.delta_img)-self.delta_img))
            self.Loss_AE_Dist_s = self.gamma*tf.square(tf.norm(self.AE(self.delta_img_s)-self.delta_img_s))
elif self.mode == "PN":
self.Loss_AE_Dist = self.gamma*tf.square(tf.norm(self.AE(self.adv_img)-self.adv_img))
self.Loss_AE_Dist_s = self.gamma*tf.square(tf.norm(self.AE(self.adv_img_s)-self.adv_img_s))
self.Loss_ToOptimize = self.Loss_Attack_s + self.Loss_L2Dist_s + self.Loss_AE_Dist_s
self.Loss_Overall = self.Loss_Attack + self.Loss_L2Dist + self.Loss_AE_Dist + tf.multiply(self.beta, self.Loss_L1Dist)
self.learning_rate = tf.train.polynomial_decay(self.INIT_LEARNING_RATE, self.global_step, self.MAX_ITERATIONS, 0, power=0.5)
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
start_vars = set(x.name for x in tf.global_variables())
self.train = optimizer.minimize(self.Loss_ToOptimize, var_list=[self.adv_img_s], global_step=self.global_step)
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if x.name not in start_vars]
# these are the variables to initialize when we run
self.setup = []
self.setup.append(self.orig_img.assign(self.assign_orig_img))
self.setup.append(self.target_lab.assign(self.assign_target_lab))
self.setup.append(self.const.assign(self.assign_const))
self.setup.append(self.adv_img.assign(self.assign_adv_img))
self.setup.append(self.adv_img_s.assign(self.assign_adv_img_s))
self.init = tf.variables_initializer(var_list=[self.global_step]+[self.adv_img_s]+[self.adv_img]+new_vars)
def attack(self, imgs, labs):
def compare(x,y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
# x[y] -= self.kappa if self.PP else -self.kappa
if self.mode == "PP":
x[y] -= self.kappa
elif self.mode == "PN":
x[y] += self.kappa
x = np.argmax(x)
if self.mode == "PP":
return x==y
else:
return x!=y
batch_size = self.batch_size
# set the lower and upper bounds accordingly
Const_LB = np.zeros(batch_size)
CONST = np.ones(batch_size)*self.init_const
Const_UB = np.ones(batch_size)*1e10
# the best l2, score, and image attack
overall_best_dist = [1e10]*batch_size
overall_best_attack = [np.zeros(imgs[0].shape)]*batch_size
for binary_search_steps_idx in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
#TODO: REMOVE THIS HACK CAUSE THE AUTOENCODER ISNT TRAINED
self.sess.run(tf.global_variables_initializer())
img_batch = imgs[:batch_size]
label_batch = labs[:batch_size]
current_step_best_dist = [1e10]*batch_size
current_step_best_score = [-1]*batch_size
# set the variables so that we don't have to send them over again
self.sess.run(self.setup, {self.assign_orig_img: img_batch,
self.assign_target_lab: label_batch,
self.assign_const: CONST,
self.assign_adv_img: img_batch,
self.assign_adv_img_s: img_batch})
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
self.sess.run([self.train])
self.sess.run([self.adv_updater, self.adv_updater_s])
Loss_Overall, Loss_EN, OutputScore, adv_img = self.sess.run([self.Loss_Overall, self.EN_dist, self.ImgToEnforceLabel_Score, self.adv_img])
Loss_Attack, Loss_L2Dist, Loss_L1Dist, Loss_AE_Dist = self.sess.run([self.Loss_Attack, self.Loss_L2Dist, self.Loss_L1Dist, self.Loss_AE_Dist])
target_lab_score, max_nontarget_lab_score_s = self.sess.run([self.target_lab_score, self.max_nontarget_lab_score])
if iteration%(self.MAX_ITERATIONS//10) == 0:
print("iter:{} const:{}". format(iteration, CONST))
print("Loss_Overall:{:.4f}, Loss_Attack:{:.4f}". format(Loss_Overall, Loss_Attack))
print("Loss_L2Dist:{:.4f}, Loss_L1Dist:{:.4f}, AE_loss:{}". format(Loss_L2Dist, Loss_L1Dist, Loss_AE_Dist))
print("target_lab_score:{:.4f}, max_nontarget_lab_score:{:.4f}". format(target_lab_score[0], max_nontarget_lab_score_s[0]))
print("")
sys.stdout.flush()
for batch_idx,(the_dist, the_score, the_adv_img) in enumerate(zip(Loss_EN, OutputScore, adv_img)):
if the_dist < current_step_best_dist[batch_idx] and compare(the_score, np.argmax(label_batch[batch_idx])):
current_step_best_dist[batch_idx] = the_dist
current_step_best_score[batch_idx] = np.argmax(the_score)
if the_dist < overall_best_dist[batch_idx] and compare(the_score, np.argmax(label_batch[batch_idx])):
overall_best_dist[batch_idx] = the_dist
overall_best_attack[batch_idx] = the_adv_img
# adjust the constant as needed
for batch_idx in range(batch_size):
if compare(current_step_best_score[batch_idx], np.argmax(label_batch[batch_idx])) and current_step_best_score[batch_idx] != -1:
# success, divide const by two
Const_UB[batch_idx] = min(Const_UB[batch_idx],CONST[batch_idx])
if Const_UB[batch_idx] < 1e9:
CONST[batch_idx] = (Const_LB[batch_idx] + Const_UB[batch_idx])/2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
Const_LB[batch_idx] = max(Const_LB[batch_idx],CONST[batch_idx])
if Const_UB[batch_idx] < 1e9:
CONST[batch_idx] = (Const_LB[batch_idx] + Const_UB[batch_idx])/2
else:
CONST[batch_idx] *= 10
# return the best solution found
overall_best_attack = overall_best_attack[0]
return overall_best_attack.reshape((1,) + overall_best_attack.shape)
| 56.795181 | 159 | 0.633008 |
79493424107ef0b7578cf24e86902a751845928c
| 261 |
py
|
Python
|
axon/data/__init__.py
|
CONABIO-audio/axon
|
a08cdac7b7b531ea2218ef8866f8125560bf6414
|
[
"BSD-4-Clause"
] | null | null | null |
axon/data/__init__.py
|
CONABIO-audio/axon
|
a08cdac7b7b531ea2218ef8866f8125560bf6414
|
[
"BSD-4-Clause"
] | 3 |
2020-03-31T11:11:07.000Z
|
2021-08-23T20:37:40.000Z
|
axon/data/__init__.py
|
CONABIO-audio/axon
|
a08cdac7b7b531ea2218ef8866f8125560bf6414
|
[
"BSD-4-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Data module.
This module defines the basic building block of processing pipelines: data.
Data objects have a definite data type and may have differents ways of loading
and saving to external storages (database, filesystem, etc).
"""
| 32.625 | 78 | 0.750958 |
794935c9e371f071965a2dcb411ee6c99834aab3
| 5,067 |
py
|
Python
|
todocli/tests/test_todo/test_configmenusetup.py
|
BalenD/TODO-cli
|
a966800d36475c29ed8a3d92dcd0aa0c37018d61
|
[
"MIT"
] | null | null | null |
todocli/tests/test_todo/test_configmenusetup.py
|
BalenD/TODO-cli
|
a966800d36475c29ed8a3d92dcd0aa0c37018d61
|
[
"MIT"
] | null | null | null |
todocli/tests/test_todo/test_configmenusetup.py
|
BalenD/TODO-cli
|
a966800d36475c29ed8a3d92dcd0aa0c37018d61
|
[
"MIT"
] | 2 |
2019-05-13T09:39:27.000Z
|
2019-05-16T19:41:08.000Z
|
from unittest import mock
from todocli.todo.configmenusetup import Setup
from argparse import Namespace
import pytest
class TestConfigMenuSetup(object):
config_file_path = 'pathfilehere'
config_setup = Setup(config_file_path)
def test_ConfigMenuStart(self):
with mock.patch('builtins.input', return_value='y'):
created_config_obj = self.config_setup.config_menu_start()
assert isinstance(created_config_obj, dict)
assert created_config_obj['names'] == 'y'
assert created_config_obj['is_folder'] == True
assert isinstance(created_config_obj['extensions'], list)
assert created_config_obj['extensions'][0] == 'y'
# get folder or file name function
def test_GetFolderOrFileName(self):
with mock.patch('builtins.input', return_value='fileOrFolderName'):
assert self.config_setup.__get_folder_or_file_name__() == 'fileOrFolderName'
    # case where is_folder is True
def test_GetIsFolderTrue(self):
with mock.patch('builtins.input', return_value="y"):
assert self.config_setup.__get_is_folder__() == True
    # case where is_folder is False
def test_GetIsFolderFalse(self):
with mock.patch('builtins.input', return_value="n"):
assert self.config_setup.__get_is_folder__() == False
def test_GetExtensions(self):
with mock.patch('builtins.input', return_value=".py"):
assert self.config_setup.__get_extensions__() == '.py'
def test_CreateConfigObject(self):
createdObject = self.config_setup.create_config_object(' FileNames ', True, '.py .c')
assert isinstance(createdObject, dict)
assert createdObject['names'] == 'FileNames'
assert isinstance(createdObject['names'], str)
assert isinstance(createdObject['is_folder'], bool)
assert createdObject['is_folder'] == True
assert isinstance(createdObject['extensions'], list)
assert createdObject['extensions'][0] == ".py"
assert createdObject['extensions'][1] == ".c"
def test_CombineConfigurations(self):
obj1 = {
'names': None,
'is_folder': True,
'extensions': ['.py', '.c']
}
obj2 = {
'names': 'FileName',
'is_folder': None,
'extensions': None
}
result = self.config_setup.combine_configurations(obj1, obj2)
assert isinstance(result, Namespace)
assert result.names == 'FileName'
assert result.is_folder == True
assert isinstance(result.extensions, list)
assert result.extensions[0] == '.py'
assert result.extensions[1] == '.c'
def test_LoadconfigFromFile(self):
jsonStr = '{"extensions": [".py", ".c"], "is_folder": null, "names": "FileName"}'
mocked_file = mock.mock_open(read_data=jsonStr)
with mock.patch('todocli.todo.configmenusetup.open', mocked_file, create=True):
loaded_config = self.config_setup.load_config_from_file()
assert isinstance(loaded_config, dict)
assert loaded_config['names'] == 'FileName'
assert isinstance(loaded_config['names'], str)
assert loaded_config['is_folder'] == None
assert isinstance(loaded_config['extensions'], list)
assert loaded_config['extensions'][0] == '.py'
assert loaded_config['extensions'][1] == '.c'
def test_LoadConfigFromFileWithoutNameProperty(self):
jsonStr = '{"extensions": [".py", ".c"], "is_folder": null}'
mocked_file = mock.mock_open(read_data=jsonStr)
with mock.patch('todocli.todo.configmenusetup.open', mocked_file, create=True):
loaded_config = self.config_setup.load_config_from_file()
assert loaded_config['names'] == None
def test_LoadConfigFromFileError(self):
jsonStr = '{"extensions": [".py", ".c"], "is_folder": null, "names": "FileName"}'
mocked_file = mock.mock_open(read_data=jsonStr)
mocked_file.side_effect = FileNotFoundError
with mock.patch('todocli.todo.configmenusetup.open', mocked_file, create=True):
with pytest.raises(FileNotFoundError):
self.config_setup.load_config_from_file()
def test_PrintToFile(self):
jsonStr = '{"extensions": [".py", ".c"], "is_folder": null, "names": "FileName"}'
mocked_file = mock.mock_open(read_data=jsonStr)
with mock.patch('todocli.todo.configmenusetup.open', mocked_file, create=True):
self.config_setup.print_to_file(jsonStr)
def test_PrintToFileError(self):
jsonStr = '{"extensions": [".py", ".c"], "is_folder": null, "names": "FileName"}'
mocked_file = mock.mock_open(read_data=jsonStr)
mocked_file.side_effect = OSError
with mock.patch('todocli.todo.configmenusetup.open', mocked_file, create=True):
with pytest.raises(OSError):
self.config_setup.print_to_file(jsonStr)
| 42.579832 | 95 | 0.642787 |
79493603f870fa35197be8383833265cc30eebdb
| 23,447 |
py
|
Python
|
03_Framework/CrawlerChrome.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 3 |
2022-01-27T07:36:24.000Z
|
2022-02-22T09:32:53.000Z
|
03_Framework/CrawlerChrome.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | null | null | null |
03_Framework/CrawlerChrome.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 1 |
2022-02-02T08:21:39.000Z
|
2022-02-02T08:21:39.000Z
|
from func_timeout import func_timeout, FunctionTimedOut, func_set_timeout
from logging import root
from tmp.path import find_cookies_path
from Ops import delFolder, delProfileFolder, editLogQueue, isThirdParty, newLogQueue, terminateProcessBySiteID, timestamp2Datetime, LocalStorage, visitLogNew, visitLogUpdate
from re import split, sub
from setup import getConfig, getMode, getDriverPath
from PushOps import execBQRows, pushError, stream2BQ
from Objects import VisitData, SiteData, QueueItem
#from selenium import webdriver
from seleniumwire import webdriver # Import from seleniumwire, to get requests
import time
from selenium.webdriver.chrome.options import Options
import sqlite3
import os
from DBOps import DBOps
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import tldextract
from urllib.parse import urlparse, unquote
import random
from datetime import datetime, timedelta
from selenium.common.exceptions import (
MoveTargetOutOfBoundsException,
TimeoutException,
WebDriverException,
)
import threading
from chrome_cookiejar import ChromeCookieJar
#from DataPreProcessingOps import getEasyListRules
from adblockparser import AdblockRules
rules = None
""""
def loadRules():
global rules
rules=AdblockRules(getEasyListRules(), use_re2=True,
max_mem=512*1024*1024)
"""
#rules = AdblockRules(getEasyListRules(), use_re2=True, max_mem=512*1024*1024)
param_root_site_url = ''
param_root_site_id = ''
def loadBrowser(root_site_id):
p_mode = getMode()
# wire_options = {
# 'enable_har': True,
# 'disable_encoding': True # Ask the server not to compress the response
# }
browser_type = p_mode.split('_')[0]
browser_config = p_mode.split('_')[1]
profile_path = getSeleniumProfilePath(root_site_id)
options = Options()
options.add_argument("--log-level=3")
options.add_argument("user-data-dir=" + profile_path)
options_wire = {
'request_storage_base_dir': getSeleniumProfilePath(root_site_id) + '/.storage/'}
# CHROME: normal
if browser_type == 'chrome':
pass
elif browser_type == 'chromeheadless':
options.add_argument("no-sandbox")
options.add_argument("headless")
options.add_argument("user-agent="+ getConfig('user_agent_chrome'))
resolution = getConfig('resolution')
try:
driver = webdriver.Chrome(
executable_path=getDriverPath(), chrome_options=options, seleniumwire_options=options_wire)
driver.set_window_size(resolution[0], resolution[1])
# driver.set_page_load_timeout(getConfig('timeout'))
# driver.implicitly_wait(30)
except:
time.sleep(2)
driver = webdriver.Chrome(
executable_path=getDriverPath(), chrome_options=options, seleniumwire_options=options_wire)
driver.set_window_size(resolution[0], resolution[1])
# driver.set_page_load_timeout(getConfig('timeout'))
# driver.implicitly_wait(30)
return driver
"""
elif browser_type == 'chrome-headless':
options = Options()
options.add_argument("--log-level=3")
if browser_config == 'mobile':
mobile_emulation = {"deviceName": "iPhone X"}
options.add_experimental_option(
"mobileEmulation", mobile_emulation)
elif browser_config == 'accept-cookies':
options.add_extension(
os.getcwd() + '/resources/extensions/accept_cookies.crx')
options.add_argument("user-data-dir=" +
getSeleniumProfilePath(root_site_id))
driver = webdriver.Chrome(
executable_path=getDriverPath(), chrome_options=options)
"""
def getRequest(input_driver, current_url, root_site_url, root_site_id, subpage_id):
reqList = []
for r in input_driver.requests:
req = {}
req['method'] = r.method
# parse headers
headers = str(r.headers)
splitted_headers = headers.split('\n')
list_headers = []
host = ['host', urlparse(r.url).netloc]
list_headers.append(host)
for item in splitted_headers:
item_split = item.split(':')
header_name = item_split[0]
header_value = ':'.join(item_split[1:])
header_pair = [header_name, header_value]
if header_pair != ['', '']:
list_headers.append(header_pair)
req['headers'] = str(list_headers)
req['url'] = r.url
req['time_stamp'] = str(r.date)
if r.headers.get('X-Requested-With'):
req['is_XHR'] = 1
else:
req['is_XHR'] = 0
if r.headers.get('Referer'):
req['referrer'] = r.headers.get('Referer')
else:
req['referrer'] = None
if r.headers['Upgrade'] == 'websocket':
req['is_websocket'] = 1
else:
req['is_websocket'] = 0
content_hash = None
try:
import hashlib
content_hash = hashlib.sha1(
input_driver.page_source.encode()).hexdigest()
except:
pass
req['content_hash'] = content_hash
req['browser_id'] = getMode()
req['is_third_party_channel'] = isThirdParty(root_site_url, r.url)
req['is_third_party_to_top_window'] = None
req['resource_type'] = None
req['top_level_url'] = current_url # input_driver.current_url
req['site_id'] = root_site_id
req['visit_id'] = str(root_site_id) + '-' + str(subpage_id)
req['subpage_id'] = subpage_id
""""
while(rules==None):
time.sleep(0.3)
try:
req['is_tracker'] = int(rules.should_block(r.url))
except:
pass"""
etld = tldextract.extract(r.url)
req['etld'] = etld.domain + '.' + etld.suffix
reqList.append(req)
return reqList
def getResponses(input_driver, root_site_id, subpage_id):
resList = []
for r in input_driver.requests:
if r.response:
res = {}
res['method'] = r.method
# parse headers
headers = str(r.response.headers)
splitted_headers = headers.split('\n')
list_headers = []
for item in splitted_headers:
item_split = item.split(':')
header_name = item_split[0]
header_value = ':'.join(item_split[1:])
header_pair = [header_name, header_value]
if header_pair != ['', '']:
list_headers.append(header_pair)
res['headers'] = str(list_headers)
res['url'] = r.url
res['time_stamp'] = str(r.response.date)
res['response_status'] = r.response.status_code
res['browser_id'] = getMode()
res['response_status_text'] = r.response.reason
# r.response.body # FIXME: delivered as byte and can't always be decoded (e.g., gzip etc...)
res['content_hash'] = None
res['site_id'] = root_site_id
res['subpage_id'] = subpage_id
res['visit_id'] = str(root_site_id) + '-' + str(subpage_id)
etld = tldextract.extract(r.url)
res['etld'] = etld.domain + '.' + etld.suffix
resList.append(res)
return resList
def getCookies(root_site_id, root_site_url, cookiesFromVisits=None, onlyName=False):
global param_root_site_id
path = getSeleniumProfilePath(root_site_id) + '/Default/Cookies'
try:
sqliteConnection = sqlite3.connect(path)
cursor = sqliteConnection.cursor()
rows = cursor.execute(
"SELECT expires_utc, is_secure, is_httponly, samesite, name, host_key, path,creation_utc, encrypted_value FROM cookies").fetchall()
except:
pushError(root_site_id, 'cookie_sql')
sqliteConnection.close()
if onlyName:
cookieList = []
for r in rows:
cookieList.append(r[4])
return cookieList
else:
cookieList = []
try:
cookiejar = ChromeCookieJar(path)
for r in rows:
try:
cookie = {}
cookie['expiry'] = timestamp2Datetime(r[0])
cookie['is_secure'] = r[1]
cookie['is_http_only'] = r[2]
cookie['same_site'] = r[3]
cookie['name'] = r[4]
cookie['host'] = r[5]
cookie['path'] = r[6]
cookie['time_stamp'] = timestamp2Datetime(r[7])
cookie['browser_id'] = getMode()
cookie['site_id'] = root_site_id
cookie['is_host_only'] = None # FIXME:
cookie['is_session'] = None # FIXME:
# cookie['first_party_domain'] = None # FIXME:
cookie['is_third_party'] = isThirdParty(
root_site_url, r[5])
cookie['value'] = ''
try:
for c in cookiejar:
if c.name == cookie['name'] and c.domain == cookie['host']:
cookie['value'] = c.value
except:
cookie['value'] = '[error]'
pass
if cookiesFromVisits is None:
cookie['visit_id'] = str(root_site_id) + '_0'
else:
try:
for item in cookiesFromVisits:
if r[4] in item[1]:
cookie['visit_id'] = str(
root_site_id) + '_' + str(item[0])
break
except:
cookie['visit_id'] = str(root_site_id) + '_0'
cookieList.append(cookie)
except Exception as e:
print('err_cookie', e)
pushError(root_site_id, 'cookie')
finally:
continue
except:
pushError(root_site_id, 'cookie_extract')
return cookieList
def getLocalStorage(root_site_id, driver):
"""
try:
driver=loadBrowser(root_site_id)
except:
pass
try:
func_timeout(3, getURL, args=(driver,root_site_url))
except (FunctionTimedOut, TimeoutException):
driver.execute_script("window.stop();")
tab_restart_browser(driver)
pass
except Exception as e:
pass
"""
locList = []
try:
storage = LocalStorage(driver)
for key in storage.items():
ls = {}
ls['browser_id'] = getMode()
ls['key'] = key
ls['value'] = storage.get(key)
ls['site_id'] = root_site_id
locList.append(ls)
except:
# pushError(root_site_id,'getLocalStorage_chrome')
# driver.quit()
str()
return locList
def doInteraction(input_driver):
from selenium.webdriver.common.keys import Keys
element_id = 'body'
try:
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.PAGE_DOWN)
except:
element_id = 'html'
try:
# simulate human interactions
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.PAGE_DOWN)
time.sleep(0.3)
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.PAGE_DOWN)
time.sleep(0.7)
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.TAB)
time.sleep(0.3)
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.TAB)
time.sleep(0.3)
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.TAB)
time.sleep(0.5)
input_driver.find_element_by_css_selector(
element_id).send_keys(Keys.END)
time.sleep(1.3)
except:
pass
"""
https://
http://
"""
def getUrl2(driver, url, state):
#print('I HAVE')
# print(driver.requests)
# print('DELETED!')
    del driver.requests  # clear requests captured so far (e.g. Chrome's own startup traffic)
driver.get(url)
try:
WebDriverWait(driver, 0.5).until(EC.alert_is_present())
alert = driver.switch_to.alert
alert.dismiss()
time.sleep(1)
except (TimeoutException, WebDriverException):
pass
#print('URL loaded! ', url)
state[0] = True
def getURL(driver, url):
from collections import deque
state = deque(maxlen=1)
state.append(False)
visit_start = datetime.now()
t = threading.Thread(target=getUrl2,
args=(driver, url, state))
t.start()
while(True):
if state[0] == True:
return
else:
final = (datetime.now() - visit_start)
total_sec = final.total_seconds()
# print(str(visit_start), ' - ', str(total_sec),
# ' took and url loading... ', url,)
#print('URL still loading - ', str(total_sec),'sec. - ', url)
if total_sec >= getConfig('timeout'):
# driver.execute_script("window.stop();")
print('Raise TimeoutException! ', url)
del state
raise TimeoutException
else:
pass
time.sleep(1)
def visitSite(url, root_site_url, root_site_id, is_subpage, p_visit_id, subpage_id, is_last_visit):
# We follow OpenWPM visit-strategy here! (s. GetCommand & browser_commands)
visit = VisitData(url)
visit.timeout = 0
try:
driver = loadBrowser(root_site_id)
visit.state = 1
except:
pushError(root_site_id, 'loadBrowser')
visit.state = -1
try:
visitLogNew(root_site_id, url, subpage_id)
#func_timeout(getConfig('timeout'), getURL, args=(driver, url))
getURL(driver, url)
except (FunctionTimedOut, TimeoutException):
visit.timeout = 1
except Exception as e:
pushError(root_site_id, 'getURL-Timeout')
visit.state = -1
finally:
visitLogUpdate(root_site_id, subpage_id,
state=visit.state, timeout=visit.timeout)
try:
try:
if '_interaction' in getMode():
doInteraction(driver)
except:
pushError(root_site_id, 'doInteraction')
if is_last_visit:
visit.localStorage = getLocalStorage(root_site_id, driver)
try:
current_url = driver.current_url
tab_restart_browser(driver)
except:
pass
visit.visit_id = p_visit_id
try:
visit.requests = getRequest(
driver, current_url, root_site_url=root_site_url, root_site_id=root_site_id, subpage_id=subpage_id)
except:
pass
try:
visit.responses = getResponses(
driver, root_site_id=root_site_id, subpage_id=subpage_id)
except:
pass
visit.subpage = is_subpage
print('visit ID ', p_visit_id)
# crawlItem.visit_id = p_visit_id
if visit.subpage is True:
visit.root_url = root_site_url
#del driver.requests
driver.quit()
time.sleep(1)
try:
visit.cookieList = getCookies(
root_site_id, root_site_url, onlyName=True)
except:
pushError(root_site_id, 'cookies_only_name')
except:
driver.quit()
time.sleep(1)
pushError(root_site_id, 'visitSite')
return visit
# code adopted from openwpm
def tab_restart_browser(webdriver):
close_other_windows(webdriver)
if webdriver.current_url.lower() == "about:blank":
return
# webdriver.execute_script("window.open('')")
webdriver.close()
assert len(webdriver.window_handles) == 1
webdriver.switch_to.window(webdriver.window_handles[0])
# code adopted from openwpm
def close_other_windows(input_driver):
main_handle = input_driver.current_window_handle
windows = input_driver.window_handles
if len(windows) > 1:
for window in windows:
if window != main_handle:
input_driver.switch_to.window(window)
input_driver.close()
input_driver.switch_to.window(main_handle)
def visitSubpages(root_site_id, root_site_url, subpages, p_crawlDataList):
print('visiting subpages for ', root_site_url)
root_site_id = root_site_id
crawlDataList = p_crawlDataList
if subpages is None:
return crawlDataList
is_last_visit = False
for index, sub in enumerate(subpages):
visit_id = str(root_site_id) + '_' + str(index + 1)
subpage_id = index + 1
if subpage_id == len(subpages):
is_last_visit = True
try:
visitData = visitSite(root_site_url=root_site_url, url=sub,
root_site_id=root_site_id, is_subpage=True, p_visit_id=visit_id, subpage_id=subpage_id, is_last_visit=is_last_visit)
crawlDataList.append(visitData)
except:
pushError(root_site_id, 'visitSubpages')
#visitLogUpdate(root_site_id, subpage_id, state=-1)
return crawlDataList
def getSeleniumProfilePath(root_site_id):
path = ''
if os.name == 'nt':
path = os.getcwd() + '/profiles/chrome/' + str(root_site_id) + '/'
else:
path = os.getcwd() + '/profiles/chrome/' + str(root_site_id) + '/'
return path
def changeSiteState(siteID, state, timeout=0):
query = "UPDATE sites SET state_{} = {}, timeout={} where id = {} ".format(
getMode(), state, timeout, siteID)
print(query)
db.exec(query)
def runChromeInstance(p_queue, p_queue_item):
"""
t = threading.Thread(target=loadRules)
t.start()
"""
print('\n\n\n\n I RUN A CHROME INSTANCE\n\n\n\n')
root_site_url = p_queue_item.url
root_site_id = p_queue_item.site_ID
try:
delFolder(getSeleniumProfilePath(root_site_id))
except Exception as e:
print(e)
pass
try:
siteData = SiteData()
siteData.browser_id = getMode()
siteData.site_id = root_site_id
visitList = []
is_last_visit = False
if p_queue_item.subpages == None:
is_last_visit = True
try:
#visitLogNew(root_site_id, root_site_url, 0)
visit_id = str(root_site_id) + "_0"
visitData = visitSite(root_site_url, root_site_url, root_site_id, is_subpage=False,
p_visit_id=visit_id, subpage_id=0, is_last_visit=is_last_visit)
if is_last_visit:
siteData.localStorage = visitData.localStorage
visitList.append(visitData) # crawl root site
except:
pushError(root_site_id, 'runChromeInstance')
# crawl subpages
if p_queue_item.subpages != None:
subpages = p_queue_item.subpages.split('\n')
visitList = visitSubpages(
root_site_id, root_site_url, subpages, visitList)
siteData.localStorage = visitList[len(visitList)-1].localStorage
siteData.visitData = visitList
# driver.quit()
cookieList = []
for item in visitList:
cookieList.append(
[int((item.visit_id).split('_')[1]), item.cookieList])
cookieList.sort(key=lambda x: x[0])
siteData.cookies = getCookies(
root_site_id, root_site_url, cookiesFromVisits=cookieList)
"""#2768 - 11
try:
siteData.localStorage = getLocalStorage(root_site_url, root_site_id)
except:
pushError(root_site_id,'chrome_localStorage')
pass
"""
stream2BQ(siteData)
try:
delFolder(getSeleniumProfilePath(root_site_id))
# str() #TODO: Activate it!
except:
pushError(root_site_id, 'delFolder')
pass
siteData.state = 2
siteData.state_text = 'successful'
except (FunctionTimedOut, TimeoutException):
siteData.timeout = 1
siteData.state = 4
siteData.state_text = 'timeout'
pushError(root_site_id, 'timeout')
except:
# -1: not startet, 0:waiting , 1: crawling , 2: success, 3: error, 4: timeout
siteData.state = 3
siteData.state_text = 'error' # fixme: unsuccess :)
pushError(root_site_id, 'chrome_crawler')
finally:
changeSiteState(root_site_id, siteData.state)
return siteData
if __name__ == "__main__":
import sys
id = sys.argv[1]
ready = False
try:
ready = eval(sys.argv[2])
except:
pass
import sys
db = DBOps()
while(not ready):
query = "SELECT 1 FROM sites WHERE ready IS TRUE and id=" + \
str(id)
rows = db.select(query)
if len(rows) > 0:
print('Site ready, so i restart!')
import subprocess
subprocess.Popen(['/home/user/miniconda3/envs/openwpm/bin/python3',
'CrawlerChrome.py', str(id), str(True), str(datetime.now())])
os.system('kill %d' % os.getpid())
else:
query = "SELECT 1 FROM sites WHERE state_chrome_desktop_ger = 0 AND state_chrome_desktop_usa = 0 AND state_chrome_desktop_jp = 0 AND state_chrome_interaction_ger = 0 AND state_chrome_interaction_usa = 0 AND state_chrome_interaction_jp = 0 AND state_chromeheadless_desktop_ger = 0 AND state_chromeheadless_desktop_usa = 0 AND state_chromeheadless_desktop_jp = 0 AND state_chromeheadless_interaction_ger = 0 AND state_chromeheadless_interaction_usa = 0 AND state_chromeheadless_interaction_jp = 0 AND state_openwpm_desktop_ger = 0 AND state_openwpm_desktop_usa = 0 AND state_openwpm_desktop_jp = 0 AND state_openwpm_interaction_ger = 0 AND state_openwpm_interaction_usa = 0 AND state_openwpm_interaction_jp = 0 AND state_openwpmheadless_desktop_ger = 0 AND state_openwpmheadless_desktop_usa = 0 AND state_openwpmheadless_desktop_jp = 0 AND state_openwpmheadless_interaction_ger = 0 AND state_openwpmheadless_interaction_usa = 0 AND state_openwpmheadless_interaction_jp = 0 AND id=" + \
str(id)
rows = db.select(query)
if len(rows) != 0:
query = "UPDATE sites SET ready=true WHERE id=" + \
str(id)
db.exec(query)
time.sleep(3)
continue
r = db.select(
'select id, concat(scheme, site), subpages from sites where id= '+str(sys.argv[1]))[0] # TODO: ADD state for 2. try
queue_item = QueueItem(r[0], r[1], 'waiting', r[2], None, None, None)
changeSiteState(id, 1)
runChromeInstance(None, queue_item)
print('finish:', queue_item.url)
os.system('kill %d' % os.getpid())
| 33.495714 | 1,018 | 0.595982 |
794936226d6ee2480d0471399e3bdd6a0e5ac621
| 3,985 |
py
|
Python
|
representing_code/GenerateAST.py
|
varghesetom/PyLox
|
0f993f8f9ffc967a677d80b4b233205c4485e5e4
|
[
"MIT"
] | null | null | null |
representing_code/GenerateAST.py
|
varghesetom/PyLox
|
0f993f8f9ffc967a677d80b4b233205c4485e5e4
|
[
"MIT"
] | null | null | null |
representing_code/GenerateAST.py
|
varghesetom/PyLox
|
0f993f8f9ffc967a677d80b4b233205c4485e5e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Instead of directly writing out each Statement and Expression syntax node, this file is used to generate the "Expr.py" and
"Stmt.py" files that hold the various node classes used as part of our grammar for Lox. Honestly, there's not much
need for this Expr.py file to be placed in tool/, but the original version had it that way, so I'll keep it the same here as well.
'''
import sys
from typing import List
class GenerateAST:
def __init__(self):
try:
if len(sys.argv) != 2:
print("Usage: ./GenerateAST.py <output_directory>")
sys.exit(1)
self.output_dir = sys.argv[1]
except IOError:
print("ruh-oh can't find directory")
def defineAST(self, base_name : str, types : List):
path = self.output_dir + "/" + base_name + ".py"
with open (path, "w") as f:
f.write("#!/usr/bin/env python\n\n")
f.write("import sys\n")
f.write("sys.path.insert(1, '../../scanner')\n")
f.write("from abc import ABC, abstractmethod\n")
if base_name == "Stmt": ## Statements will need to import created Exprs
f.write("from Expr import Expr\n")
f.write("from Token import Token\n\n")
f.write(f"class {base_name}(ABC):\n\n")
f.write("\t@abstractmethod\n")
f.write(f"\tdef __init__(self, left, operator, right):\n\t\tpass\n")
for line in types:
                assert len(line.split(":")) == 2, "Line is not properly formatted: there must be exactly one ':' separating the class name from its fields"
class_name = line.split(":")[0]
                fields = line.split(":")[1]  ## each field has its "static type" along with its identifier
                fields = fields.split(",")   ## Binary has multiple comma-separated fields while Literal only has one (value)
arg_fields = [field.split()[1] for field in fields]
arg_fields = ", ".join(arg_fields)
f.write(f"\nclass {class_name}({base_name}):\n\n")
f.write(f"\tdef __init__(self, {arg_fields}):\n")
for field in fields:
static_type, identifier = field.split()
#if identifier != "else_branch" and class_name not in ["Return_Statement", "Class_Statement"]:
#f.write(f"\t\tassert isinstance({identifier}, {static_type}), '{identifier} needs to match {static_type} type'\n")
f.write(f"\t\tself.{identifier} = {identifier}\n")
f.write("\n\tdef accept(self, visitor):\n")
f.write(f"\t\treturn visitor.visit_{class_name}(self)\n")
if __name__ == "__main__":
t = GenerateAST()
t.defineAST("Expr", [
"Assign:Token name, Expr value",
"Binary:Expr left, Token operator, Expr right",
"Call:Expr callee, Token paren, list arguments",
"Get:Expr object, Token name",
"Set:Expr object, Token name, Expr value",
"Super:Token keyword, Token method",
"This:Token keyword",
"Grouping:Expr expression",
"Literal:object value",
"Logical:Expr left, Token operator, Expr right",
"Unary:Token operator, Expr right",
"Variable:Token name"
])
t.defineAST("Stmt", [
"Block:list statements",
"Class_Statement:Token name, Expr superclass, list methods",
"Expression_Statement:Expr expression",
"Function_Statement:Token name, list params, list body",
"If_Statement:Expr condition, Stmt then_branch, Stmt else_branch",
"Print_Statement:Expr expression",
"Return_Statement:Token keyword, Expr value",
"Var_Statement:Token name, Expr initializer",
"While_Statement:Expr condition, Stmt body"
])
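    # For reference, a minimal sketch of what defineAST emits for one type line such as
    # "Binary:Expr left, Token operator, Expr right" (illustrative; the real output uses
    # tabs exactly as written in defineAST above):
    #
    #   class Binary(Expr):
    #
    #       def __init__(self, left, operator, right):
    #           self.left = left
    #           self.operator = operator
    #           self.right = right
    #
    #       def accept(self, visitor):
    #           return visitor.visit_Binary(self)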
| 49.8125 | 174 | 0.591217 |
7949367cd4e8da77ff491b09f96ad901afc2e2cf
| 980 |
py
|
Python
|
tvae/nn/modules/position_wise.py
|
khucnam/Efflux_TransVAE
|
7da1cc614f016d5520648f4853e34e2362181aa7
|
[
"MIT"
] | 43 |
2019-05-15T21:58:56.000Z
|
2022-03-06T03:44:26.000Z
|
tvae/nn/modules/position_wise.py
|
khucnam/Efflux_TransVAE
|
7da1cc614f016d5520648f4853e34e2362181aa7
|
[
"MIT"
] | 1 |
2020-01-11T12:03:00.000Z
|
2020-01-11T12:03:00.000Z
|
tvae/nn/modules/position_wise.py
|
khucnam/Efflux_TransVAE
|
7da1cc614f016d5520648f4853e34e2362181aa7
|
[
"MIT"
] | 6 |
2019-07-24T18:15:41.000Z
|
2022-01-13T22:17:58.000Z
|
from torch import nn
class PositionWise(nn.Module):
def __init__(self, dim_m, dim_i, dropout=0.1):
"""Position-wise Feed-Forward Network.
Args:
dim_m (int): input and output dimension.
dim_i (int): inner dimension.
dropout (float, optional): dropout probability.
Inputs:
- **input** of shape `(batch, *, dim_m)`: a float tensor.
Outputs:
- **output** of shape `(batch, *, dim_m)`: a float tensor.
"""
super(PositionWise, self).__init__()
self.feedforward = nn.Sequential(
nn.Linear(dim_m, dim_i), nn.ReLU(), nn.Linear(dim_i, dim_m),
nn.Dropout(dropout))
self.normalization = nn.LayerNorm(dim_m, eps=1e-12)
def forward(self, input):
# There's nothing difficult here.
residual = input
output = self.feedforward(input)
output = self.normalization(output + residual)
return output
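# A minimal usage sketch (the dimensions below are illustrative assumptions, not fixed by the module):
#
#   import torch
#   pw = PositionWise(dim_m=512, dim_i=2048, dropout=0.1)
#   x = torch.randn(32, 10, 512)   # (batch, seq_len, dim_m)
#   y = pw(x)                      # same shape as x: (32, 10, 512)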
| 30.625 | 72 | 0.581633 |
79493768334886b9e9a97f6b0966cc6b9693297c
| 530 |
py
|
Python
|
llluiop/0014/student.py
|
saurabh896/python-1
|
f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7
|
[
"MIT"
] | 3,976 |
2015-01-01T15:49:39.000Z
|
2022-03-31T03:47:56.000Z
|
llluiop/0014/student.py
|
dwh65416396/python
|
1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a
|
[
"MIT"
] | 97 |
2015-01-11T02:59:46.000Z
|
2022-03-16T14:01:56.000Z
|
llluiop/0014/student.py
|
dwh65416396/python
|
1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a
|
[
"MIT"
] | 3,533 |
2015-01-01T06:19:30.000Z
|
2022-03-28T13:14:54.000Z
|
#!/usr/bin/env python
import xlwt
import json
def load_data(filepath):
    with open(filepath, "r") as f:
        return json.load(f)
def write_data_to_xls(data):
xls = xlwt.Workbook()
sheet = xls.add_sheet("student")
for i in range(len(data)):
sheet.write(i, 0, i+1)
json_data = data[str(i+1)]
for j in range(len(json_data)):
sheet.write(i, j+1, json_data[j])
xls.save('student.xls')
if __name__ == '__main__':
data = load_data("student.txt")
write_data_to_xls(data)
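# The expected layout of student.txt is an assumption inferred from load_data/write_data_to_xls:
# a JSON object keyed by 1-based row numbers, each value being the list written after the id, e.g.
#   {"1": ["Alice", 90, 85], "2": ["Bob", 75, 80]}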
| 16.5625 | 45 | 0.607547 |
79493774e129102794f9fa453c38a58eddf6b266
| 2,244 |
py
|
Python
|
scripts/tree2taxa.py
|
CGATOxford/Optic
|
2df92e953b5139ff4e5c383cb4383e6367cd47f1
|
[
"MIT"
] | null | null | null |
scripts/tree2taxa.py
|
CGATOxford/Optic
|
2df92e953b5139ff4e5c383cb4383e6367cd47f1
|
[
"MIT"
] | null | null | null |
scripts/tree2taxa.py
|
CGATOxford/Optic
|
2df92e953b5139ff4e5c383cb4383e6367cd47f1
|
[
"MIT"
] | 1 |
2020-03-31T22:55:50.000Z
|
2020-03-31T22:55:50.000Z
|
"""
tree2taxa.py - extract taxa in a tree
=====================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script reads a collection of trees from stdin
and for each tree outputs the taxa found within the
tree.
Usage
-----
Example::

   cat trees.newick | python tree2taxa.py

Type::

   python tree2taxa.py --help

for command line help.
Command line options
--------------------
"""
import sys
import CGAT.Experiment as E
import CGAT.TreeTools as TreeTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(version="%prog version: $Id: tree2taxa.py 2782 2009-09-10 11:40:29Z andreas $",
usage=globals()["__doc__"])
parser.add_option("--skip-trees", dest="skip_trees", action="store_true",
help="do not output tree names in third field [default=%default].")
parser.set_defaults(
skip_trees=False
)
(options, args) = E.Start(parser, add_pipe_options=True)
nexus = TreeTools.Newick2Nexus(sys.stdin)
if options.loglevel >= 1:
options.stdlog.write(
"# read %i trees from stdin.\n" % len(nexus.trees))
ntree = 0
ntotal = len(nexus.trees)
if ntotal == 1:
options.stdout.write("taxon\n")
else:
if options.skip_trees:
options.stdout.write("taxon\ttree\n")
else:
options.stdout.write("taxon\ttree\tname\n")
for tree in nexus.trees:
ntree += 1
taxa = TreeTools.GetTaxa(tree)
if ntotal == 1:
for t in taxa:
options.stdout.write("%s\n" % (t))
elif options.skip_trees:
for t in taxa:
options.stdout.write("%s\t%i\n" % (t, ntree))
else:
for t in taxa:
options.stdout.write("%s\t%i\t%s\n" % (t, ntree, tree.name))
if options.loglevel >= 1:
options.stdlog.write("# ntotal=%i\n" % (ntotal))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 21.576923 | 107 | 0.581105 |
794937c48be080867c99b21131e4f8f38609787f
| 817 |
py
|
Python
|
nagios/check_hads_ingest.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | 1 |
2019-10-07T17:01:24.000Z
|
2019-10-07T17:01:24.000Z
|
nagios/check_hads_ingest.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
nagios/check_hads_ingest.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
"""
Check how much HADS data we have
"""
from __future__ import print_function
import sys
from pyiem.util import get_dbconn
IEM = get_dbconn('iem', user='nobody')
icursor = IEM.cursor()
def check():
icursor.execute("""
SELECT count(*) from current_shef
WHERE valid > now() - '1 hour'::interval
""")
row = icursor.fetchone()
return row[0]
def main():
"""Go Main."""
count = check()
if count > 10000:
print('OK - %s count |count=%s;1000;5000;10000' % (count, count))
return 0
elif count > 5000:
print('WARNING - %s count |count=%s;1000;5000;10000' % (count, count))
return 1
else:
print('CRITICAL - %s count |count=%s;1000;5000;10000' % (count, count))
return 2
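# Note: the return values follow the standard Nagios plugin convention
# (0 = OK, 1 = WARNING, 2 = CRITICAL), and the text after '|' is perfdata
# in the usual 'label=value;warn;crit;...' form.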
if __name__ == '__main__':
sys.exit(main())
| 21.5 | 79 | 0.589963 |
794938521cca6e97f1bc080850a42161edbe5089
| 1,699 |
py
|
Python
|
cpfs_generator_validator/cpf_generator.py
|
Lokiatos/Projects_Python
|
44c6d912bc9ce742008eb129bea52963726b8382
|
[
"MIT"
] | 1 |
2020-06-26T17:57:35.000Z
|
2020-06-26T17:57:35.000Z
|
cpfs_generator_validator/cpf_generator.py
|
Lokiatos/Projects_Python
|
44c6d912bc9ce742008eb129bea52963726b8382
|
[
"MIT"
] | null | null | null |
cpfs_generator_validator/cpf_generator.py
|
Lokiatos/Projects_Python
|
44c6d912bc9ce742008eb129bea52963726b8382
|
[
"MIT"
] | null | null | null |
# Objective: program a CPF (Brazilian taxpayer ID) generator
# Programmer: Hugo Leça Ribeiro
# Date: 27/04/2020
def main():
try:
numbers_cpf = int(input('How many CPFs do you want to generate? '))
for number in range(0, numbers_cpf):
user_cpf = generate_cpf()
complete_cpf = calculation_digits(user_cpf)
print(f'We generated this CPF: {complete_cpf}')
except ValueError:
        print('Sorry, invalid value entered.')
def generate_cpf():
from random import randint
while True:
future_user_cpf = str(randint(100000000, 999999998))
if future_user_cpf == (future_user_cpf[0] * 9):
continue
return future_user_cpf
def calculation_digits(cpf_to_do_calculation):
first_digit = calculation_first_digit(cpf_to_do_calculation)
second_digit = calculation_second_digit(first_digit, cpf_to_do_calculation)
cpf_complete = cpf_to_do_calculation + str(first_digit) + str(second_digit)
return cpf_complete
def calculation_first_digit(cpf_to_calc):
total_first_digit = 0
for index, count in enumerate(range(10, 1, -1)):
total_first_digit += int(cpf_to_calc[index]) * count
module = total_first_digit % 11
if module < 2:
first_digit = 0
else:
first_digit = 11 - module
return first_digit
def calculation_second_digit(first_digit, cpf_to_test):
total_second_digit = 0
for index, count in enumerate(range(11, 2, -1)):
total_second_digit += int(cpf_to_test[index]) * count
total_second_digit += first_digit * 2
second_digit = 11 - (total_second_digit % 11)
if second_digit > 9:
second_digit = 0
return second_digit
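# Worked example (illustrative): for the base digits 111444777 the first check digit
# uses weights 10..2: 1*10 + 1*9 + 1*8 + 4*7 + 4*6 + 4*5 + 7*4 + 7*3 + 7*2 = 162,
# 162 % 11 = 8, so the digit is 11 - 8 = 3. The second digit uses weights 11..3 over
# the same nine digits (198) plus first_digit * 2 (6) = 204, 204 % 11 = 6, so the
# digit is 11 - 6 = 5, giving the complete CPF 11144477735.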
main()
| 29.293103 | 79 | 0.683932 |
7949398aac92fdac6d961d1a8ad66b2dd8d61df0
| 7,123 |
py
|
Python
|
tensorflow_model_analysis/eval_saved_model/graph_ref_test.py
|
yifanmai/model-analysis
|
ae11318876ac6233ded77ac30c8aacc94da691d3
|
[
"Apache-2.0"
] | 2 |
2019-10-20T05:40:09.000Z
|
2019-10-31T17:25:51.000Z
|
tensorflow_model_analysis/eval_saved_model/graph_ref_test.py
|
yifanmai/model-analysis
|
ae11318876ac6233ded77ac30c8aacc94da691d3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/eval_saved_model/graph_ref_test.py
|
yifanmai/model-analysis
|
ae11318876ac6233ded77ac30c8aacc94da691d3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for graph_ref module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import encoding
from tensorflow_model_analysis.eval_saved_model import graph_ref
from tensorflow.core.protobuf import meta_graph_pb2
class GraphRefTest(tf.test.TestCase):
def setUp(self):
self.longMessage = True # pylint: disable=invalid-name
def testExtractSignatureOutputsWithPrefix(self):
signature_def = meta_graph_pb2.SignatureDef()
def make_tensor_info(name):
return tf.compat.v1.saved_model.utils.build_tensor_info(
tf.constant(0.0, name=name))
# Test for single entry (non-dict) tensors.
signature_def.inputs['labels'].CopyFrom(make_tensor_info('labels'))
signature_def.outputs['predictions'].CopyFrom(
make_tensor_info('predictions'))
signature_def.outputs['metrics/mean/value'].CopyFrom(
make_tensor_info('mean_value'))
signature_def.outputs['metrics/mean/update_op'].CopyFrom(
make_tensor_info('mean_update'))
# This case is to check that things like
# predictions/predictions are okay.
signature_def.outputs['prefix'].CopyFrom(make_tensor_info('prefix'))
signature_def.outputs['prefix1'].CopyFrom(make_tensor_info('prefix1'))
signature_def.outputs['prefix2'].CopyFrom(make_tensor_info('prefix2'))
signature_def.outputs['prefix/stuff'].CopyFrom(
make_tensor_info('prefix/stuff'))
signature_def.outputs['prefix/sub/more'].CopyFrom(
make_tensor_info('prefix/sub/more'))
self.assertDictEqual(
{'__labels': signature_def.inputs['labels']},
graph_ref.extract_signature_inputs_or_outputs_with_prefix(
'labels', signature_def.inputs, '__labels'))
self.assertDictEqual(
{'predictions': signature_def.outputs['predictions']},
graph_ref.extract_signature_inputs_or_outputs_with_prefix(
'predictions', signature_def.outputs))
self.assertDictEqual(
{
'mean/value': signature_def.outputs['metrics/mean/value'],
'mean/update_op': signature_def.outputs['metrics/mean/update_op']
},
graph_ref.extract_signature_inputs_or_outputs_with_prefix(
'metrics', signature_def.outputs))
self.assertDictEqual(
{
'prefix': signature_def.outputs['prefix'],
'prefix1': signature_def.outputs['prefix1'],
'prefix2': signature_def.outputs['prefix2'],
'stuff': signature_def.outputs['prefix/stuff'],
'sub/more': signature_def.outputs['prefix/sub/more'],
},
graph_ref.extract_signature_inputs_or_outputs_with_prefix(
'prefix', signature_def.outputs))
def testGetNodeMapBasic(self):
meta_graph_def = meta_graph_pb2.MetaGraphDef()
meta_graph_def.collection_def[
'my_collection/%s' % encoding.KEY_SUFFIX].bytes_list.value[:] = map(
encoding.encode_key, ['alpha', 'bravo', 'charlie'])
meta_graph_def.collection_def[
'my_collection/fruits'].bytes_list.value[:] = [
b'apple', b'banana', b'cherry'
]
expected = {
'alpha': {
'fruits': b'apple'
},
'bravo': {
'fruits': b'banana'
},
'charlie': {
'fruits': b'cherry'
}
}
self.assertDictEqual(
expected,
graph_ref.get_node_map(meta_graph_def, 'my_collection', ['fruits']))
def testGetNodeMapEmpty(self):
meta_graph_def = meta_graph_pb2.MetaGraphDef()
self.assertDictEqual({},
graph_ref.get_node_map(meta_graph_def, 'my_collection',
['fruits']))
def testGetNodeMapMultiple(self):
meta_graph_def = meta_graph_pb2.MetaGraphDef()
meta_graph_def.collection_def[
'my_collection/%s' % encoding.KEY_SUFFIX].bytes_list.value[:] = map(
encoding.encode_key, ['alpha', 'bravo', 'charlie'])
meta_graph_def.collection_def[
'my_collection/fruits'].bytes_list.value[:] = [
b'apple', b'banana', b'cherry'
]
meta_graph_def.collection_def[
'my_collection/animals'].bytes_list.value[:] = [
b'aardvark', b'badger', b'camel'
]
expected = {
'alpha': {
'fruits': b'apple',
'animals': b'aardvark'
},
'bravo': {
'fruits': b'banana',
'animals': b'badger'
},
'charlie': {
'fruits': b'cherry',
'animals': b'camel'
}
}
self.assertDictEqual(
expected,
graph_ref.get_node_map(meta_graph_def, 'my_collection',
['fruits', 'animals']))
def testGetNodeMapInGraph(self):
g = tf.Graph()
with g.as_default():
apple = tf.constant(1.0)
banana = tf.constant(2.0)
cherry = tf.constant(3.0)
aardvark = tf.constant('a')
badger = tf.constant('b')
camel = tf.constant('c')
meta_graph_def = meta_graph_pb2.MetaGraphDef()
meta_graph_def.collection_def[
'my_collection/%s' % encoding.KEY_SUFFIX].bytes_list.value[:] = map(
encoding.encode_key, ['alpha', 'bravo', 'charlie'])
meta_graph_def.collection_def['my_collection/fruits'].any_list.value.extend(
map(encoding.encode_tensor_node, [apple, banana, cherry]))
meta_graph_def.collection_def[
'my_collection/animals'].any_list.value.extend(
map(encoding.encode_tensor_node, [aardvark, badger, camel]))
expected = {
'alpha': {
'fruits': apple,
'animals': aardvark,
},
'bravo': {
'fruits': banana,
'animals': badger,
},
'charlie': {
'fruits': cherry,
'animals': camel,
}
}
self.assertDictEqual(
expected,
graph_ref.get_node_map_in_graph(meta_graph_def, 'my_collection',
['fruits', 'animals'], g))
def testGetNodeInGraph(self):
g = tf.Graph()
with g.as_default():
apple = tf.constant(1.0)
meta_graph_def = meta_graph_pb2.MetaGraphDef()
meta_graph_def.collection_def['fruit_node'].any_list.value.extend(
[encoding.encode_tensor_node(apple)])
self.assertEqual(
apple, graph_ref.get_node_in_graph(meta_graph_def, 'fruit_node', g))
if __name__ == '__main__':
tf.test.main()
| 34.916667 | 80 | 0.639197 |
79493b0785c4581c1ecb5c455b3e7154481e8345
| 751 |
py
|
Python
|
rocketgram/api/create_chat_invite_link.py
|
rocketbots/rocketgram
|
e509dcfad85d47a2449caf6dd302ec8581f95bf6
|
[
"MIT"
] | 16 |
2019-02-27T20:15:52.000Z
|
2019-08-06T10:59:41.000Z
|
rocketgram/api/create_chat_invite_link.py
|
rocketbots/rocketgram
|
e509dcfad85d47a2449caf6dd302ec8581f95bf6
|
[
"MIT"
] | 1 |
2022-01-16T13:56:45.000Z
|
2022-01-16T13:56:45.000Z
|
rocketgram/api/create_chat_invite_link.py
|
rocketbots/rocketgram
|
e509dcfad85d47a2449caf6dd302ec8581f95bf6
|
[
"MIT"
] | 3 |
2019-03-19T16:01:22.000Z
|
2019-04-05T15:58:12.000Z
|
# Copyright (C) 2015-2022 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from datetime import datetime
from typing import Union, Optional
from .request import Request
from .utils import ChatInviteLinkResultMixin
@dataclass(frozen=True)
class CreateChatInviteLink(ChatInviteLinkResultMixin, Request):
"""\
Represents CreateChatInviteLink request object:
https://core.telegram.org/bots/api#createchatinvitelink
"""
chat_id: Union[int, str]
name: Optional[str] = None
expire_date: Optional[datetime] = None
member_limit: Optional[int] = None
creates_join_request: Optional[bool] = None
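# A minimal usage sketch (illustrative; how the request is actually sent depends on
# the surrounding Rocketgram context/bot wiring, which is not shown here):
#
#   request = CreateChatInviteLink(chat_id='@my_channel', name='spring promo', member_limit=5)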
| 28.884615 | 69 | 0.76032 |
79493b26c0ba6d4e080606ee9c5d95a79ffaa2f9
| 29,729 |
py
|
Python
|
pandas/core/generic.py
|
flexlee/pandas
|
749318f4a1b4ae56d3eee507db312052532e7f4b
|
[
"BSD-2-Clause"
] | null | null | null |
pandas/core/generic.py
|
flexlee/pandas
|
749318f4a1b4ae56d3eee507db312052532e7f4b
|
[
"BSD-2-Clause"
] | null | null | null |
pandas/core/generic.py
|
flexlee/pandas
|
749318f4a1b4ae56d3eee507db312052532e7f4b
|
[
"BSD-2-Clause"
] | null | null | null |
# pylint: disable=W0231,E1101
from datetime import timedelta
import numpy as np
from pandas.core.index import MultiIndex
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.offsets import DateOffset
import pandas.core.common as com
import pandas.lib as lib
class PandasError(Exception):
pass
class PandasObject(object):
_AXIS_NUMBERS = {
'index': 0,
'columns': 1
}
_AXIS_ALIASES = {}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
def save(self, path):
com.save(self, path)
@classmethod
def load(cls, path):
return com.load(path)
#----------------------------------------------------------------------
# Axis name business
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, int):
if axis in cls._AXIS_NAMES:
return axis
else:
raise Exception('No %d axis' % axis)
else:
return cls._AXIS_NUMBERS[axis]
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, basestring):
if axis in cls._AXIS_NUMBERS:
return axis
else:
raise Exception('No axis named %s' % axis)
else:
return cls._AXIS_NAMES[axis]
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
return np.abs(self)
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
"""
try:
return self[key]
except KeyError:
return default
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns
Parameters
----------
by : mapping function / list of functions, dict, Series, or tuple /
list of column names.
Called on each element of the object index to determine the groups.
If a dict or Series is passed, the Series or dict VALUES will be
used to determine the groups
axis : int, default 0
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels
as_index : boolean, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output
sort : boolean, default True
Sort group keys. Get better performance by turning this off
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
Examples
--------
# DataFrame result
>>> data.groupby(func, axis=0).mean()
# DataFrame result
>>> data.groupby(['col1', 'col2'])['col3'].mean()
# DataFrame with hierarchical index
>>> data.groupby(['col1', 'col2']).mean()
Returns
-------
GroupBy object
"""
from pandas.core.groupby import groupby
return groupby(self, by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys)
def asfreq(self, freq, method=None, how=None, normalize=False):
"""
Convert all TimeSeries inside to specified frequency using DateOffset
objects. Optionally provide fill method to pad/backfill missing values.
Parameters
----------
freq : DateOffset object, or string
method : {'backfill', 'bfill', 'pad', 'ffill', None}
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
            backfill / bfill: use NEXT valid observation to fill gap
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
Returns
-------
converted : type of caller
"""
from pandas.tseries.resample import asfreq
return asfreq(self, freq, method=method, how=how,
normalize=normalize)
def at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM)
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : type of caller
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
return self.take(indexer)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM)
Parameters
----------
start_time : datetime.time or string
end_time : datetime.time or string
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : type of caller
"""
try:
indexer = self.index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
return self.take(indexer)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def resample(self, rule, how=None, axis=0, fill_method=None,
closed='right', label='right', convention='start',
kind=None, loffset=None, limit=None, base=0):
"""
Convenience method for frequency conversion and resampling of regular
time-series data.
Parameters
----------
rule : the offset string or object representing target conversion
        how : string, method for down- or re-sampling, defaults to 'mean' for
            downsampling
fill_method : string, fill_method for upsampling, default None
axis : int, optional, default 0
closed : {'right', 'left'}, default 'right'
Which side of bin interval is closed
label : {'right', 'left'}, default 'right'
Which bin edge label to label bucket with
convention : {'start', 'end', 's', 'e'}
loffset : timedelta
Adjust the resampled time labels
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0
"""
from pandas.tseries.resample import TimeGrouper
sampler = TimeGrouper(rule, label=label, closed=closed, how=how,
axis=axis, kind=kind, loffset=loffset,
fill_method=fill_method, convention=convention,
limit=limit, base=base)
return sampler.resample(self)
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
        ts.first('10D') -> First 10 days
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, '_inc'):
if end_date in self.index:
end = self.index.searchsorted(end_date, side='left')
return self.ix[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.last('5M') -> Last 5 months
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = start = self.index[-1] - offset
start = self.index.searchsorted(start_date, side='right')
return self.ix[start:]
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : type of caller
"""
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if len(axis) > 0:
new_axis = axis[np.asarray([crit(label) for label in axis])]
else:
new_axis = axis
return self.reindex(**{axis_name: new_axis})
def drop(self, labels, axis=0, level=None):
"""
Return new object with labels in requested axis removed
Parameters
----------
labels : array-like
axis : int
level : int or name, default None
For MultiIndex
Returns
-------
dropped : type of caller
"""
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level)
else:
new_axis = axis.drop(labels)
return self.reindex(**{axis_name: new_axis})
else:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = -lib.ismember(axis.get_level_values(level),
set(labels))
else:
indexer = -axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
return self.ix[tuple(slicer)]
def sort_index(self, axis=0, ascending=True):
"""
Sort object by labels (along an axis)
Parameters
----------
axis : {0, 1}
Sort index/rows versus columns
ascending : boolean, default True
Sort ascending vs. descending
Returns
-------
sorted_obj : type of caller
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
@property
def ix(self):
raise NotImplementedError
def reindex(self, *args, **kwds):
raise NotImplementedError
def tshift(self, periods=1, freq=None, **kwds):
"""
Shift the time index, using the index's frequency if available
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from datetools module or time rule (e.g. 'EOM')
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
Returns
-------
shifted : Series
"""
if freq is None:
freq = getattr(self.index, 'freq', None)
if freq is None:
freq = getattr(self.index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
return self.shift(periods, freq, **kwds)
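    # Illustrative sketch: for a series whose index has an inferable daily frequency,
    # ts.tshift(2) keeps the values unchanged and moves the index forward by two days;
    # with no inferable frequency and no freq argument a ValueError is raised, as above.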
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwds):
"""
Percent change over given number of periods
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change
fill_method : str, default 'pad'
How to handle NAs before computing percent changes
limit : int, default None
The number of consecutive NAs to fill before stopping
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay())
Returns
-------
chg : Series or DataFrame
"""
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit)
rs = data / data.shift(periods=periods, freq=freq, **kwds) - 1
if freq is None:
mask = com.isnull(self.values)
np.putmask(rs.values, mask, np.nan)
return rs
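    # Illustrative sketch: for values [100, 110, 121] the default pct_change(periods=1)
    # computes data / data.shift(1) - 1, i.e. [NaN, 0.10, 0.10].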
class NDFrame(PandasObject):
"""
    N-dimensional analogue of DataFrame. Stores multi-dimensional data in a
    size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : boolean, default False
"""
# kludge
_default_stat_axis = 0
def __init__(self, data, axes=None, copy=False, dtype=None):
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
def astype(self, dtype):
"""
Cast object to input numpy.dtype
Parameters
----------
dtype : numpy.dtype or Python type
Returns
-------
casted : type of caller
"""
return self._constructor(self._data, dtype=dtype)
@property
def _constructor(self):
return NDFrame
@property
def axes(self):
return self._data.axes
def __repr__(self):
return 'NDFrame'
@property
def values(self):
return self._data.as_matrix()
@property
def ndim(self):
return self._data.ndim
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
def __getitem__(self, item):
return self._get_item_cache(item)
def _get_item_cache(self, item):
cache = self._item_cache
try:
return cache[item]
except Exception:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
return res
def _box_item_values(self, key, values):
raise NotImplementedError
def _clear_item_cache(self):
self._item_cache.clear()
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def __delitem__(self, key):
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[:len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
try:
del self._item_cache[key]
except KeyError:
pass
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
"""
result = self[item]
del self[item]
return result
def _expand_axes(self, key):
new_axes = []
for k, ax in zip(key, self.axes):
if k not in ax:
if type(k) != ax.dtype.type:
ax = ax.astype('O')
new_axes.append(ax.insert(len(ax), k))
else:
new_axes.append(ax)
return new_axes
#----------------------------------------------------------------------
# Consolidation of internals
def _consolidate_inplace(self):
self._clear_item_cache()
self._data = self._data.consolidate()
def consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray). Mainly an internal API function,
but available here to the savvy user
Parameters
----------
inplace : boolean, default False
If False return new object, otherwise modify existing object
Returns
-------
consolidated : type of caller
"""
if inplace:
self._consolidate_inplace()
return self
else:
cons_data = self._data.consolidate()
if cons_data is self._data:
cons_data = cons_data.copy()
return self._constructor(cons_data)
@property
def _is_mixed_type(self):
self._consolidate_inplace()
return len(self._data.blocks) > 1
def _reindex_axis(self, new_index, fill_method, axis, copy):
new_data = self._data.reindex_axis(new_index, axis=axis,
method=fill_method, copy=copy)
if new_data is self._data and not copy:
return self
else:
return self._constructor(new_data)
def cumsum(self, axis=None, skipna=True):
"""
Return DataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
y : DataFrame
"""
if axis is None:
axis = self._default_stat_axis
else:
axis = self._get_axis_number(axis)
y = self.values.copy()
if not issubclass(y.dtype.type, np.integer):
mask = np.isnan(self.values)
if skipna:
np.putmask(y, mask, 0.)
result = y.cumsum(axis)
if skipna:
np.putmask(result, mask, np.nan)
else:
result = y.cumsum(axis)
return self._wrap_array(result, self.axes, copy=False)
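    # Illustrative sketch of the NaN handling above: for a column [1.0, NaN, 2.0] with
    # skipna=True the NaN is treated as 0 for the running sum and then restored, giving
    # [1.0, NaN, 3.0]; with skipna=False the cumulative sum propagates the NaN instead.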
def _wrap_array(self, array, axes, copy=False):
raise NotImplementedError
def cumprod(self, axis=None, skipna=True):
"""
Return cumulative product over requested axis as DataFrame
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
y : DataFrame
"""
if axis is None:
axis = self._default_stat_axis
else:
axis = self._get_axis_number(axis)
y = self.values.copy()
if not issubclass(y.dtype.type, np.integer):
mask = np.isnan(self.values)
if skipna:
np.putmask(y, mask, 1.)
result = y.cumprod(axis)
if skipna:
np.putmask(result, mask, np.nan)
else:
result = y.cumprod(axis)
return self._wrap_array(result, self.axes, copy=False)
def cummax(self, axis=None, skipna=True):
"""
Return DataFrame of cumulative max over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
y : DataFrame
"""
if axis is None:
axis = self._default_stat_axis
else:
axis = self._get_axis_number(axis)
y = self.values.copy()
if not issubclass(y.dtype.type, np.integer):
mask = np.isnan(self.values)
if skipna:
np.putmask(y, mask, -np.inf)
result = np.maximum.accumulate(y, axis)
if skipna:
np.putmask(result, mask, np.nan)
else:
result = np.maximum.accumulate(y, axis)
return self._wrap_array(result, self.axes, copy=False)
def cummin(self, axis=None, skipna=True):
"""
Return DataFrame of cumulative min over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
y : DataFrame
"""
if axis is None:
axis = self._default_stat_axis
else:
axis = self._get_axis_number(axis)
y = self.values.copy()
if not issubclass(y.dtype.type, np.integer):
mask = np.isnan(self.values)
if skipna:
np.putmask(y, mask, np.inf)
result = np.minimum.accumulate(y, axis)
if skipna:
np.putmask(result, mask, np.nan)
else:
result = np.minimum.accumulate(y, axis)
return self._wrap_array(result, self.axes, copy=False)
def copy(self, deep=True):
"""
Make a copy of this object
Parameters
----------
deep : boolean, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : type of caller
"""
data = self._data
if deep:
data = data.copy()
return self._constructor(data)
def swaplevel(self, i, j, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Returns
-------
swapped : type of caller (new object)
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
def add_prefix(self, prefix):
"""
Concatenate prefix string with panel items names.
Parameters
----------
prefix : string
Returns
-------
with_prefix : type of caller
"""
new_data = self._data.add_prefix(prefix)
return self._constructor(new_data)
def add_suffix(self, suffix):
"""
Concatenate suffix string with panel items names
Parameters
----------
suffix : string
Returns
-------
with_suffix : type of caller
"""
new_data = self._data.add_suffix(suffix)
return self._constructor(new_data)
def rename_axis(self, mapper, axis=0, copy=True):
"""
Alter index and / or columns using input function or functions.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is.
Parameters
----------
mapper : dict-like or function, optional
axis : int, default 0
copy : boolean, default True
Also copy underlying data
See also
--------
DataFrame.rename
Returns
-------
renamed : type of caller
"""
# should move this at some point
from pandas.core.series import _get_rename_function
mapper_f = _get_rename_function(mapper)
if axis == 0:
new_data = self._data.rename_items(mapper_f, copydata=copy)
else:
new_data = self._data.rename_axis(mapper_f, axis=axis)
if copy:
new_data = new_data.copy()
return self._constructor(new_data)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
Parameters
----------
indices : list / array of ints
axis : int, default 0
Returns
-------
taken : type of caller
"""
if axis == 0:
labels = self._get_axis(axis)
new_items = labels.take(indices)
new_data = self._data.reindex_axis(new_items, axis=0)
else:
new_data = self._data.take(indices, axis=axis)
return self._constructor(new_data)
def tz_convert(self, tz, axis=0, copy=True):
"""
Convert TimeSeries to target time zone. If it is time zone naive, it
will be localized to the passed time zone.
Parameters
----------
tz : string or pytz.timezone object
copy : boolean, default True
Also make a copy of the underlying data
Returns
-------
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
if not hasattr(ax, 'tz_convert'):
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
ax_name)
new_data = self._data
if copy:
new_data = new_data.copy()
new_obj = self._constructor(new_data)
new_ax = ax.tz_convert(tz)
if axis == 0:
new_obj._set_axis(1, new_ax)
elif axis == 1:
new_obj._set_axis(0, new_ax)
self._clear_item_cache()
return new_obj
def tz_localize(self, tz, axis=0, copy=True):
"""
Localize tz-naive TimeSeries to target time zone
Parameters
----------
tz : string or pytz.timezone object
copy : boolean, default True
Also make a copy of the underlying data
Returns
-------
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
if not hasattr(ax, 'tz_localize'):
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
ax_name)
new_data = self._data
if copy:
new_data = new_data.copy()
new_obj = self._constructor(new_data)
new_ax = ax.tz_localize(tz)
if axis == 0:
new_obj._set_axis(1, new_ax)
elif axis == 1:
new_obj._set_axis(0, new_ax)
self._clear_item_cache()
return new_obj
# Good for either Series or DataFrame
def truncate(self, before=None, after=None, copy=True):
"""Function truncate a sorted DataFrame / Series before and/or after
some particular dates.
Parameters
----------
before : date
Truncate before date
after : date
Truncate after date
Returns
-------
truncated : type of caller
"""
from pandas.tseries.tools import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
                raise AssertionError('Truncate: %s must be after %s' %
                                     (after, before))
result = self.ix[before:after]
if isinstance(self.index, MultiIndex):
result.index = self.index.truncate(before, after)
if copy:
result = result.copy()
return result
| 29.089041 | 79 | 0.551112 |
79493b4b1cd39146785dd4ca3e96131a505fa11b
| 386 |
py
|
Python
|
misc/get_local_code.py
|
byung-u/real_estate_in_seoul
|
c3c16c25c337e8c6869f8d891b7b2a195b937289
|
[
"MIT"
] | 1 |
2019-09-30T13:36:53.000Z
|
2019-09-30T13:36:53.000Z
|
misc/get_local_code.py
|
byung-u/real_estate_in_seoul
|
c3c16c25c337e8c6869f8d891b7b2a195b937289
|
[
"MIT"
] | 1 |
2017-03-11T14:08:22.000Z
|
2017-03-11T14:08:22.000Z
|
misc/get_local_code.py
|
byung-u/real_estate_in_seoul
|
c3c16c25c337e8c6869f8d891b7b2a195b937289
|
[
"MIT"
] | 1 |
2017-10-27T05:31:12.000Z
|
2017-10-27T05:31:12.000Z
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
import sqlite3
def main():
conn = sqlite3.connect('../local_code.db')
c = conn.cursor()
gu = '마포'
query = '''SELECT code FROM local_code WHERE
province='서울특별시' and district="%s구"''' % (gu)
c.execute(query)
data = c.fetchone()
conn.close
print(int(data[0]))
if __name__ == '__main__':
main()
| 17.545455 | 49 | 0.580311 |
79493c424af838f4f1fbc8e79ba7461c2b65b2b2
| 38,620 |
py
|
Python
|
xmlschema/validators/xsdbase.py
|
delocalizer/xmlschema
|
c24169f05112f67a45493d390476307919cacd22
|
[
"MIT"
] | null | null | null |
xmlschema/validators/xsdbase.py
|
delocalizer/xmlschema
|
c24169f05112f67a45493d390476307919cacd22
|
[
"MIT"
] | null | null | null |
xmlschema/validators/xsdbase.py
|
delocalizer/xmlschema
|
c24169f05112f67a45493d390476307919cacd22
|
[
"MIT"
] | null | null | null |
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains base functions and classes XML Schema components.
"""
import re
from ..exceptions import XMLSchemaValueError, XMLSchemaTypeError
from ..qnames import XSD_ANNOTATION, XSD_APPINFO, XSD_DOCUMENTATION, XML_LANG, \
XSD_ANY_TYPE, XSD_ANY_SIMPLE_TYPE, XSD_ANY_ATOMIC_TYPE, XSD_ID, XSD_QNAME, \
XSD_OVERRIDE, get_qname, local_name, qname_to_prefixed, is_not_xsd_annotation
from ..etree import etree_tostring
from ..helpers import is_etree_element
from .exceptions import XMLSchemaParseError, XMLSchemaValidationError
XSD_TYPE_DERIVATIONS = {'extension', 'restriction'}
XSD_ELEMENT_DERIVATIONS = {'extension', 'restriction', 'substitution'}
XSD_VALIDATION_MODES = {'strict', 'lax', 'skip'}
"""
XML Schema validation modes
Ref.: https://www.w3.org/TR/xmlschema11-1/#key-va
"""
def check_validation_mode(validation):
if validation not in XSD_VALIDATION_MODES:
raise XMLSchemaValueError("validation mode can be 'strict', "
"'lax' or 'skip': %r" % validation)
class XsdValidator(object):
"""
    Common base class for XML Schema validators; represents a PSVI (Post Schema Validation
    Infoset) information item. A concrete XSD validator has to report its validity by collecting
    building errors and implementing the properties.
:param validation: defines the XSD validation mode to use for build the validator, \
its value can be 'strict', 'lax' or 'skip'. Strict mode is the default.
:type validation: str
:ivar validation: XSD validation mode.
:vartype validation: str
:ivar errors: XSD validator building errors.
:vartype errors: list
"""
xsd_version = None
def __init__(self, validation='strict'):
self.validation = validation
self.errors = []
def __str__(self):
return self.__repr__()
@property
def built(self):
"""
Property that is ``True`` if XSD validator has been fully parsed and built,
``False`` otherwise. For schemas the property is checked on all global
components. For XSD components check only the building of local subcomponents.
"""
raise NotImplementedError()
@property
def validation_attempted(self):
"""
Property that returns the *validation status* of the XSD validator.
It can be 'full', 'partial' or 'none'.
| https://www.w3.org/TR/xmlschema-1/#e-validation_attempted
| https://www.w3.org/TR/2012/REC-xmlschema11-1-20120405/#e-validation_attempted
"""
raise NotImplementedError()
@property
def validity(self):
"""
Property that returns the XSD validator's validity.
It can be ‘valid’, ‘invalid’ or ‘notKnown’.
| https://www.w3.org/TR/xmlschema-1/#e-validity
| https://www.w3.org/TR/2012/REC-xmlschema11-1-20120405/#e-validity
"""
if self.validation == 'skip':
return 'notKnown'
elif self.errors or any(comp.errors for comp in self.iter_components()):
return 'invalid'
elif self.built:
return 'valid'
else:
return 'notKnown'
def iter_components(self, xsd_classes=None):
"""
Creates an iterator for traversing all XSD components of the validator.
:param xsd_classes: returns only a specific class/classes of components, \
otherwise returns all components.
"""
raise NotImplementedError()
@property
def all_errors(self):
"""
A list with all the building errors of the XSD validator and its components.
"""
errors = []
for comp in self.iter_components():
if comp.errors:
errors.extend(comp.errors)
return errors
def copy(self):
validator = object.__new__(self.__class__)
validator.__dict__.update(self.__dict__)
validator.errors = self.errors[:]
return validator
__copy__ = copy
def parse_error(self, error, elem=None, validation=None):
"""
Helper method for registering parse errors. Does nothing if validation mode is 'skip'.
        If validation mode is 'lax' it collects the error, otherwise it raises the error.
:param error: can be a parse error or an error message.
:param elem: the Element instance related to the error, for default uses the 'elem' \
attribute of the validator, if it's present.
:param validation: overrides the default validation mode of the validator.
"""
if validation:
check_validation_mode(validation)
else:
validation = self.validation
if validation == 'skip':
return
if is_etree_element(elem):
pass
elif elem is None:
elem = getattr(self, 'elem', None)
else:
msg = "the argument 'elem' must be an Element instance, not {!r}."
raise XMLSchemaTypeError(msg.format(elem))
if isinstance(error, XMLSchemaParseError):
error.validator = self
error.namespaces = getattr(self, 'namespaces', None)
error.elem = elem
error.source = getattr(self, 'source', None)
elif isinstance(error, Exception):
message = str(error).strip()
if message[0] in '\'"' and message[0] == message[-1]:
message = message.strip('\'"')
error = XMLSchemaParseError(self, message, elem)
elif isinstance(error, str):
error = XMLSchemaParseError(self, error, elem)
else:
msg = "'error' argument must be an exception or a string, not {!r}."
raise XMLSchemaTypeError(msg.format(error))
if validation == 'lax':
self.errors.append(error)
else:
raise error
def _parse_xpath_default_namespace(self, elem):
"""
Parse XSD 1.1 xpathDefaultNamespace attribute for schema, alternative, assert, assertion
and selector declarations, checking if the value is conforming to the specification. In
case the attribute is missing or for wrong attribute values defaults to ''.
"""
try:
value = elem.attrib['xpathDefaultNamespace']
except KeyError:
return ''
value = value.strip()
if value == '##local':
return ''
elif value == '##defaultNamespace':
return getattr(self, 'default_namespace')
elif value == '##targetNamespace':
return getattr(self, 'target_namespace')
elif len(value.split()) == 1:
return value
else:
admitted_values = ('##defaultNamespace', '##targetNamespace', '##local')
msg = "wrong value %r for 'xpathDefaultNamespace' attribute, can be (anyURI | %s)."
self.parse_error(msg % (value, ' | '.join(admitted_values)), elem)
return ''
class XsdComponent(XsdValidator):
"""
Class for XSD components. See: https://www.w3.org/TR/xmlschema-ref/
:param elem: ElementTree's node containing the definition.
:param schema: the XMLSchema object that owns the definition.
:param parent: the XSD parent, `None` means that is a global component that \
has the schema as parent.
:param name: name of the component, maybe overwritten by the parse of the `elem` argument.
:cvar qualified: for name matching, unqualified matching may be admitted only \
for elements and attributes.
:vartype qualified: bool
"""
_REGEX_SPACE = re.compile(r'\s')
_REGEX_SPACES = re.compile(r'\s+')
_ADMITTED_TAGS = ()
parent = None
name = None
ref = None
qualified = True
def __init__(self, elem, schema, parent=None, name=None):
super(XsdComponent, self).__init__(schema.validation)
if name is not None:
assert name and (name[0] == '{' or not schema.target_namespace), \
"name=%r argument: must be a qualified name of the target namespace." % name
self.name = name
if parent is not None:
self.parent = parent
self.schema = schema
self.elem = elem
def __setattr__(self, name, value):
if name == "elem":
if not is_etree_element(value):
raise XMLSchemaTypeError(
"%r attribute must be an Etree Element: %r" % (name, value)
)
elif value.tag not in self._ADMITTED_TAGS:
raise XMLSchemaValueError(
"wrong XSD element %r for %r, must be one of %r." % (
local_name(value.tag), self,
[local_name(tag) for tag in self._ADMITTED_TAGS]
)
)
super(XsdComponent, self).__setattr__(name, value)
self._parse()
return
elif name == "schema":
if hasattr(self, 'schema') and self.schema.target_namespace != value.target_namespace:
raise XMLSchemaValueError(
"cannot change 'schema' attribute of %r: the actual %r has a different "
"target namespace than %r." % (self, self.schema, value)
)
super(XsdComponent, self).__setattr__(name, value)
@property
def xsd_version(self):
return self.schema.XSD_VERSION
def is_global(self):
"""Returns `True` if the instance is a global component, `False` if it's local."""
return self.parent is None
def is_override(self):
"""Returns `True` if the instance is an override of a global component."""
if self.parent is not None:
return False
return any(self.elem in x for x in self.schema.root if x.tag == XSD_OVERRIDE)
@property
def schema_elem(self):
"""The reference element of the schema for the component instance."""
return self.elem
@property
def source(self):
"""Property that references to schema source."""
return self.schema.source
@property
def target_namespace(self):
"""Property that references to schema's targetNamespace."""
return self.schema.target_namespace if self.ref is None else self.ref.target_namespace
@property
def default_namespace(self):
"""Property that references to schema's default namespaces."""
return self.schema.namespaces.get('')
@property
def namespaces(self):
"""Property that references to schema's namespace mapping."""
return self.schema.namespaces
@property
def maps(self):
"""Property that references to schema's global maps."""
return self.schema.maps
@property
def any_type(self):
"""Property that references to the xs:anyType instance of the global maps."""
return self.schema.maps.types[XSD_ANY_TYPE]
@property
def any_simple_type(self):
"""Property that references to the xs:anySimpleType instance of the global maps."""
return self.schema.maps.types[XSD_ANY_SIMPLE_TYPE]
@property
def any_atomic_type(self):
"""Property that references to the xs:anyAtomicType instance of the global maps."""
return self.schema.maps.types[XSD_ANY_ATOMIC_TYPE]
def __repr__(self):
if self.name is None:
return '<%s at %#x>' % (self.__class__.__name__, id(self))
elif self.ref is not None:
return '%s(ref=%r)' % (self.__class__.__name__, self.prefixed_name)
else:
return '%s(name=%r)' % (self.__class__.__name__, self.prefixed_name)
def _parse(self):
del self.errors[:]
try:
if self.elem[0].tag == XSD_ANNOTATION:
self.annotation = XsdAnnotation(self.elem[0], self.schema, self)
else:
self.annotation = None
except (TypeError, IndexError):
self.annotation = None
def _parse_reference(self):
"""
Helper method for referable components. Returns `True` if a valid reference QName
is found without any error, otherwise returns `None`. Sets an id-related name for
the component ('nameless_<id of the instance>') if both the attributes 'ref' and
'name' are missing.
"""
ref = self.elem.get('ref')
if ref is None:
if 'name' in self.elem.attrib:
return
elif self.parent is None:
self.parse_error("missing attribute 'name' in a global %r" % type(self))
else:
self.parse_error(
"missing both attributes 'name' and 'ref' in local %r" % type(self)
)
elif 'name' in self.elem.attrib:
self.parse_error("attributes 'name' and 'ref' are mutually exclusive")
elif self.parent is None:
self.parse_error("attribute 'ref' not allowed in a global %r" % type(self))
else:
try:
self.name = self.schema.resolve_qname(ref)
except (KeyError, ValueError, RuntimeError) as err:
self.parse_error(err)
else:
if self._parse_child_component(self.elem) is not None:
self.parse_error("a reference component cannot have "
"child definitions/declarations")
return True
def _parse_boolean_attribute(self, name):
try:
value = self.elem.attrib[name].strip()
except KeyError:
return
else:
if value in ('true', '1'):
return True
elif value in ('false', '0'):
return False
else:
self.parse_error("wrong value %r for boolean attribute %r" % (value, name))
def _parse_child_component(self, elem, strict=True):
child = None
for index, child in enumerate(filter(is_not_xsd_annotation, elem)):
if not strict:
return child
elif index:
msg = "too many XSD components, unexpected {!r} found at position {}"
self.parse_error(msg.format(child, index), elem)
return child
def _parse_target_namespace(self):
"""
XSD 1.1 targetNamespace attribute in elements and attributes declarations.
"""
if 'targetNamespace' not in self.elem.attrib:
return
self._target_namespace = self.elem.attrib['targetNamespace'].strip()
if 'name' not in self.elem.attrib:
self.parse_error("attribute 'name' must be present when "
"'targetNamespace' attribute is provided")
if 'form' in self.elem.attrib:
self.parse_error("attribute 'form' must be absent when "
"'targetNamespace' attribute is provided")
if self._target_namespace != self.schema.target_namespace:
if self.parent is None:
self.parse_error("a global %s must have the same namespace as "
"its parent schema" % self.__class__.__name__)
xsd_type = self.get_parent_type()
if not xsd_type or xsd_type.parent is not None:
pass
elif xsd_type.derivation != 'restriction' or xsd_type.base_type.name == XSD_ANY_TYPE:
self.parse_error("a declaration contained in a global complexType "
"must have the same namespace as its parent schema")
if not self._target_namespace:
self.name = local_name(self.name)
else:
self.name = '{%s}%s' % (self._target_namespace, local_name(self.name))
@property
def local_name(self):
"""The local part of the name of the component, or `None` if the name is `None`."""
return local_name(self.name)
@property
def qualified_name(self):
"""The name of the component in extended format, or `None` if the name is `None`."""
return get_qname(self.target_namespace, self.name)
@property
def prefixed_name(self):
"""The name of the component in prefixed format, or `None` if the name is `None`."""
return qname_to_prefixed(self.name, self.namespaces)
@property
def id(self):
"""The ``'id'`` attribute of the component tag, ``None`` if missing."""
return self.elem.get('id')
@property
def validation_attempted(self):
return 'full' if self.built else 'partial'
@property
def built(self):
raise NotImplementedError()
def is_matching(self, name, default_namespace=None, **kwargs):
"""
Returns `True` if the component name is matching the name provided as argument,
`False` otherwise. For XSD elements the matching is extended to substitutes.
:param name: a local or fully-qualified name.
:param default_namespace: used if it's not None and not empty for completing \
the name argument in case it's a local name.
:param kwargs: additional options that can be used by certain components.
"""
if not name:
return self.name == name
elif name[0] == '{':
return self.qualified_name == name
elif not default_namespace:
return self.name == name or not self.qualified and self.local_name == name
else:
qname = '{%s}%s' % (default_namespace, name)
return self.qualified_name == qname or not self.qualified and self.local_name == name
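    # A minimal matching sketch (hypothetical names), for a component declared as
    # '{http://example.com/ns}item':
    #
    #   component.is_matching('{http://example.com/ns}item')    # True: extended name
    #   component.is_matching('item', 'http://example.com/ns')  # True: completed with the default namespace
    #   component.is_matching('item')                            # True only if the component is unqualified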
def match(self, name, default_namespace=None, **kwargs):
"""
        Returns the component if its name matches the name provided as argument,
`None` otherwise.
"""
return self if self.is_matching(name, default_namespace, **kwargs) else None
def get_global(self):
"""Returns the global XSD component that contains the component instance."""
if self.parent is None:
return self
component = self.parent
while component is not self:
if component.parent is None:
return component
component = component.parent
def get_parent_type(self):
"""
Returns the nearest XSD type that contains the component instance,
or `None` if the component doesn't have an XSD type parent.
"""
component = self.parent
while component is not self and component is not None:
if isinstance(component, XsdType):
return component
component = component.parent
def iter_components(self, xsd_classes=None):
"""
Creates an iterator for XSD subcomponents.
        :param xsd_classes: provide a class or a tuple of classes to iterate over only \
        specific classes of components.
"""
if xsd_classes is None or isinstance(self, xsd_classes):
yield self
def iter_ancestors(self, xsd_classes=None):
"""
Creates an iterator for XSD ancestor components, schema excluded. Stops when the component
is global or if the ancestor is not an instance of the specified class/classes.
        :param xsd_classes: provide a class or a tuple of classes to iterate over only \
        specific classes of components.
"""
ancestor = self
while True:
ancestor = ancestor.parent
if ancestor is None:
break
elif xsd_classes is None or isinstance(ancestor, xsd_classes):
yield ancestor
else:
break
def tostring(self, indent='', max_lines=None, spaces_for_tab=4):
"""Serializes the XML elements that declare or define the component to a string."""
return etree_tostring(self.schema_elem, self.namespaces, indent, max_lines, spaces_for_tab)
class XsdAnnotation(XsdComponent):
"""
Class for XSD *annotation* definitions.
:ivar appinfo: a list containing the xs:appinfo children.
:ivar documentation: a list containing the xs:documentation children.
.. <annotation
id = ID
{any attributes with non-schema namespace . . .}>
Content: (appinfo | documentation)*
</annotation>
.. <appinfo
source = anyURI
{any attributes with non-schema namespace . . .}>
Content: ({any})*
</appinfo>
.. <documentation
source = anyURI
xml:lang = language
{any attributes with non-schema namespace . . .}>
Content: ({any})*
</documentation>
"""
_ADMITTED_TAGS = {XSD_ANNOTATION}
@property
def built(self):
return True
def _parse(self):
del self.errors[:]
self.appinfo = []
self.documentation = []
for child in self.elem:
if child.tag == XSD_APPINFO:
for key in child.attrib:
if key != 'source':
self.parse_error("wrong attribute %r for appinfo declaration." % key)
self.appinfo.append(child)
elif child.tag == XSD_DOCUMENTATION:
for key in child.attrib:
if key not in ['source', XML_LANG]:
self.parse_error("wrong attribute %r for documentation declaration." % key)
self.documentation.append(child)
class XsdType(XsdComponent):
"""Common base class for XSD types."""
abstract = False
block = None
base_type = None
derivation = None
redefine = None
_final = None
@property
def final(self):
return self.schema.final_default if self._final is None else self._final
@property
def built(self):
raise NotImplementedError()
@property
def content_type_label(self):
if self.is_empty():
return 'empty'
elif self.has_simple_content():
return 'simple'
elif self.is_element_only():
return 'element-only'
elif self.has_mixed_content():
return 'mixed'
else:
return 'unknown'
@property
def root_type(self):
"""The root type of the type definition hierarchy. Is itself for a root type."""
if self.base_type is None:
return self # Note that a XsdUnion type is always considered a root type
try:
if self.base_type.is_simple():
return self.base_type.primitive_type
else:
return self.base_type.content_type.primitive_type
except AttributeError:
# The type has complex or XsdList content
return self.base_type
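    # Illustrative sketch (hypothetical hierarchy): for a simple type derived as
    # myInteger -> xs:integer -> xs:decimal, root_type follows base_type up to the
    # primitive xs:decimal; a union type, or any type without a base_type, is its
    # own root.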
@staticmethod
def is_simple():
"""Returns `True` if the instance is a simpleType, `False` otherwise."""
raise NotImplementedError()
@staticmethod
def is_complex():
"""Returns `True` if the instance is a complexType, `False` otherwise."""
raise NotImplementedError()
@staticmethod
def is_atomic():
"""Returns `True` if the instance is an atomic simpleType, `False` otherwise."""
return False
@staticmethod
def is_list():
"""Returns `True` if the instance is a list simpleType, `False` otherwise."""
return False
@staticmethod
def is_datetime():
"""
Returns `True` if the instance is a datetime/duration XSD builtin-type, `False` otherwise.
"""
return False
def is_empty(self):
"""Returns `True` if the instance has an empty value or content, `False` otherwise."""
raise NotImplementedError()
def is_emptiable(self):
"""Returns `True` if the instance has an emptiable value or content, `False` otherwise."""
raise NotImplementedError()
def has_simple_content(self):
"""
Returns `True` if the instance is a simpleType or a complexType with simple
content, `False` otherwise.
"""
raise NotImplementedError()
def has_complex_content(self):
"""
Returns `True` if the instance is a complexType with mixed or element-only
content, `False` otherwise.
"""
raise NotImplementedError()
def has_mixed_content(self):
"""
Returns `True` if the instance is a complexType with mixed content, `False` otherwise.
"""
raise NotImplementedError()
def is_element_only(self):
"""
Returns `True` if the instance is a complexType with element-only content,
`False` otherwise.
"""
raise NotImplementedError()
def is_derived(self, other, derivation=None):
raise NotImplementedError()
def is_extension(self):
return self.derivation == 'extension'
def is_restriction(self):
return self.derivation == 'restriction'
def is_blocked(self, xsd_element):
"""
Returns `True` if the base type derivation is blocked, `False` otherwise.
"""
xsd_type = xsd_element.type
if self is xsd_type:
return False
block = ('%s %s' % (xsd_element.block, xsd_type.block)).strip()
if not block:
return False
block = {x for x in block.split() if x in ('extension', 'restriction')}
return any(self.is_derived(xsd_type, derivation) for derivation in block)
def is_dynamic_consistent(self, other):
return other.name == XSD_ANY_TYPE or self.is_derived(other) or \
hasattr(other, 'member_types') and any(self.is_derived(mt) for mt in other.member_types)
def is_key(self):
return self.name == XSD_ID or self.is_derived(self.maps.types[XSD_ID])
def is_qname(self):
return self.name == XSD_QNAME or self.is_derived(self.maps.types[XSD_QNAME])
def text_decode(self, text):
raise NotImplementedError()
class ValidationMixin(object):
"""
Mixin for implementing XML data validators/decoders. A derived class must implement the
methods `iter_decode` and `iter_encode`.
"""
def validate(self, source, use_defaults=True, namespaces=None):
"""
Validates an XML data against the XSD schema/component instance.
:param source: the source of XML data. For a schema can be a path \
to a file or an URI of a resource or an opened file-like object or an Element Tree \
instance or a string containing XML data. For other XSD components can be a string \
for an attribute or a simple type validators, or an ElementTree's Element otherwise.
:param use_defaults: indicates whether to use default values for filling missing data.
:param namespaces: is an optional mapping from namespace prefix to URI.
        :raises: :exc:`XMLSchemaValidationError` if the XML *data* instance is not valid.
"""
for error in self.iter_errors(source, use_defaults=use_defaults, namespaces=namespaces):
raise error
def is_valid(self, source, use_defaults=True, namespaces=None):
"""
        Like :meth:`validate` except that it does not raise an exception but returns ``True`` if
the XML document is valid, ``False`` if it's invalid.
:param source: the source of XML data. For a schema can be a path \
to a file or an URI of a resource or an opened file-like object or an Element Tree \
instance or a string containing XML data. For other XSD components can be a string \
for an attribute or a simple type validators, or an ElementTree's Element otherwise.
:param use_defaults: indicates whether to use default values for filling missing data.
:param namespaces: is an optional mapping from namespace prefix to URI.
"""
return next(
self.iter_errors(source, use_defaults=use_defaults, namespaces=namespaces), None
) is None
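    # A minimal usage sketch, assuming a schema object exposing this mixin (e.g. built
    # from a hypothetical 'example.xsd') and an instance document 'example.xml':
    #
    #   if not schema.is_valid('example.xml'):
    #       for error in schema.iter_errors('example.xml'):
    #           print(error)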
def iter_errors(self, source, use_defaults=True, namespaces=None):
"""
Creates an iterator for the errors generated by the validation of an XML data
against the XSD schema/component instance.
:param source: the source of XML data. For a schema can be a path \
to a file or an URI of a resource or an opened file-like object or an Element Tree \
instance or a string containing XML data. For other XSD components can be a string \
for an attribute or a simple type validators, or an ElementTree's Element otherwise.
:param use_defaults: Use schema's default values for filling missing data.
:param namespaces: is an optional mapping from namespace prefix to URI.
"""
for result in self.iter_decode(source, use_defaults=use_defaults, namespaces=namespaces):
if isinstance(result, XMLSchemaValidationError):
yield result
else:
del result
def decode(self, source, validation='strict', **kwargs):
"""
Decodes XML data.
:param source: the XML data. Can be a string for an attribute or for a simple \
type components or a dictionary for an attribute group or an ElementTree's \
Element for other components.
        :param validation: the validation mode. Can be 'lax', 'strict' or 'skip'.
:param kwargs: optional keyword arguments for the method :func:`iter_decode`.
:return: a dictionary like object if the XSD component is an element, a \
group or a complex type; a list if the XSD component is an attribute group; \
        a simple data type object otherwise. If the *validation* argument is 'lax' a 2-item \
tuple is returned, where the first item is the decoded object and the second item \
is a list containing the errors.
:raises: :exc:`XMLSchemaValidationError` if the object is not decodable by \
the XSD component, or also if it's invalid when ``validation='strict'`` is provided.
"""
check_validation_mode(validation)
result, errors = None, []
for result in self.iter_decode(source, validation, **kwargs):
if not isinstance(result, XMLSchemaValidationError):
break
elif validation == 'strict':
raise result
elif validation == 'lax':
errors.append(result)
return (result, errors) if validation == 'lax' else result
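    # Sketch of the two validation modes (hypothetical source): decode(source) with the
    # default 'strict' mode raises on the first error, while
    # decode(source, validation='lax') returns a (data, errors) tuple instead.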
def encode(self, obj, validation='strict', **kwargs):
"""
Encodes data to XML.
:param obj: the data to be encoded to XML.
        :param validation: the validation mode. Can be 'lax', 'strict' or 'skip'.
:param kwargs: optional keyword arguments for the method :func:`iter_encode`.
        :return: An ElementTree's Element if the original data is structured data or \
        a string if it's a simple type datum. If the *validation* argument is 'lax' a 2-item \
tuple is returned, where the first item is the encoded object and the second item \
is a list containing the errors.
:raises: :exc:`XMLSchemaValidationError` if the object is not encodable by the XSD \
component, or also if it's invalid when ``validation='strict'`` is provided.
"""
check_validation_mode(validation)
result, errors = None, []
for result in self.iter_encode(obj, validation=validation, **kwargs):
if not isinstance(result, XMLSchemaValidationError):
break
elif validation == 'strict':
raise result
elif validation == 'lax':
errors.append(result)
return (result, errors) if validation == 'lax' else result
def iter_decode(self, source, validation='lax', **kwargs):
"""
Creates an iterator for decoding an XML source to a Python object.
:param source: the XML data source.
        :param validation: the validation mode. Can be 'lax', 'strict' or 'skip'.
        :param kwargs: keyword arguments for the decoder API.
        :return: Yields a decoded object, possibly preceded by a sequence of \
        validation or decoding errors.
"""
raise NotImplementedError()
def iter_encode(self, obj, validation='lax', **kwargs):
"""
        Creates an iterator for encoding data to an Element.
        :param obj: The data that has to be encoded.
        :param validation: The validation mode. Can be 'lax', 'strict' or 'skip'.
        :param kwargs: keyword arguments for the encoder API.
        :return: Yields an Element, possibly preceded by a sequence of validation \
        or encoding errors.
"""
raise NotImplementedError()
def validation_error(self, validation, error, obj=None,
source=None, namespaces=None, **_kwargs):
"""
        Helper method for generating and updating validation errors. If the validation
        mode is 'lax' or 'skip' the error is returned, otherwise the error is raised.
:param validation: an error-compatible validation mode: can be 'lax' or 'strict'.
:param error: an error instance or the detailed reason of failed validation.
:param obj: the instance related to the error.
:param source: the XML resource related to the validation process.
:param namespaces: is an optional mapping from namespace prefix to URI.
:param _kwargs: keyword arguments of the validation process that are not used.
"""
check_validation_mode(validation)
if isinstance(error, XMLSchemaValidationError):
if error.namespaces is None and namespaces is not None:
error.namespaces = namespaces
if error.source is None and source is not None:
error.source = source
if error.obj is None and obj is not None:
error.obj = obj
if error.elem is None and is_etree_element(obj):
error.elem = obj
elif isinstance(error, Exception):
error = XMLSchemaValidationError(self, obj, str(error), source, namespaces)
else:
error = XMLSchemaValidationError(self, obj, error, source, namespaces)
if validation == 'strict' and error.elem is not None:
raise error
return error
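    # Usage sketch: concrete validators typically funnel failures through this helper,
    # e.g. yield self.validation_error(validation, "invalid value", obj=elem), so that
    # 'strict' mode raises immediately while 'lax'/'skip' modes yield the error and
    # keep going.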
class ParticleMixin(object):
"""
Mixin for objects related to XSD Particle Schema Components:
https://www.w3.org/TR/2012/REC-xmlschema11-1-20120405/structures.html#p
https://www.w3.org/TR/2012/REC-xmlschema11-1-20120405/structures.html#t
:ivar min_occurs: the minOccurs property of the XSD particle. Defaults to 1.
:ivar max_occurs: the maxOccurs property of the XSD particle. Defaults to 1, \
a `None` value means 'unbounded'.
"""
min_occurs = 1
max_occurs = 1
@property
def occurs(self):
return [self.min_occurs, self.max_occurs]
@property
def effective_min_occurs(self):
return self.min_occurs
@property
def effective_max_occurs(self):
return self.max_occurs
def is_emptiable(self):
"""
        Tests if min_occurs == 0. A zero-length model group is considered emptiable.
For model groups the test outcome depends also on nested particles.
"""
return self.min_occurs == 0
def is_empty(self):
"""
Tests if max_occurs == 0. A zero-length model group is considered empty.
"""
return self.max_occurs == 0
def is_single(self):
"""
Tests if the particle has max_occurs == 1. For elements the test
outcome depends also on parent group. For model groups the test
outcome depends also on nested model groups.
"""
return self.max_occurs == 1
def is_multiple(self):
"""Tests the particle can have multiple occurrences."""
return not self.is_empty() and not self.is_single()
def is_ambiguous(self):
"""Tests if min_occurs != max_occurs."""
return self.min_occurs != self.max_occurs
def is_univocal(self):
"""Tests if min_occurs == max_occurs."""
return self.min_occurs == self.max_occurs
def is_missing(self, occurs):
"""Tests if provided occurrences are under the minimum."""
return not self.is_emptiable() if occurs == 0 else self.min_occurs > occurs
def is_over(self, occurs):
"""Tests if provided occurrences are over the maximum."""
return self.max_occurs is not None and self.max_occurs <= occurs
def has_occurs_restriction(self, other):
if self.min_occurs == self.max_occurs == 0:
return True
elif self.min_occurs < other.min_occurs:
return False
elif other.max_occurs is None:
return True
elif self.max_occurs is None:
return False
else:
return self.max_occurs <= other.max_occurs
def parse_error(self, message):
raise XMLSchemaParseError(self, message)
def _parse_particle(self, elem):
if 'minOccurs' in elem.attrib:
try:
min_occurs = int(elem.attrib['minOccurs'])
except (TypeError, ValueError):
self.parse_error("minOccurs value is not an integer value")
else:
if min_occurs < 0:
self.parse_error("minOccurs value must be a non negative integer")
else:
self.min_occurs = min_occurs
max_occurs = elem.get('maxOccurs')
if max_occurs is None:
if self.min_occurs > 1:
self.parse_error("minOccurs must be lesser or equal than maxOccurs")
elif max_occurs == 'unbounded':
self.max_occurs = None
else:
try:
max_occurs = int(max_occurs)
except ValueError:
self.parse_error("maxOccurs value must be a non negative integer or 'unbounded'")
else:
if self.min_occurs > max_occurs:
self.parse_error("maxOccurs must be 'unbounded' or greater than minOccurs")
else:
self.max_occurs = max_occurs
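    # Parsing sketch: for <xs:element minOccurs="0" maxOccurs="unbounded"/> this sets
    # min_occurs to 0 and max_occurs to None (the internal encoding of 'unbounded');
    # when both attributes are omitted the class defaults min_occurs = max_occurs = 1 apply.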
| avg_line_length: 38.389662 | max_line_length: 100 | alphanum_fraction: 0.619601 |

hexsha: 79493cd74be98a5d06437ed16af4e407b98bc5c1 | size: 180543 | ext: py | lang: Python
max_stars_repo_path: 7.32.0.dev0/ietf/meeting/views.py | max_stars_repo_name: kesara/ietf-datatracker
max_stars_repo_head_hexsha: dca3ee2ee98bcb75a10687587cf631750be34c79 | licenses: ["Unlicense"]
stars_count: null | issues_count: null | forks_count: null | event datetimes: null
# Copyright The IETF Trust 2007-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import csv
import datetime
import glob
import io
import itertools
import json
import math
import os
import pytz
import re
import tarfile
import tempfile
import markdown2
from calendar import timegm
from collections import OrderedDict, Counter, deque, defaultdict
from urllib.parse import unquote
from tempfile import mkstemp
from wsgiref.handlers import format_date_time
from django import forms
from django.shortcuts import render, redirect, get_object_or_404
from django.http import (HttpResponse, HttpResponseRedirect, HttpResponseForbidden,
HttpResponseNotFound, Http404, HttpResponseBadRequest,
JsonResponse)
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.urls import reverse,reverse_lazy
from django.db.models import F, Min, Max, Q
from django.forms.models import modelform_factory, inlineformset_factory
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils.encoding import force_str
from django.utils.functional import curry
from django.utils.text import slugify
from django.views.decorators.cache import cache_page
from django.utils.html import format_html
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_exempt
from django.views.generic import RedirectView
import debug # pyflakes:ignore
from ietf.doc.fields import SearchableDocumentsField
from ietf.doc.models import Document, State, DocEvent, NewRevisionDocEvent, DocAlias
from ietf.group.models import Group
from ietf.group.utils import can_manage_session_materials, can_manage_some_groups, can_manage_group
from ietf.person.models import Person
from ietf.ietfauth.utils import role_required, has_role, user_is_person
from ietf.mailtrigger.utils import gather_address_lists
from ietf.meeting.models import Meeting, Session, Schedule, FloorPlan, SessionPresentation, TimeSlot, SlideSubmission
from ietf.meeting.models import SessionStatusName, SchedulingEvent, SchedTimeSessAssignment, Room, TimeSlotTypeName
from ietf.meeting.forms import CustomDurationField
from ietf.meeting.helpers import get_areas, get_person_by_email, get_schedule_by_name
from ietf.meeting.helpers import build_all_agenda_slices, get_wg_name_list
from ietf.meeting.helpers import get_all_assignments_from_schedule
from ietf.meeting.helpers import get_modified_from_assignments
from ietf.meeting.helpers import get_wg_list, find_ads_for_meeting
from ietf.meeting.helpers import get_meeting, get_ietf_meeting, get_current_ietf_meeting_num
from ietf.meeting.helpers import get_schedule, schedule_permissions, is_regular_agenda_filter_group
from ietf.meeting.helpers import preprocess_assignments_for_agenda, read_agenda_file
from ietf.meeting.helpers import filter_keywords_for_session, tag_assignments_with_filter_keywords
from ietf.meeting.helpers import convert_draft_to_pdf, get_earliest_session_date
from ietf.meeting.helpers import can_view_interim_request, can_approve_interim_request
from ietf.meeting.helpers import can_edit_interim_request
from ietf.meeting.helpers import can_request_interim_meeting, get_announcement_initial
from ietf.meeting.helpers import sessions_post_save, is_interim_meeting_approved
from ietf.meeting.helpers import send_interim_meeting_cancellation_notice, send_interim_session_cancellation_notice
from ietf.meeting.helpers import send_interim_approval
from ietf.meeting.helpers import send_interim_approval_request
from ietf.meeting.helpers import send_interim_announcement_request
from ietf.meeting.utils import finalize, sort_accept_tuple, condition_slide_order
from ietf.meeting.utils import add_event_info_to_session_qs
from ietf.meeting.utils import session_time_for_sorting
from ietf.meeting.utils import session_requested_by
from ietf.meeting.utils import current_session_status
from ietf.meeting.utils import data_for_meetings_overview
from ietf.meeting.utils import preprocess_constraints_for_meeting_schedule_editor
from ietf.meeting.utils import diff_meeting_schedules, prefetch_schedule_diff_objects
from ietf.meeting.utils import swap_meeting_schedule_timeslot_assignments
from ietf.meeting.utils import preprocess_meeting_important_dates
from ietf.message.utils import infer_message
from ietf.name.models import SlideSubmissionStatusName
from ietf.secr.proceedings.utils import handle_upload_file
from ietf.secr.proceedings.proc_utils import (get_progress_stats, post_process, import_audio_files,
create_recording)
from ietf.utils.decorators import require_api_key
from ietf.utils.history import find_history_replacements_active_at
from ietf.utils.log import assertion
from ietf.utils.mail import send_mail_message, send_mail_text
from ietf.utils.mime import get_mime_type
from ietf.utils.pipe import pipe
from ietf.utils.pdf import pdf_pages
from ietf.utils.response import permission_denied
from ietf.utils.text import xslugify
from .forms import (InterimMeetingModelForm, InterimAnnounceForm, InterimSessionModelForm,
InterimCancelForm, InterimSessionInlineFormSet, FileUploadForm, RequestMinutesForm,)
def get_interim_menu_entries(request):
    '''Set up menu entries for the interim meeting view tabs'''
entries = []
entries.append(("Upcoming", reverse("ietf.meeting.views.upcoming")))
entries.append(("Pending", reverse("ietf.meeting.views.interim_pending")))
entries.append(("Announce", reverse("ietf.meeting.views.interim_announce")))
return entries
def send_interim_change_notice(request, meeting):
"""Sends an email notifying changes to a previously scheduled / announced meeting"""
group = meeting.session_set.first().group
form = InterimAnnounceForm(get_announcement_initial(meeting, is_change=True))
message = form.save(user=request.user)
message.related_groups.add(group)
send_mail_message(request, message)
# -------------------------------------------------
# View Functions
# -------------------------------------------------
def materials(request, num=None):
meeting = get_meeting(num)
begin_date = meeting.get_submission_start_date()
cut_off_date = meeting.get_submission_cut_off_date()
cor_cut_off_date = meeting.get_submission_correction_date()
now = datetime.date.today()
old = datetime.datetime.now() - datetime.timedelta(days=1)
if settings.SERVER_MODE != 'production' and '_testoverride' in request.GET:
pass
elif now > cor_cut_off_date:
if meeting.number.isdigit() and int(meeting.number) > 96:
return redirect('ietf.meeting.views.proceedings', num=meeting.number)
else:
return render(request, "meeting/materials_upload_closed.html", {
'meeting_num': meeting.number,
'begin_date': begin_date,
'cut_off_date': cut_off_date,
'cor_cut_off_date': cor_cut_off_date
})
past_cutoff_date = datetime.date.today() > meeting.get_submission_correction_date()
schedule = get_schedule(meeting, None)
sessions = add_event_info_to_session_qs(Session.objects.filter(
meeting__number=meeting.number,
timeslotassignments__schedule__in=[schedule, schedule.base if schedule else None]
).distinct().select_related('meeting__schedule', 'group__state', 'group__parent')).order_by('group__acronym')
plenaries = sessions.filter(name__icontains='plenary')
ietf = sessions.filter(group__parent__type__slug = 'area').exclude(group__acronym='edu')
irtf = sessions.filter(group__parent__acronym = 'irtf')
training = sessions.filter(group__acronym__in=['edu','iaoc'], type_id__in=['regular', 'other', ])
iab = sessions.filter(group__parent__acronym = 'iab')
session_pks = [s.pk for ss in [plenaries, ietf, irtf, training, iab] for s in ss]
other = sessions.filter(type__in=['regular'], group__type__features__has_meetings=True).exclude(pk__in=session_pks)
for topic in [plenaries, ietf, training, irtf, iab]:
for event in topic:
date_list = []
for slide_event in event.all_meeting_slides(): date_list.append(slide_event.time)
for agenda_event in event.all_meeting_agendas(): date_list.append(agenda_event.time)
if date_list: setattr(event, 'last_update', sorted(date_list, reverse=True)[0])
for session_list in [plenaries, ietf, training, irtf, iab, other]:
for session in session_list:
session.past_cutoff_date = past_cutoff_date
return render(request, "meeting/materials.html", {
'meeting': meeting,
'plenaries': plenaries,
'ietf': ietf,
'training': training,
'irtf': irtf,
'iab': iab,
'other': other,
'cut_off_date': cut_off_date,
'cor_cut_off_date': cor_cut_off_date,
'submission_started': now > begin_date,
'old': old,
})
def current_materials(request):
today = datetime.date.today()
meetings = Meeting.objects.exclude(number__startswith='interim-').filter(date__lte=today).order_by('-date')
if meetings:
return redirect(materials, meetings[0].number)
else:
raise Http404('No such meeting')
@cache_page(1 * 60)
def materials_document(request, document, num=None, ext=None):
meeting=get_meeting(num,type_in=['ietf','interim'])
num = meeting.number
if (re.search(r'^\w+-\d+-.+-\d\d$', document) or
re.search(r'^\w+-interim-\d+-.+-\d\d-\d\d$', document) or
re.search(r'^\w+-interim-\d+-.+-sess[a-z]-\d\d$', document) or
re.search(r'^minutes-interim-\d+-.+-\d\d$', document) or
re.search(r'^slides-interim-\d+-.+-\d\d$', document)):
name, rev = document.rsplit('-', 1)
else:
name, rev = document, None
# This view does not allow the use of DocAliases. Right now we are probably only creating one (identity) alias, but that may not hold in the future.
doc = Document.objects.filter(name=name).first()
    # Handle edge case where the above name, rev splitter misidentifies the end of a document name as a revision number
if not doc:
if rev:
name = name + '-' + rev
rev = None
doc = get_object_or_404(Document, name=name)
else:
raise Http404("No such document")
if not doc.meeting_related():
raise Http404("Not a meeting related document")
if not doc.session_set.filter(meeting__number=num).exists():
raise Http404("No such document for meeting %s" % num)
if not rev:
filename = doc.get_file_name()
else:
filename = os.path.join(doc.get_file_path(), document)
if ext:
if not filename.endswith(ext):
name, _ = os.path.splitext(filename)
filename = name + ext
else:
filenames = glob.glob(filename+'.*')
if filenames:
filename = filenames[0]
_, basename = os.path.split(filename)
if not os.path.exists(filename):
raise Http404("File not found: %s" % filename)
old_proceedings_format = meeting.number.isdigit() and int(meeting.number) <= 96
if settings.MEETING_MATERIALS_SERVE_LOCALLY or old_proceedings_format:
with io.open(filename, 'rb') as file:
bytes = file.read()
mtype, chset = get_mime_type(bytes)
content_type = "%s; charset=%s" % (mtype, chset)
file_ext = os.path.splitext(filename)
if len(file_ext) == 2 and file_ext[1] == '.md' and mtype == 'text/plain':
sorted_accept = sort_accept_tuple(request.META.get('HTTP_ACCEPT'))
for atype in sorted_accept:
if atype[0] == 'text/markdown':
content_type = content_type.replace('plain', 'markdown', 1)
                        break
elif atype[0] == 'text/html':
bytes = "<html>\n<head></head>\n<body>\n%s\n</body>\n</html>\n" % markdown2.markdown(bytes)
content_type = content_type.replace('plain', 'html', 1)
                        break
elif atype[0] == 'text/plain':
                        break
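            # Content-negotiation sketch (hypothetical header): with
            # 'Accept: text/html,text/plain;q=0.9' the sorted tuple puts text/html first,
            # so a .md file is converted with markdown2 and served as text/html; a plain
            # 'Accept: text/markdown' keeps the body and only switches the content type.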
response = HttpResponse(bytes, content_type=content_type)
response['Content-Disposition'] = 'inline; filename="%s"' % basename
return response
else:
return HttpResponseRedirect(redirect_to=doc.get_href(meeting=meeting))
@login_required
def materials_editable_groups(request, num=None):
meeting = get_meeting(num)
return render(request, "meeting/materials_editable_groups.html", {
'meeting_num': meeting.number})
def ascii_alphanumeric(string):
return re.match(r'^[a-zA-Z0-9]*$', string)
class SaveAsForm(forms.Form):
savename = forms.CharField(max_length=16)
@role_required('Area Director','Secretariat')
def schedule_create(request, num=None, owner=None, name=None):
meeting = get_meeting(num)
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
if schedule is None:
# here we have to return some ajax to display an error.
messages.error("Error: No meeting information for meeting %s owner %s schedule %s available" % (num, owner, name)) # pylint: disable=no-value-for-parameter
return redirect(edit_schedule, num=num, owner=owner, name=name)
    # authorization was enforced by the @role_required decorator above.
saveasform = SaveAsForm(request.POST)
if not saveasform.is_valid():
messages.info(request, "This name is not valid. Please choose another one.")
return redirect(edit_schedule, num=num, owner=owner, name=name)
savedname = saveasform.cleaned_data['savename']
if not ascii_alphanumeric(savedname):
messages.info(request, "This name contains illegal characters. Please choose another one.")
return redirect(edit_schedule, num=num, owner=owner, name=name)
# create the new schedule, and copy the assignments
try:
sched = meeting.schedule_set.get(name=savedname, owner=request.user.person)
if sched:
return redirect(edit_schedule, num=meeting.number, owner=sched.owner_email(), name=sched.name)
else:
messages.info(request, "Schedule creation failed. Please try again.")
return redirect(edit_schedule, num=num, owner=owner, name=name)
except Schedule.DoesNotExist:
pass
# must be done
newschedule = Schedule(name=savedname,
owner=request.user.person,
meeting=meeting,
base=schedule.base,
origin=schedule,
visible=False,
public=False)
newschedule.save()
if newschedule is None:
return HttpResponse(status=500)
# keep a mapping so that extendedfrom references can be chased.
    mapping = {}
for ss in schedule.assignments.all():
# hack to copy the object, creating a new one
# just reset the key, and save it again.
oldid = ss.pk
ss.pk = None
ss.schedule=newschedule
ss.save()
mapping[oldid] = ss.pk
#print "Copying %u to %u" % (oldid, ss.pk)
# now fix up any extendedfrom references to new set.
for ss in newschedule.assignments.all():
if ss.extendedfrom is not None:
oldid = ss.extendedfrom.id
newid = mapping[oldid]
#print "Fixing %u to %u" % (oldid, newid)
ss.extendedfrom = newschedule.assignments.get(pk = newid)
ss.save()
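    # Example of the two-pass copy (hypothetical primary keys): if assignment 17 extended
    # from assignment 12, the first pass records mapping = {12: 101, 17: 108}; this second
    # pass then points the copied assignment 108 at 101 inside the new schedule.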
# now redirect to this new schedule.
return redirect(edit_schedule, meeting.number, newschedule.owner_email(), newschedule.name)
@role_required('Secretariat')
def edit_timeslots(request, num=None):
meeting = get_meeting(num)
time_slices,date_slices,slots = meeting.build_timeslices()
ts_list = deque()
rooms = meeting.room_set.order_by("capacity","name","id")
for room in rooms:
for day in time_slices:
for slice in date_slices[day]:
ts_list.append(room.timeslot_set.filter(time=slice[0],duration=datetime.timedelta(seconds=slice[2])).first())
return render(request, "meeting/timeslot_edit.html",
{"rooms":rooms,
"time_slices":time_slices,
"slot_slices": slots,
"date_slices":date_slices,
"meeting":meeting,
"ts_list":ts_list,
})
class NewScheduleForm(forms.ModelForm):
class Meta:
model = Schedule
fields = ['name', 'visible', 'public', 'notes', 'base']
def __init__(self, meeting, schedule, new_owner, *args, **kwargs):
super().__init__(*args, **kwargs)
self.meeting = meeting
self.schedule = schedule
self.new_owner = new_owner
username = new_owner.user.username
name_suggestion = username
counter = 2
existing_names = set(Schedule.objects.filter(meeting=meeting, owner=new_owner).values_list('name', flat=True))
while name_suggestion in existing_names:
name_suggestion = username + str(counter)
counter += 1
self.fields['name'].initial = name_suggestion
self.fields['name'].label = "Name of new agenda"
self.fields['base'].queryset = self.fields['base'].queryset.filter(meeting=meeting)
if schedule:
self.fields['visible'].initial = schedule.visible
self.fields['public'].initial = schedule.public
self.fields['base'].queryset = self.fields['base'].queryset.exclude(pk=schedule.pk)
self.fields['base'].initial = schedule.base_id
else:
base = Schedule.objects.filter(meeting=meeting, name='base').first()
if base:
self.fields['base'].initial = base.pk
def clean_name(self):
name = self.cleaned_data.get('name')
if name and Schedule.objects.filter(meeting=self.meeting, owner=self.new_owner, name=name):
raise forms.ValidationError("Schedule with this name already exists.")
return name
@role_required('Area Director','Secretariat')
def new_meeting_schedule(request, num, owner=None, name=None):
meeting = get_meeting(num)
schedule = get_schedule_by_name(meeting, get_person_by_email(owner), name)
if request.method == 'POST':
form = NewScheduleForm(meeting, schedule, request.user.person, request.POST)
if form.is_valid():
new_schedule = form.save(commit=False)
new_schedule.meeting = meeting
new_schedule.owner = request.user.person
new_schedule.origin = schedule
new_schedule.save()
if schedule:
for assignment in schedule.assignments.all():
# clone by resetting primary key
assignment.pk = None
assignment.schedule = new_schedule
assignment.extendedfrom = None
assignment.save()
# now redirect to this new schedule
return redirect(edit_meeting_schedule, meeting.number, new_schedule.owner_email(), new_schedule.name)
else:
form = NewScheduleForm(meeting, schedule, request.user.person)
return render(request, "meeting/new_meeting_schedule.html", {
'meeting': meeting,
'schedule': schedule,
'form': form,
})
class SwapDaysForm(forms.Form):
source_day = forms.DateField(required=True)
target_day = forms.DateField(required=True)
@ensure_csrf_cookie
def edit_meeting_schedule(request, num=None, owner=None, name=None):
meeting = get_meeting(num)
if name is None:
schedule = meeting.schedule
else:
schedule = get_schedule_by_name(meeting, get_person_by_email(owner), name)
if schedule is None:
raise Http404("No meeting information for meeting %s owner %s schedule %s available" % (num, owner, name))
can_see, can_edit, secretariat = schedule_permissions(meeting, schedule, request.user)
if not can_see:
if request.method == 'POST':
permission_denied(request, "Can't view this schedule.")
return render(request, "meeting/private_schedule.html", {
"schedule":schedule,
"meeting": meeting,
"meeting_base_url": request.build_absolute_uri(meeting.base_url()),
"hide_menu": True
}, status=403, content_type="text/html")
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base],
timeslot__location__isnull=False,
session__type='regular',
).order_by('timeslot__time','timeslot__name')
assignments_by_session = defaultdict(list)
for a in assignments:
assignments_by_session[a.session_id].append(a)
rooms = meeting.room_set.filter(session_types__slug='regular').distinct().order_by("capacity")
tombstone_states = ['canceled', 'canceledpa', 'resched']
sessions = add_event_info_to_session_qs(
Session.objects.filter(
meeting=meeting,
type='regular',
).order_by('pk'),
requested_time=True,
requested_by=True,
).filter(
Q(current_status__in=['appr', 'schedw', 'scheda', 'sched'])
| Q(current_status__in=tombstone_states, pk__in={a.session_id for a in assignments})
).prefetch_related(
'resources', 'group', 'group__parent', 'group__type', 'joint_with_groups',
)
timeslots_qs = TimeSlot.objects.filter(meeting=meeting, type='regular').prefetch_related('type').order_by('location', 'time', 'name')
min_duration = min(t.duration for t in timeslots_qs)
max_duration = max(t.duration for t in timeslots_qs)
def timedelta_to_css_ems(timedelta):
# we scale the session and slots a bit according to their
# length for an added visual clue
capped_min_d = max(min_duration, datetime.timedelta(minutes=30))
capped_max_d = min(max_duration, datetime.timedelta(hours=4))
capped_timedelta = min(max(capped_min_d, timedelta), capped_max_d)
min_d_css_rems = 8
max_d_css_rems = 10
# interpolate
scale = (capped_timedelta - capped_min_d) / (capped_max_d - capped_min_d) if capped_min_d != capped_max_d else 1
return min_d_css_rems + (max_d_css_rems - min_d_css_rems) * scale
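    # Worked example (assuming slot durations that straddle the 30-minute and 4-hour caps):
    # a 30-minute slot maps to 8 rem, a 4-hour slot to 10 rem, and a 2-hour slot to about
    # 8 + 2 * (90 / 210) ~= 8.86 rem by linear interpolation.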
def prepare_sessions_for_display(sessions):
# requesters
requested_by_lookup = {p.pk: p for p in Person.objects.filter(pk__in=set(s.requested_by for s in sessions if s.requested_by))}
# constraints
constraints_for_sessions, formatted_constraints_for_sessions, constraint_names = preprocess_constraints_for_meeting_schedule_editor(meeting, sessions)
sessions_for_group = defaultdict(list)
for s in sessions:
sessions_for_group[s.group_id].append(s)
for s in sessions:
s.requested_by_person = requested_by_lookup.get(s.requested_by)
s.scheduling_label = "???"
if s.group:
s.scheduling_label = s.group.acronym
elif s.name:
s.scheduling_label = s.name
s.requested_duration_in_hours = round(s.requested_duration.seconds / 60.0 / 60.0, 1)
session_layout_margin = 0.2
s.layout_width = timedelta_to_css_ems(s.requested_duration) - 2 * session_layout_margin
s.parent_acronym = s.group.parent.acronym if s.group and s.group.parent else ""
# compress the constraints, so similar constraint labels are
# shared between the conflicting sessions they cover - the JS
# then simply has to detect violations and show the
# preprocessed labels
constrained_sessions_grouped_by_label = defaultdict(set)
for name_id, ts in itertools.groupby(sorted(constraints_for_sessions.get(s.pk, [])), key=lambda t: t[0]):
ts = list(ts)
session_pks = (t[1] for t in ts)
constraint_name = constraint_names[name_id]
if "{count}" in constraint_name.formatted_editor_label:
for session_pk, grouped_session_pks in itertools.groupby(session_pks):
count = sum(1 for i in grouped_session_pks)
constrained_sessions_grouped_by_label[format_html(constraint_name.formatted_editor_label, count=count)].add(session_pk)
else:
constrained_sessions_grouped_by_label[constraint_name.formatted_editor_label].update(session_pks)
s.constrained_sessions = list(constrained_sessions_grouped_by_label.items())
s.formatted_constraints = formatted_constraints_for_sessions.get(s.pk, {})
s.other_sessions = [s_other for s_other in sessions_for_group.get(s.group_id) if s != s_other]
s.readonly = s.current_status in tombstone_states or any(a.schedule_id != schedule.pk for a in assignments_by_session.get(s.pk, []))
if request.method == 'POST':
if not can_edit:
permission_denied(request, "Can't edit this schedule.")
action = request.POST.get('action')
# handle ajax requests
if action == 'assign' and request.POST.get('session', '').isdigit() and request.POST.get('timeslot', '').isdigit():
session = get_object_or_404(sessions, pk=request.POST['session'])
timeslot = get_object_or_404(timeslots_qs, pk=request.POST['timeslot'])
tombstone_session = None
existing_assignments = SchedTimeSessAssignment.objects.filter(session=session, schedule=schedule)
if existing_assignments:
if schedule.pk == meeting.schedule_id and session.current_status == 'sched':
old_timeslot = existing_assignments[0].timeslot
# clone session and leave it as a tombstone
tombstone_session = session
tombstone_session.tombstone_for_id = session.pk
tombstone_session.pk = None
tombstone_session.save()
session = None
SchedulingEvent.objects.create(
session=tombstone_session,
status=SessionStatusName.objects.get(slug='resched'),
by=request.user.person,
)
tombstone_session.current_status = 'resched' # rematerialize status for the rendering
SchedTimeSessAssignment.objects.create(
session=tombstone_session,
schedule=schedule,
timeslot=old_timeslot,
)
existing_assignments.update(timeslot=timeslot, modified=datetime.datetime.now())
else:
SchedTimeSessAssignment.objects.create(
session=session,
schedule=schedule,
timeslot=timeslot,
)
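            # Rescheduling sketch: when a session on the official schedule is already
            # announced ('sched'), the session is cloned as a 'resched' tombstone that
            # stays in the old time slot, and the original assignment is moved to the
            # newly chosen slot, so the published agenda still shows where it used to be.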
r = {'success': True}
if tombstone_session:
prepare_sessions_for_display([tombstone_session])
r['tombstone'] = render_to_string("meeting/edit_meeting_schedule_session.html", {'session': tombstone_session})
return JsonResponse(r)
elif action == 'unassign' and request.POST.get('session', '').isdigit():
session = get_object_or_404(sessions, pk=request.POST['session'])
SchedTimeSessAssignment.objects.filter(session=session, schedule=schedule).delete()
return JsonResponse({'success': True})
elif action == 'swapdays':
# updating the client side is a bit complicated, so just
# do a full refresh
swap_days_form = SwapDaysForm(request.POST)
if not swap_days_form.is_valid():
return HttpResponse("Invalid swap: {}".format(swap_days_form.errors), status=400)
source_day = swap_days_form.cleaned_data['source_day']
target_day = swap_days_form.cleaned_data['target_day']
source_timeslots = [ts for ts in timeslots_qs if ts.time.date() == source_day]
target_timeslots = [ts for ts in timeslots_qs if ts.time.date() == target_day]
swap_meeting_schedule_timeslot_assignments(schedule, source_timeslots, target_timeslots, target_day - source_day)
return HttpResponseRedirect(request.get_full_path())
return HttpResponse("Invalid parameters", status=400)
# prepare timeslot layout
timeslots_by_room_and_day = defaultdict(list)
room_has_timeslots = set()
for t in timeslots_qs:
room_has_timeslots.add(t.location_id)
timeslots_by_room_and_day[(t.location_id, t.time.date())].append(t)
days = []
for day in sorted(set(t.time.date() for t in timeslots_qs)):
room_timeslots = []
for r in rooms:
if r.pk not in room_has_timeslots:
continue
timeslots = []
for t in timeslots_by_room_and_day.get((r.pk, day), []):
t.layout_width = timedelta_to_css_ems(t.end_time() - t.time)
timeslots.append(t)
room_timeslots.append((r, timeslots))
days.append({
'day': day,
'room_timeslots': room_timeslots,
})
room_labels = [[r for r in rooms if r.pk in room_has_timeslots] for i in range(len(days))]
# possible timeslot start/ends
timeslot_groups = defaultdict(set)
for ts in timeslots_qs:
ts.start_end_group = "ts-group-{}-{}".format(ts.time.strftime("%Y%m%d-%H%M"), int(ts.duration.total_seconds() / 60))
timeslot_groups[ts.time.date()].add((ts.time, ts.end_time(), ts.start_end_group))
# prepare sessions
prepare_sessions_for_display(sessions)
for ts in timeslots_qs:
ts.session_assignments = []
timeslots_by_pk = {ts.pk: ts for ts in timeslots_qs}
unassigned_sessions = []
for s in sessions:
assigned = False
for a in assignments_by_session.get(s.pk, []):
timeslot = timeslots_by_pk.get(a.timeslot_id)
if timeslot:
timeslot.session_assignments.append((a, s))
assigned = True
if not assigned:
unassigned_sessions.append(s)
# group parent colors
def cubehelix(i, total, hue=1.2, start_angle=0.5):
# theory in https://arxiv.org/pdf/1108.5083.pdf
rotations = total // 4
x = float(i + 1) / (total + 1)
phi = 2 * math.pi * (start_angle / 3 + rotations * x)
a = hue * x * (1 - x) / 2.0
return (
max(0, min(x + a * (-0.14861 * math.cos(phi) + 1.78277 * math.sin(phi)), 1)),
max(0, min(x + a * (-0.29227 * math.cos(phi) + -0.90649 * math.sin(phi)), 1)),
max(0, min(x + a * (1.97294 * math.cos(phi)), 1)),
)
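    # Usage sketch: cubehelix(i, total) yields an (r, g, b) tuple of floats in [0, 1]
    # that is scaled to 0-255 below; e.g. with 8 parent areas, parent i gets cubehelix(i, 8),
    # giving evenly spaced colors along the helix, while liz_preferred_colors overrides
    # the areas that have fixed colors.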
session_parents = sorted(set(
s.group.parent for s in sessions
if s.group and s.group.parent and (s.group.parent.type_id == 'area' or s.group.parent.acronym in ('irtf','iab'))
), key=lambda p: p.acronym)
liz_preferred_colors = {
'art' : { 'dark' : (204, 121, 167) , 'light' : (234, 232, 230) },
'gen' : { 'dark' : (29, 78, 17) , 'light' : (232, 237, 231) },
'iab' : { 'dark' : (255, 165, 0) , 'light' : (255, 246, 230) },
'int' : { 'dark' : (132, 240, 240) , 'light' : (232, 240, 241) },
'irtf' : { 'dark' : (154, 119, 230) , 'light' : (243, 239, 248) },
'ops' : { 'dark' : (199, 133, 129) , 'light' : (250, 240, 242) },
'rtg' : { 'dark' : (222, 219, 124) , 'light' : (247, 247, 233) },
'sec' : { 'dark' : (0, 114, 178) , 'light' : (245, 252, 248) },
'tsv' : { 'dark' : (117,201,119) , 'light' : (251, 252, 255) },
}
for i, p in enumerate(session_parents):
if p.acronym in liz_preferred_colors:
colors = liz_preferred_colors[p.acronym]
p.scheduling_color = "rgb({}, {}, {})".format(*colors['dark'])
p.light_scheduling_color = "rgb({}, {}, {})".format(*colors['light'])
else:
rgb_color = cubehelix(i, len(session_parents))
p.scheduling_color = "rgb({}, {}, {})".format(*tuple(int(round(x * 255)) for x in rgb_color))
p.light_scheduling_color = "rgb({}, {}, {})".format(*tuple(int(round((0.9 + 0.1 * x) * 255)) for x in rgb_color))
return render(request, "meeting/edit_meeting_schedule.html", {
'meeting': meeting,
'schedule': schedule,
'can_edit': can_edit,
'can_edit_properties': can_edit or secretariat,
'secretariat': secretariat,
'days': days,
'room_labels': room_labels,
'timeslot_groups': sorted((d, list(sorted(t_groups))) for d, t_groups in timeslot_groups.items()),
'unassigned_sessions': unassigned_sessions,
'session_parents': session_parents,
'hide_menu': True,
})
class RoomNameModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.name
class TimeSlotForm(forms.Form):
day = forms.TypedChoiceField(coerce=lambda t: datetime.datetime.strptime(t, "%Y-%m-%d").date())
time = forms.TimeField()
duration = CustomDurationField() # this is just to make 1:30 turn into 1.5 hours instead of 1.5 minutes
location = RoomNameModelChoiceField(queryset=Room.objects.all(), required=False, empty_label="(No location)")
show_location = forms.BooleanField(initial=True, required=False)
type = forms.ModelChoiceField(queryset=TimeSlotTypeName.objects.filter(used=True), empty_label=None, required=False)
name = forms.CharField(help_text='Name that appears on the agenda', required=False)
short = forms.CharField(max_length=32,label='Short name', help_text='Abbreviated session name used for material file names', required=False)
group = forms.ModelChoiceField(queryset=Group.objects.filter(type__in=['ietf', 'team'], state='active'),
help_text='''Select a group to associate with this session.<br>For example: Tutorials = Education, Code Sprint = Tools Team''',
required=False)
agenda_note = forms.CharField(required=False)
def __init__(self, meeting, schedule, *args, timeslot=None, **kwargs):
super().__init__(*args,**kwargs)
self.fields["time"].widget.attrs["placeholder"] = "HH:MM"
self.fields["duration"].widget.attrs["placeholder"] = "HH:MM"
self.fields["duration"].initial = ""
self.fields["day"].choices = [
((meeting.date + datetime.timedelta(days=i)).isoformat(), (meeting.date + datetime.timedelta(days=i)).strftime("%a %b %d"))
for i in range(meeting.days)
]
self.fields['location'].queryset = self.fields['location'].queryset.filter(meeting=meeting)
self.fields['group'].widget.attrs['data-ietf'] = Group.objects.get(acronym='ietf').pk
self.active_assignment = None
if timeslot:
self.initial = {
'day': timeslot.time.date(),
'time': timeslot.time.time(),
'duration': timeslot.duration,
'location': timeslot.location_id,
'show_location': timeslot.show_location,
'type': timeslot.type_id,
'name': timeslot.name,
}
assignments = sorted(SchedTimeSessAssignment.objects.filter(
timeslot=timeslot,
schedule__in=[schedule, schedule.base if schedule else None]
).select_related('session', 'session__group'), key=lambda a: 0 if a.schedule_id == schedule.pk else 1)
if assignments:
self.active_assignment = assignments[0]
self.initial['short'] = self.active_assignment.session.short
self.initial['group'] = self.active_assignment.session.group_id
if not self.active_assignment or timeslot.type_id != 'regular':
del self.fields['agenda_note'] # at the moment, the UI only shows this field for regular sessions
self.timeslot = timeslot
def clean(self):
group = self.cleaned_data.get('group')
ts_type = self.cleaned_data.get('type')
short = self.cleaned_data.get('short')
if ts_type:
if ts_type.slug in ['break', 'reg', 'reserved', 'unavail', 'regular']:
if ts_type.slug != 'regular':
self.cleaned_data['group'] = self.fields['group'].queryset.get(acronym='secretariat')
else:
if not group:
self.add_error('group', 'When scheduling this type of time slot, a group must be associated')
if not short:
self.add_error('short', 'When scheduling this type of time slot, a short name is required')
if self.timeslot and self.timeslot.type_id == 'regular' and self.active_assignment and ts_type.pk != self.timeslot.type_id:
self.add_error('type', "Can't change type on time slots for regular sessions when a session has been assigned")
if self.active_assignment and self.active_assignment.session.group != self.cleaned_data.get('group') and self.active_assignment.session.materials.exists() and self.timeslot.type_id != 'regular':
self.add_error('group', "Can't change group after materials have been uploaded")
@role_required('Area Director', 'Secretariat')
def edit_meeting_timeslots_and_misc_sessions(request, num=None, owner=None, name=None):
meeting = get_meeting(num)
if name is None:
schedule = meeting.schedule
else:
schedule = get_schedule_by_name(meeting, get_person_by_email(owner), name)
if schedule is None:
raise Http404("No meeting information for meeting %s owner %s schedule %s available" % (num, owner, name))
rooms = list(Room.objects.filter(meeting=meeting).prefetch_related('session_types').order_by('-capacity', 'name'))
rooms.append(Room(name="(No location)"))
timeslot_qs = TimeSlot.objects.filter(meeting=meeting).prefetch_related('type').order_by('time')
can_edit = has_role(request.user, 'Secretariat')
if request.method == 'GET' and request.GET.get('action') == "edit-timeslot":
timeslot_pk = request.GET.get('timeslot')
if not timeslot_pk or not timeslot_pk.isdecimal():
raise Http404
timeslot = get_object_or_404(timeslot_qs, pk=timeslot_pk)
assigned_session = add_event_info_to_session_qs(Session.objects.filter(
timeslotassignments__schedule__in=[schedule, schedule.base],
timeslotassignments__timeslot=timeslot,
)).first()
        timeslot.can_cancel = not assigned_session or assigned_session.current_status not in ['canceled', 'canceledpa', 'resched']
return JsonResponse({
'form': render_to_string("meeting/edit_timeslot_form.html", {
'timeslot_form_action': 'edit',
'timeslot_form': TimeSlotForm(meeting, schedule, timeslot=timeslot),
'timeslot': timeslot,
'schedule': schedule,
'meeting': meeting,
'can_edit': can_edit,
}, request=request)
})
scroll = request.POST.get('scroll')
def redirect_with_scroll():
url = request.get_full_path()
if scroll and scroll.isdecimal():
url += "#scroll={}".format(scroll)
return HttpResponseRedirect(url)
add_timeslot_form = None
if request.method == 'POST' and request.POST.get('action') == 'add-timeslot' and can_edit:
add_timeslot_form = TimeSlotForm(meeting, schedule, request.POST)
if add_timeslot_form.is_valid():
c = add_timeslot_form.cleaned_data
timeslot, created = TimeSlot.objects.get_or_create(
meeting=meeting,
type=c['type'],
name=c['name'],
time=datetime.datetime.combine(c['day'], c['time']),
duration=c['duration'],
location=c['location'],
show_location=c['show_location'],
)
if timeslot.type_id != 'regular':
if not created:
Session.objects.filter(timeslotassignments__timeslot=timeslot).delete()
session = Session.objects.create(
meeting=meeting,
name=c['name'],
short=c['short'],
group=c['group'],
type=c['type'],
agenda_note=c.get('agenda_note') or "",
)
SchedulingEvent.objects.create(
session=session,
status=SessionStatusName.objects.get(slug='sched'),
by=request.user.person,
)
SchedTimeSessAssignment.objects.create(
timeslot=timeslot,
session=session,
schedule=schedule
)
return redirect_with_scroll()
edit_timeslot_form = None
if request.method == 'POST' and request.POST.get('action') == 'edit-timeslot' and can_edit:
timeslot_pk = request.POST.get('timeslot')
if not timeslot_pk or not timeslot_pk.isdecimal():
raise Http404
timeslot = get_object_or_404(TimeSlot, pk=timeslot_pk)
edit_timeslot_form = TimeSlotForm(meeting, schedule, request.POST, timeslot=timeslot)
if edit_timeslot_form.is_valid() and edit_timeslot_form.active_assignment.schedule_id == schedule.pk:
c = edit_timeslot_form.cleaned_data
timeslot.type = c['type']
timeslot.name = c['name']
timeslot.time = datetime.datetime.combine(c['day'], c['time'])
timeslot.duration = c['duration']
timeslot.location = c['location']
timeslot.show_location = c['show_location']
timeslot.save()
session = Session.objects.filter(
timeslotassignments__schedule__in=[schedule, schedule.base if schedule else None],
timeslotassignments__timeslot=timeslot,
).select_related('group').first()
if session:
if timeslot.type_id != 'regular':
session.name = c['name']
session.short = c['short']
session.group = c['group']
session.type = c['type']
session.agenda_note = c.get('agenda_note') or ""
session.save()
return redirect_with_scroll()
if request.method == 'POST' and request.POST.get('action') == 'cancel-timeslot' and can_edit:
timeslot_pk = request.POST.get('timeslot')
if not timeslot_pk or not timeslot_pk.isdecimal():
raise Http404
timeslot = get_object_or_404(TimeSlot, pk=timeslot_pk)
if timeslot.type_id != 'break':
sessions = add_event_info_to_session_qs(
Session.objects.filter(timeslotassignments__schedule=schedule, timeslotassignments__timeslot=timeslot),
).exclude(current_status__in=['canceled', 'resched'])
for session in sessions:
SchedulingEvent.objects.create(
session=session,
status=SessionStatusName.objects.get(slug='canceled'),
by=request.user.person,
)
return redirect_with_scroll()
if request.method == 'POST' and request.POST.get('action') == 'delete-timeslot' and can_edit:
timeslot_pk = request.POST.get('timeslot')
if not timeslot_pk or not timeslot_pk.isdecimal():
raise Http404
timeslot = get_object_or_404(TimeSlot, pk=timeslot_pk)
if timeslot.type_id != 'regular':
for session in Session.objects.filter(timeslotassignments__schedule=schedule, timeslotassignments__timeslot=timeslot):
for doc in session.materials.all():
doc.set_state(State.objects.get(type=doc.type_id, slug='deleted'))
e = DocEvent(doc=doc, rev=doc.rev, by=request.user.person, type='deleted')
e.desc = "Deleted meeting session"
e.save()
session.delete()
timeslot.delete()
return redirect_with_scroll()
sessions_by_pk = {
s.pk: s for s in
add_event_info_to_session_qs(
Session.objects.filter(
meeting=meeting,
).order_by('pk'),
requested_time=True,
requested_by=True,
).filter(
current_status__in=['appr', 'schedw', 'scheda', 'sched', 'canceled', 'canceledpa', 'resched']
).prefetch_related(
            'group', 'group__parent', 'group__type',
)
}
assignments_by_timeslot = defaultdict(list)
for a in SchedTimeSessAssignment.objects.filter(schedule__in=[schedule, schedule.base]):
assignments_by_timeslot[a.timeslot_id].append(a)
days = [meeting.date + datetime.timedelta(days=i) for i in range(meeting.days)]
timeslots_by_day_and_room = defaultdict(list)
for t in timeslot_qs:
timeslots_by_day_and_room[(t.time.date(), t.location_id)].append(t)
min_time = min([t.time.time() for t in timeslot_qs] + [datetime.time(8)])
max_time = max([t.end_time().time() for t in timeslot_qs] + [datetime.time(22)])
min_max_delta = datetime.datetime.combine(meeting.date, max_time) - datetime.datetime.combine(meeting.date, min_time)
day_grid = []
for d in days:
room_timeslots = []
for r in rooms:
ts = []
for t in timeslots_by_day_and_room.get((d, r.pk), []):
# FIXME: the database (as of 2020) contains spurious
# regular time slots in rooms not intended for regular
# sessions - once those are gone, this filter can go
# away
                if t.type_id == 'regular' and not any(st.slug == 'regular' for st in r.session_types.all()):
continue
t.assigned_sessions = []
for a in assignments_by_timeslot.get(t.pk, []):
s = sessions_by_pk.get(a.session_id)
if s:
t.assigned_sessions.append(s)
t.left_offset = 100.0 * (t.time - datetime.datetime.combine(t.time.date(), min_time)) / min_max_delta
t.layout_width = min(100.0 * t.duration / min_max_delta, 100 - t.left_offset)
ts.append(t)
room_timeslots.append((r, ts))
day_grid.append({
'day': d,
'room_timeslots': room_timeslots
})
return render(request, "meeting/edit_meeting_timeslots_and_misc_sessions.html", {
'meeting': meeting,
'schedule': schedule,
'can_edit': can_edit,
'day_grid': day_grid,
'empty_timeslot_form': TimeSlotForm(meeting, schedule),
'add_timeslot_form': add_timeslot_form,
'edit_timeslot_form': edit_timeslot_form,
'scroll': scroll,
'hide_menu': True,
})
##############################################################################
#@role_required('Area Director','Secretariat')
# The decorator above is disabled for now; permissions are checked below
# via schedule_permissions().
@ensure_csrf_cookie
def edit_schedule(request, num=None, owner=None, name=None):
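"""Schedule edit page.
A POST delegates to schedule_create(); a GET renders the landscape edit
view for the named schedule, or the meeting's official schedule if no
name is given.
"""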
if request.method == 'POST':
return schedule_create(request, num, owner, name)
user = request.user
meeting = get_meeting(num)
person = get_person_by_email(owner)
if name is None:
schedule = meeting.schedule
else:
schedule = get_schedule_by_name(meeting, person, name)
if schedule is None:
raise Http404("No meeting information for meeting %s owner %s schedule %s available" % (num, owner, name))
meeting_base_url = request.build_absolute_uri(meeting.base_url())
site_base_url = request.build_absolute_uri('/')[:-1] # skip the trailing slash
rooms = meeting.room_set.filter(session_types__slug='regular').distinct().order_by("capacity")
saveas = SaveAsForm()
saveasurl=reverse(edit_schedule,
args=[meeting.number, schedule.owner_email(), schedule.name])
can_see, can_edit,secretariat = schedule_permissions(meeting, schedule, user)
if not can_see:
return render(request, "meeting/private_schedule.html",
{"schedule":schedule,
"meeting": meeting,
"meeting_base_url":meeting_base_url,
"hide_menu": True
}, status=403, content_type="text/html")
assignments = get_all_assignments_from_schedule(schedule)
# get_modified_from_assignments() needs the query set, not the list
modified = get_modified_from_assignments(assignments)
area_list = get_areas()
wg_name_list = get_wg_name_list(assignments)
wg_list = get_wg_list(wg_name_list)
ads = find_ads_for_meeting(meeting)
for ad in ads:
# set the default to avoid needing extra arguments in templates
# django 1.3+
ad.default_hostscheme = site_base_url
time_slices,date_slices = build_all_agenda_slices(meeting)
return render(request, "meeting/landscape_edit.html",
{"schedule":schedule,
"saveas": saveas,
"saveasurl": saveasurl,
"meeting_base_url": meeting_base_url,
"site_base_url": site_base_url,
"rooms":rooms,
"time_slices":time_slices,
"date_slices":date_slices,
"modified": modified,
"meeting":meeting,
"area_list": area_list,
"area_directors" : ads,
"wg_list": wg_list ,
"assignments": assignments,
"show_inline": set(["txt","htm","html"]),
"hide_menu": True,
"can_edit_properties": can_edit or secretariat,
})
class SchedulePropertiesForm(forms.ModelForm):
class Meta:
model = Schedule
fields = ['name', 'notes', 'visible', 'public', 'base']
def __init__(self, meeting, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['base'].queryset = self.fields['base'].queryset.filter(meeting=meeting)
if self.instance.pk is not None:
self.fields['base'].queryset = self.fields['base'].queryset.exclude(pk=self.instance.pk)
@role_required('Area Director','Secretariat')
def edit_schedule_properties(request, num, owner, name):
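"""Edit the properties (name, notes, visibility, public, base) of a schedule."""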
meeting = get_meeting(num)
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
if schedule is None:
raise Http404("No agenda information for meeting %s owner %s schedule %s available" % (num, owner, name))
can_see, can_edit, secretariat = schedule_permissions(meeting, schedule, request.user)
can_edit_properties = can_edit or secretariat
if not can_edit_properties:
permission_denied(request, "You may not edit this schedule.")
if request.method == 'POST':
form = SchedulePropertiesForm(meeting, instance=schedule, data=request.POST)
if form.is_valid():
form.save()
if request.GET.get('next'):
return HttpResponseRedirect(request.GET.get('next'))
return redirect('ietf.meeting.views.edit_schedule', num=num, owner=owner, name=name)
else:
form = SchedulePropertiesForm(meeting, instance=schedule)
return render(request, "meeting/properties_edit.html", {
"schedule": schedule,
"form": form,
"meeting": meeting,
})
nat_sort_re = re.compile('([0-9]+)')
def natural_sort_key(s): # from https://stackoverflow.com/questions/4836710/is-there-a-built-in-function-for-string-natural-sort
return [int(text) if text.isdecimal() else text.lower() for text in nat_sort_re.split(s)]
@role_required('Area Director','Secretariat')
def list_schedules(request, num):
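"""List the schedules for a meeting, grouped into official, own, other public and other private agendas."""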
meeting = get_meeting(num)
schedules = Schedule.objects.filter(
meeting=meeting
).prefetch_related('owner', 'assignments', 'origin', 'origin__assignments', 'base').order_by('owner', '-name', '-public').distinct()
if not has_role(request.user, 'Secretariat'):
schedules = schedules.filter(Q(visible=True) | Q(owner=request.user.person))
official_schedules = []
own_schedules = []
other_public_schedules = []
other_private_schedules = []
is_secretariat = has_role(request.user, 'Secretariat')
for s in schedules:
s.can_edit_properties = is_secretariat or user_is_person(request.user, s.owner)
if s.origin:
s.changes_from_origin = len(diff_meeting_schedules(s.origin, s))
if s in [meeting.schedule, meeting.schedule.base if meeting.schedule else None]:
official_schedules.append(s)
elif user_is_person(request.user, s.owner):
own_schedules.append(s)
elif s.public:
other_public_schedules.append(s)
else:
other_private_schedules.append(s)
schedule_groups = [
(official_schedules, False, "Official Agenda"),
(own_schedules, True, "Own Draft Agendas"),
(other_public_schedules, False, "Other Draft Agendas"),
(other_private_schedules, False, "Other Private Draft Agendas"),
]
schedule_groups = [(sorted(l, reverse=True, key=lambda s: natural_sort_key(s.name)), own, *t) for l, own, *t in schedule_groups if l or own]
return render(request, "meeting/schedule_list.html", {
'meeting': meeting,
'schedule_groups': schedule_groups,
})
class DiffSchedulesForm(forms.Form):
from_schedule = forms.ChoiceField()
to_schedule = forms.ChoiceField()
def __init__(self, meeting, user, *args, **kwargs):
super().__init__(*args, **kwargs)
qs = Schedule.objects.filter(meeting=meeting).prefetch_related('owner').order_by('-public').distinct()
if not has_role(user, 'Secretariat'):
qs = qs.filter(Q(visible=True) | Q(owner__user=user))
sorted_schedules = sorted(qs, reverse=True, key=lambda s: natural_sort_key(s.name))
schedule_choices = [(schedule.name, "{} ({})".format(schedule.name, schedule.owner)) for schedule in sorted_schedules]
self.fields['from_schedule'].choices = schedule_choices
self.fields['to_schedule'].choices = schedule_choices
@role_required('Area Director','Secretariat')
def diff_schedules(request, num):
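"""Show the assignment differences between two schedules of a meeting."""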
meeting = get_meeting(num)
diffs = None
from_schedule = None
to_schedule = None
if 'from_schedule' in request.GET:
form = DiffSchedulesForm(meeting, request.user, request.GET)
if form.is_valid():
from_schedule = get_object_or_404(Schedule, name=form.cleaned_data['from_schedule'], meeting=meeting)
to_schedule = get_object_or_404(Schedule, name=form.cleaned_data['to_schedule'], meeting=meeting)
raw_diffs = diff_meeting_schedules(from_schedule, to_schedule)
diffs = prefetch_schedule_diff_objects(raw_diffs)
for d in diffs:
s = d['session']
s.session_label = s.short_name
if s.requested_duration:
s.session_label = "{} ({}h)".format(s.session_label, round(s.requested_duration.seconds / 60.0 / 60.0, 1))
else:
form = DiffSchedulesForm(meeting, request.user)
return render(request, "meeting/diff_schedules.html", {
'meeting': meeting,
'form': form,
'diffs': diffs,
'from_schedule': from_schedule,
'to_schedule': to_schedule,
})
@ensure_csrf_cookie
def session_materials(request, session_id):
"""Session details for agenda page pop-up"""
session = get_object_or_404(Session, id=session_id)
assignments = SchedTimeSessAssignment.objects.filter(session=session)
if len(assignments) == 0:
raise Http404('No such scheduled session')
assignments = preprocess_assignments_for_agenda(assignments, session.meeting)
assignment = assignments[0]
return render(request, 'meeting/session_materials.html', dict(item=assignment))
@ensure_csrf_cookie
def agenda(request, num=None, name=None, base=None, ext=None, owner=None, utc=""):
base = base if base else 'agenda'
ext = ext if ext else '.html'
mimetype = {
".html":"text/html; charset=%s"%settings.DEFAULT_CHARSET,
".txt": "text/plain; charset=%s"%settings.DEFAULT_CHARSET,
".csv": "text/csv; charset=%s"%settings.DEFAULT_CHARSET,
}
# We do not have the appropriate data in the datatracker for IETF 64 and earlier.
# So that we're not producing misleading pages...
assert num is None or num.isdigit()
meeting = get_ietf_meeting(num)
if not meeting or (meeting.number.isdigit() and int(meeting.number) <= 64 and (not meeting.schedule or not meeting.schedule.assignments.exists())):
if ext == '.html' or (meeting and meeting.number.isdigit() and 0 < int(meeting.number) <= 64):
return HttpResponseRedirect( 'https://www.ietf.org/proceedings/%s' % num )
else:
raise Http404("No such meeting")
if name is None:
schedule = get_schedule(meeting, name)
else:
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
if schedule is None:
base = base.replace("-utc", "")
return render(request, "meeting/no-"+base+ext, {'meeting':meeting }, content_type=mimetype[ext])
updated = meeting.updated()
filtered_assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base],
timeslot__type__private=False,
)
filtered_assignments = preprocess_assignments_for_agenda(filtered_assignments, meeting)
tag_assignments_with_filter_keywords(filtered_assignments)
if ext == ".csv":
return agenda_csv(schedule, filtered_assignments)
# Extract the group hierarchy. This is a little complicated because we
# may be dealing with historic groups.
seen = set()
groups = [a.session.historic_group for a in filtered_assignments
if a.session
and a.session.historic_group
and is_regular_agenda_filter_group(a.session.historic_group)
and a.session.historic_group.historic_parent]
group_parents = []
for g in groups:
if g.historic_parent.acronym not in seen:
group_parents.append(g.historic_parent)
seen.add(g.historic_parent.acronym)
seen = set()
for p in group_parents:
p.group_list = []
for g in groups:
if g.acronym not in seen and g.historic_parent.acronym == p.acronym:
p.group_list.append(g)
seen.add(g.acronym)
p.group_list.sort(key=lambda g: g.acronym)
# Groups gathered and processed. Now arrange for the filter UI.
#
# The agenda_filter template expects a list of categorized header buttons, each
# with a list of children. Make two categories: the IETF areas and the other parent groups.
# We also pass a list of 'extra' buttons - currently Office Hours and miscellaneous filters.
# All but the last of these are additionally used by the agenda.html template to make
# a list of filtered ical buttons. The last group is ignored for this.
area_group_filters = []
other_group_filters = []
extra_filters = []
for p in group_parents:
new_filter = dict(
label=p.acronym.upper(),
keyword=p.acronym.lower(),
children=[
dict(
label=g.acronym,
keyword=g.acronym.lower(),
is_bof=g.is_bof(),
) for g in p.group_list
]
)
if p.type.slug == 'area':
area_group_filters.append(new_filter)
else:
other_group_filters.append(new_filter)
office_hours_labels = set()
for a in filtered_assignments:
suffix = ' office hours'
if a.session.name.lower().endswith(suffix):
office_hours_labels.add(a.session.name[:-len(suffix)].strip())
if len(office_hours_labels) > 0:
# keyword needs to match what's tagged in filter_keywords_for_session()
extra_filters.append(dict(
label='Office Hours',
keyword='officehours',
children=[
dict(
label=label,
keyword=label.lower().replace(' ', '')+'officehours',
is_bof=False,
) for label in office_hours_labels
]
))
# Keywords that should appear in 'non-area' column
non_area_labels = [
'BoF', 'EDU', 'Hackathon', 'IEPG', 'IESG', 'IETF', 'Plenary', 'Secretariat', 'Tools',
]
# Remove any unused non-area keywords
non_area_filters = [
dict(label=label, keyword=label.lower(), is_bof=False)
for label in non_area_labels if any([
label.lower() in assignment.filter_keywords
for assignment in filtered_assignments
])
]
if len(non_area_filters) > 0:
extra_filters.append(dict(
label=None,
keyword=None,
children=non_area_filters,
))
area_group_filters.sort(key=lambda p:p['label'])
other_group_filters.sort(key=lambda p:p['label'])
filter_categories = [category
for category in [area_group_filters, other_group_filters, extra_filters]
if len(category) > 0]
is_current_meeting = (num is None) or (num == get_current_ietf_meeting_num())
rendered_page = render(request, "meeting/"+base+ext, {
"schedule": schedule,
"filtered_assignments": filtered_assignments,
"updated": updated,
"filter_categories": filter_categories,
"non_area_keywords": [label.lower() for label in non_area_labels],
"now": datetime.datetime.now().astimezone(pytz.UTC),
"timezone": meeting.time_zone,
"is_current_meeting": is_current_meeting,
"use_codimd": True if meeting.date>=settings.MEETING_USES_CODIMD_DATE else False,
"cache_time": 150 if is_current_meeting else 3600,
}, content_type=mimetype[ext])
return rendered_page
def agenda_csv(schedule, filtered_assignments):
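"""Write the filtered assignments as a CSV response."""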
response = HttpResponse(content_type="text/csv; charset=%s"%settings.DEFAULT_CHARSET)
writer = csv.writer(response, delimiter=str(','), quoting=csv.QUOTE_ALL)
headings = ["Date", "Start", "End", "Session", "Room", "Area", "Acronym", "Type", "Description", "Session ID", "Agenda", "Slides"]
def write_row(row):
# Under Python 3, csv.writer expects str values; passing utf-8-encoded
# bytes would render as "b'...'" in the output.
while len(row) < len(headings):
row.append(None) # produce empty entries at the end as necessary
writer.writerow(row)
def agenda_field(item):
agenda_doc = item.session.agenda()
if agenda_doc:
return "http://www.ietf.org/proceedings/{schedule.meeting.number}/agenda/{agenda.uploaded_filename}".format(schedule=schedule, agenda=agenda_doc)
else:
return ""
def slides_field(item):
return "|".join("http://www.ietf.org/proceedings/{schedule.meeting.number}/slides/{slide.uploaded_filename}".format(schedule=schedule, slide=slide) for slide in item.session.slides())
write_row(headings)
for item in filtered_assignments:
row = []
row.append(item.timeslot.time.strftime("%Y-%m-%d"))
row.append(item.timeslot.time.strftime("%H%M"))
row.append(item.timeslot.end_time().strftime("%H%M"))
if item.timeslot.type_id == "break":
row.append(item.timeslot.type.name)
row.append(schedule.meeting.break_area)
row.append("")
row.append("")
row.append("")
row.append(item.timeslot.name)
row.append("b{}".format(item.timeslot.pk))
elif item.timeslot.type_id == "reg":
row.append(item.timeslot.type.name)
row.append(schedule.meeting.reg_area)
row.append("")
row.append("")
row.append("")
row.append(item.timeslot.name)
row.append("r{}".format(item.timeslot.pk))
elif item.timeslot.type_id == "other":
row.append("None")
row.append(item.timeslot.location.name if item.timeslot.location else "")
row.append("")
row.append(item.session.historic_group.acronym)
row.append(item.session.historic_group.historic_parent.acronym.upper() if item.session.historic_group.historic_parent else "")
row.append(item.session.name)
row.append(item.session.pk)
elif item.timeslot.type_id == "plenary":
row.append(item.session.name)
row.append(item.timeslot.location.name if item.timeslot.location else "")
row.append("")
row.append(item.session.historic_group.acronym if item.session.historic_group else "")
row.append("")
row.append(item.session.name)
row.append(item.session.pk)
row.append(agenda_field(item))
row.append(slides_field(item))
elif item.timeslot.type_id == 'regular':
row.append(item.timeslot.name)
row.append(item.timeslot.location.name if item.timeslot.location else "")
row.append(item.session.historic_group.historic_parent.acronym.upper() if item.session.historic_group.historic_parent else "")
row.append(item.session.historic_group.acronym if item.session.historic_group else "")
row.append("BOF" if item.session.historic_group.state_id in ("bof", "bof-conc") else item.session.historic_group.type.name)
row.append(item.session.historic_group.name if item.session.historic_group else "")
row.append(item.session.pk)
row.append(agenda_field(item))
row.append(slides_field(item))
if len(row) > 3:
write_row(row)
return response
@role_required('Area Director','Secretariat','IAB')
def agenda_by_room(request, num=None, name=None, owner=None):
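"""Show the agenda grouped by day and ordered by room."""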
meeting = get_meeting(num)
if name is None:
schedule = get_schedule(meeting)
else:
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base if schedule else None]
).prefetch_related('timeslot', 'timeslot__location', 'session', 'session__group', 'session__group__parent')
ss_by_day = OrderedDict()
for day in assignments.dates('timeslot__time','day'):
ss_by_day[day]=[]
for ss in assignments.order_by('timeslot__location__functional_name','timeslot__location__name','timeslot__time'):
day = ss.timeslot.time.date()
ss_by_day[day].append(ss)
return render(request,"meeting/agenda_by_room.html",{"meeting":meeting,"schedule":schedule,"ss_by_day":ss_by_day})
@role_required('Area Director','Secretariat','IAB')
def agenda_by_type(request, num=None, type=None, name=None, owner=None):
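"""Show the agenda ordered by session type, optionally restricted to a single type."""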
meeting = get_meeting(num)
if name is None:
schedule = get_schedule(meeting)
else:
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base if schedule else None]
).prefetch_related(
'timeslot', 'timeslot__location', 'session', 'session__group', 'session__group__parent'
).order_by('session__type__slug','timeslot__time','session__group__acronym')
if type:
assignments = assignments.filter(session__type__slug=type)
return render(request,"meeting/agenda_by_type.html",{"meeting":meeting,"schedule":schedule,"assignments":assignments})
@role_required('Area Director','Secretariat','IAB')
def agenda_by_type_ics(request,num=None,type=None):
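"""Return an iCalendar rendering of the official schedule, optionally restricted to a single session type."""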
meeting = get_meeting(num)
schedule = get_schedule(meeting)
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base if schedule else None]
).prefetch_related(
'timeslot', 'timeslot__location', 'session', 'session__group', 'session__group__parent'
).order_by('session__type__slug','timeslot__time')
if type:
assignments = assignments.filter(session__type__slug=type)
updated = meeting.updated()
return render(request,"meeting/agenda.ics",{"schedule":schedule,"updated":updated,"assignments":assignments},content_type="text/calendar")
def session_draft_list(num, acronym):
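"""Collect the names of drafts referenced in a group's active agendas or attached to its sessions for the given meeting."""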
try:
agendas = Document.objects.filter(type="agenda",
session__meeting__number=num,
session__group__acronym=acronym,
states=State.objects.get(type="agenda", slug="active")).distinct()
except Document.DoesNotExist:
raise Http404
drafts = set()
for agenda in agendas:
content, _ = read_agenda_file(num, agenda)
if content:
drafts.update(re.findall(b'(draft-[-a-z0-9]*)', content))
result = []
for draft in drafts:
draft = force_str(draft)
try:
if re.search('-[0-9]{2}$', draft):
doc_name = draft
else:
doc = Document.objects.get(name=draft)
doc_name = draft + "-" + doc.rev
if doc_name not in result:
result.append(doc_name)
except Document.DoesNotExist:
pass
for sp in SessionPresentation.objects.filter(session__meeting__number=num, session__group__acronym=acronym, document__type='draft'):
doc_name = sp.document.name + "-" + sp.document.rev
if doc_name not in result:
result.append(doc_name)
return sorted(result)
def session_draft_tarfile(request, num, acronym):
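"""Return a .tgz containing PDF renderings of the session drafts, plus a manifest of what was included."""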
drafts = session_draft_list(num, acronym)
response = HttpResponse(content_type='application/octet-stream')
response['Content-Disposition'] = 'attachment; filename=%s-drafts.tgz'%(acronym)
tarstream = tarfile.open('','w:gz',response)
mfh, mfn = mkstemp()
os.close(mfh)
manifest = io.open(mfn, "w")
for doc_name in drafts:
pdf_path = os.path.join(settings.INTERNET_DRAFT_PDF_PATH, doc_name + ".pdf")
if not os.path.exists(pdf_path):
convert_draft_to_pdf(doc_name)
if os.path.exists(pdf_path):
try:
tarstream.add(pdf_path, str(doc_name + ".pdf"))
manifest.write("Included: "+pdf_path+"\n")
except Exception as e:
manifest.write(("Failed (%s): "%e)+pdf_path+"\n")
else:
manifest.write("Not found: "+pdf_path+"\n")
manifest.close()
tarstream.add(mfn, "manifest.txt")
tarstream.close()
os.unlink(mfn)
return response
def session_draft_pdf(request, num, acronym):
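"""Return a single PDF concatenating the session drafts, with a pdfmark bookmark per draft."""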
drafts = session_draft_list(num, acronym)
curr_page = 1
pmh, pmn = mkstemp()
os.close(pmh)
pdfmarks = io.open(pmn, "w")
pdf_list = ""
for draft in drafts:
pdf_path = os.path.join(settings.INTERNET_DRAFT_PDF_PATH, draft + ".pdf")
if not os.path.exists(pdf_path):
convert_draft_to_pdf(draft)
if os.path.exists(pdf_path):
pages = pdf_pages(pdf_path)
pdfmarks.write("[/Page "+str(curr_page)+" /View [/XYZ 0 792 1.0] /Title (" + draft + ") /OUT pdfmark\n")
pdf_list = pdf_list + " " + pdf_path
curr_page = curr_page + pages
pdfmarks.close()
pdfh, pdfn = mkstemp()
os.close(pdfh)
gs = settings.GHOSTSCRIPT_COMMAND
code, out, err = pipe(gs + " -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=" + pdfn + " " + pdf_list + " " + pmn)
assertion('code == 0')
pdf = io.open(pdfn,"rb")
pdf_contents = pdf.read()
pdf.close()
os.unlink(pmn)
os.unlink(pdfn)
return HttpResponse(pdf_contents, content_type="application/pdf")
def week_view(request, num=None, name=None, owner=None):
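"""Render the week view; assignments are passed to the template as JSON for the client-side week-view code."""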
meeting = get_meeting(num)
if name is None:
schedule = get_schedule(meeting)
else:
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
if not schedule:
raise Http404
filtered_assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base],
timeslot__type__private=False,
)
filtered_assignments = preprocess_assignments_for_agenda(filtered_assignments, meeting)
tag_assignments_with_filter_keywords(filtered_assignments)
items = []
for a in filtered_assignments:
# we don't HTML escape any of these as the week-view code is using createTextNode
item = {
"key": str(a.timeslot.pk),
"utc_time": a.timeslot.utc_start_time().strftime("%Y%m%dT%H%MZ"), # ISO8601 compliant
"duration": a.timeslot.duration.seconds,
"type": a.timeslot.type.name,
"filter_keywords": ",".join(a.filter_keywords),
}
if a.session:
if a.session.historic_group:
item["group"] = a.session.historic_group.acronym
if a.session.name:
item["name"] = a.session.name
elif a.timeslot.type_id == "break":
item["name"] = a.timeslot.name
item["area"] = a.timeslot.type_id
item["group"] = a.timeslot.type_id
elif a.session.historic_group:
item["name"] = a.session.historic_group.name
if a.session.historic_group.state_id == "bof":
item["name"] += " BOF"
item["state"] = a.session.historic_group.state.name
if a.session.historic_group.historic_parent:
item["area"] = a.session.historic_group.historic_parent.acronym
if a.timeslot.show_location:
item["room"] = a.timeslot.get_location()
if a.session and a.session.agenda():
item["agenda"] = a.session.agenda().get_href()
if a.session.current_status == 'canceled':
item["name"] = "CANCELLED - " + item["name"]
items.append(item)
return render(request, "meeting/week-view.html", {
"items": json.dumps(items),
})
@role_required('Area Director','Secretariat','IAB')
def room_view(request, num=None, name=None, owner=None):
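"""Show timeslots, assignments and unavailable periods laid out per room across the meeting days."""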
meeting = get_meeting(num)
rooms = meeting.room_set.order_by('functional_name','name')
if not rooms.exists():
return HttpResponse("No rooms defined yet")
if name is None:
schedule = get_schedule(meeting)
else:
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base if schedule else None]
).prefetch_related(
'timeslot', 'timeslot__location', 'session', 'session__group', 'session__group__parent'
)
unavailable = meeting.timeslot_set.filter(type__slug='unavail')
if not (assignments.exists() or unavailable.exists()):
return HttpResponse("No sessions/timeslots available yet")
earliest = None
latest = None
if assignments:
earliest = assignments.aggregate(Min('timeslot__time'))['timeslot__time__min']
latest = assignments.aggregate(Max('timeslot__time'))['timeslot__time__max']
if unavailable:
earliest_unavailable = unavailable.aggregate(Min('time'))['time__min']
if not earliest or ( earliest_unavailable and earliest_unavailable < earliest ):
earliest = earliest_unavailable
latest_unavailable = unavailable.aggregate(Max('time'))['time__max']
if not latest or ( latest_unavailable and latest_unavailable > latest ):
latest = latest_unavailable
if not (earliest and latest):
raise Http404
base_time = earliest
base_day = datetime.datetime(base_time.year,base_time.month,base_time.day)
day = base_day
days = []
while day <= latest:
days.append(day)
day += datetime.timedelta(days=1)
unavailable = list(unavailable)
for t in unavailable:
t.delta_from_beginning = (t.time - base_time).total_seconds()
t.day = (t.time-base_day).days
assignments = list(assignments)
for ss in assignments:
ss.delta_from_beginning = (ss.timeslot.time - base_time).total_seconds()
ss.day = (ss.timeslot.time-base_day).days
template = "meeting/room-view.html"
return render(request, template,{"meeting":meeting,"schedule":schedule,"unavailable":unavailable,"assignments":assignments,"rooms":rooms,"days":days})
def ical_session_status(assignment):
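"""Return the iCalendar STATUS text for an assignment's session."""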
if assignment.session.current_status == 'canceled':
return "CANCELLED"
elif assignment.session.current_status == 'resched':
t = "RESCHEDULED"
if assignment.session.tombstone_for_id is not None:
other_assignment = SchedTimeSessAssignment.objects.filter(schedule=assignment.schedule_id, session=assignment.session.tombstone_for_id).first()
if other_assignment:
t = "RESCHEDULED TO {}-{}".format(
other_assignment.timeslot.time.strftime("%A %H:%M").upper(),
other_assignment.timeslot.end_time().strftime("%H:%M")
)
return t
else:
return "CONFIRMED"
def parse_agenda_filter_params(querydict):
"""Parse agenda filter parameters from a request"""
if len(querydict) == 0:
return None
# Parse group filters from GET parameters. Other params are ignored.
filt_params = {'show': set(), 'hide': set(), 'showtypes': set(), 'hidetypes': set()}
for key, value in querydict.items():
if key in filt_params:
vals = unquote(value).lower().split(',')
vals = [v.strip() for v in vals]
filt_params[key] = set([v for v in vals if len(v) > 0]) # remove empty strings
return filt_params
def should_include_assignment(filter_params, assignment):
"""Decide whether to include an assignment
When filtering by wg, uses historic_group if available as an attribute
on the session, otherwise falls back to using group.
"""
shown = len(set(filter_params['show']).intersection(assignment.filter_keywords)) > 0
hidden = len(set(filter_params['hide']).intersection(assignment.filter_keywords)) > 0
return shown and not hidden
def agenda_ical(request, num=None, name=None, acronym=None, session_id=None):
"""Agenda ical view
By default, all agenda items will be shown. A filter can be specified in
the querystring. It has the format
?show=...&hide=...&showtypes=...&hidetypes=...
where any of the parameters can be omitted. The right-hand side of each
'=' is a comma separated list, which can be empty. If none of the filter
parameters are specified, no filtering will be applied, even if the query
string is not empty.
The show and hide parameters each take a list of working group (wg) acronyms.
The showtypes and hidetypes parameters take a list of session types.
Hiding (by wg or type) takes priority over showing.
"""
meeting = get_meeting(num, type_in=None)
schedule = get_schedule(meeting, name)
updated = meeting.updated()
if schedule is None and acronym is None and session_id is None:
raise Http404
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[schedule, schedule.base],
timeslot__type__private=False,
)
assignments = preprocess_assignments_for_agenda(assignments, meeting)
tag_assignments_with_filter_keywords(assignments)
try:
filt_params = parse_agenda_filter_params(request.GET)
except ValueError as e:
return HttpResponseBadRequest(str(e))
if filt_params is not None:
# Apply the filter
assignments = [a for a in assignments if should_include_assignment(filt_params, a)]
if acronym:
assignments = [ a for a in assignments if a.session.historic_group and a.session.historic_group.acronym == acronym ]
elif session_id:
assignments = [ a for a in assignments if a.session_id == int(session_id) ]
for a in assignments:
if a.session:
a.session.ical_status = ical_session_status(a)
return render(request, "meeting/agenda.ics", {
"schedule": schedule,
"assignments": assignments,
"updated": updated
}, content_type="text/calendar")
@cache_page(15 * 60)
def agenda_json(request, num=None):
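"""Return the meeting agenda data (sessions, rooms and parent groups) as JSON."""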
meeting = get_meeting(num, type_in=['ietf','interim'])
sessions = []
locations = set()
parent_acronyms = set()
assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[meeting.schedule, meeting.schedule.base if meeting.schedule else None],
timeslot__type__private=False,
).exclude(
session__type__in=['break', 'reg']
)
# Update the assignments with historic information, i.e., valid at the
# time of the meeting
assignments = preprocess_assignments_for_agenda(assignments, meeting, extra_prefetches=[
"session__materials__docevent_set",
"session__sessionpresentation_set",
"timeslot__meeting"
])
for asgn in assignments:
sessdict = dict()
sessdict['objtype'] = 'session'
sessdict['id'] = asgn.pk
sessdict['is_bof'] = False
if asgn.session.historic_group:
sessdict['group'] = {
"acronym": asgn.session.historic_group.acronym,
"name": asgn.session.historic_group.name,
"type": asgn.session.historic_group.type_id,
"state": asgn.session.historic_group.state_id,
}
if asgn.session.historic_group.is_bof():
sessdict['is_bof'] = True
if asgn.session.historic_group.type_id in ['wg','rg', 'ag', 'rag'] or asgn.session.historic_group.acronym in ['iesg',]: # TODO: should that first list be groupfeatures driven?
if asgn.session.historic_group.historic_parent:
sessdict['group']['parent'] = asgn.session.historic_group.historic_parent.acronym
parent_acronyms.add(asgn.session.historic_group.historic_parent.acronym)
if asgn.session.name:
sessdict['name'] = asgn.session.name
else:
sessdict['name'] = asgn.session.historic_group.name
if asgn.session.short:
sessdict['short'] = asgn.session.short
if asgn.session.agenda_note:
sessdict['agenda_note'] = asgn.session.agenda_note
if asgn.session.remote_instructions:
sessdict['remote_instructions'] = asgn.session.remote_instructions
utc_start = asgn.timeslot.utc_start_time()
if utc_start:
sessdict['start'] = utc_start.strftime("%Y-%m-%dT%H:%M:%SZ")
sessdict['duration'] = str(asgn.timeslot.duration)
sessdict['location'] = asgn.room_name
if asgn.timeslot.location: # Some socials have an assignment but no location
locations.add(asgn.timeslot.location)
if asgn.session.agenda():
sessdict['agenda'] = asgn.session.agenda().get_href()
if asgn.session.minutes():
sessdict['minutes'] = asgn.session.minutes().get_href()
if asgn.session.slides():
sessdict['presentations'] = []
presentations = SessionPresentation.objects.filter(session=asgn.session, document__type__slug='slides')
for pres in presentations:
sessdict['presentations'].append(
{
'name': pres.document.name,
'title': pres.document.title,
'order': pres.order,
'rev': pres.rev,
'resource_uri': '/api/v1/meeting/sessionpresentation/%s/'%pres.id,
})
sessdict['session_res_uri'] = '/api/v1/meeting/session/%s/'%asgn.session.id
sessdict['session_id'] = asgn.session.id
modified = asgn.session.modified
for doc in asgn.session.materials.all():
rev_docevent = doc.latest_event(NewRevisionDocEvent,'new_revision')
modified = max(modified, (rev_docevent and rev_docevent.time) or modified)
sessdict['modified'] = modified
sessdict['status'] = asgn.session.current_status
sessions.append(sessdict)
rooms = []
for room in locations:
roomdict = dict()
roomdict['id'] = room.pk
roomdict['objtype'] = 'location'
roomdict['name'] = room.name
if room.floorplan:
roomdict['level_name'] = room.floorplan.name
roomdict['level_sort'] = room.floorplan.order
if room.x1 is not None:
roomdict['x'] = (room.x1+room.x2)/2.0
roomdict['y'] = (room.y1+room.y2)/2.0
roomdict['modified'] = room.modified
if room.floorplan and room.floorplan.image:
roomdict['map'] = room.floorplan.image.url
roomdict['modified'] = max(room.modified, room.floorplan.modified)
rooms.append(roomdict)
parents = []
for parent in Group.objects.filter(acronym__in=parent_acronyms):
parentdict = dict()
parentdict['id'] = parent.pk
parentdict['objtype'] = 'parent'
parentdict['name'] = parent.acronym
parentdict['description'] = parent.name
parentdict['modified'] = parent.time
parents.append(parentdict)
meetinfo = []
meetinfo.extend(sessions)
meetinfo.extend(rooms)
meetinfo.extend(parents)
meetinfo.sort(key=lambda x: x['modified'],reverse=True)
last_modified = meetinfo and meetinfo[0]['modified']
tz = pytz.timezone(settings.PRODUCTION_TIMEZONE)
for obj in meetinfo:
obj['modified'] = tz.localize(obj['modified']).astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
data = {"%s"%num: meetinfo}
response = HttpResponse(json.dumps(data, indent=2, sort_keys=True), content_type='application/json;charset=%s'%settings.DEFAULT_CHARSET)
if last_modified:
last_modified = tz.localize(last_modified).astimezone(pytz.utc)
response['Last-Modified'] = format_date_time(timegm(last_modified.timetuple()))
return response
def meeting_requests(request, num=None):
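"""Show the session requests for a meeting, together with the active groups that are not meeting."""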
meeting = get_meeting(num)
sessions = add_event_info_to_session_qs(
Session.objects.filter(
meeting__number=meeting.number,
type__slug='regular',
group__parent__isnull=False
),
requested_by=True,
).exclude(
requested_by=0
).order_by(
"group__parent__acronym", "current_status", "group__acronym"
).prefetch_related(
"group","group__ad_role__person"
)
status_names = {n.slug: n.name for n in SessionStatusName.objects.all()}
session_requesters = {p.pk: p for p in Person.objects.filter(pk__in=[s.requested_by for s in sessions if s.requested_by is not None])}
for s in sessions:
s.current_status_name = status_names.get(s.current_status, s.current_status)
s.requested_by_person = session_requesters.get(s.requested_by)
groups_not_meeting = Group.objects.filter(state='Active',type__in=['wg','rg','ag','rag','bof','program']).exclude(acronym__in = [session.group.acronym for session in sessions]).order_by("parent__acronym","acronym").prefetch_related("parent")
return render(request, "meeting/requests.html",
{"meeting": meeting, "sessions":sessions,
"groups_not_meeting": groups_not_meeting})
def get_sessions(num, acronym):
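"""Return a group's sessions at a meeting, matched by acronym or short name, sorted by session time."""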
meeting = get_meeting(num=num,type_in=None)
sessions = Session.objects.filter(meeting=meeting,group__acronym=acronym,type__in=['regular','plenary','other'])
if not sessions:
sessions = Session.objects.filter(meeting=meeting,short=acronym,type__in=['regular','plenary','other'])
sessions = sessions.with_current_status()
return sorted(sessions, key=lambda s: session_time_for_sorting(s, use_meeting_date=False))
def session_details(request, num, acronym):
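"""Show scheduling information and materials for all of a group's sessions at a meeting."""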
meeting = get_meeting(num=num,type_in=None)
sessions = get_sessions(num, acronym)
if not sessions:
raise Http404
# Find the time of the meeting, so that we can look back historically
# for what the group was called at the time.
meeting_time = datetime.datetime.combine(meeting.date, datetime.time())
groups = list(set([ s.group for s in sessions ]))
group_replacements = find_history_replacements_active_at(groups, meeting_time)
status_names = {n.slug: n.name for n in SessionStatusName.objects.all()}
for session in sessions:
session.historic_group = None
if session.group:
session.historic_group = group_replacements.get(session.group_id)
if session.historic_group:
session.historic_group.historic_parent = None
session.type_counter = Counter()
ss = session.timeslotassignments.filter(schedule__in=[meeting.schedule, meeting.schedule.base if meeting.schedule else None]).order_by('timeslot__time')
if ss:
if meeting.type_id == 'interim' and not (meeting.city or meeting.country):
session.times = [ x.timeslot.utc_start_time() for x in ss ]
else:
session.times = [ x.timeslot.local_start_time() for x in ss ]
session.cancelled = session.current_status in Session.CANCELED_STATUSES
session.status = ''
elif meeting.type_id=='interim':
session.times = [ meeting.date ]
session.cancelled = session.current_status in Session.CANCELED_STATUSES
session.status = ''
else:
session.times = []
session.cancelled = session.current_status in Session.CANCELED_STATUSES
session.status = status_names.get(session.current_status, session.current_status)
session.filtered_artifacts = list(session.sessionpresentation_set.filter(document__type__slug__in=['agenda','minutes','bluesheets']))
session.filtered_artifacts.sort(key=lambda d:['agenda','minutes','bluesheets'].index(d.document.type.slug))
session.filtered_slides = session.sessionpresentation_set.filter(document__type__slug='slides').order_by('order')
session.filtered_drafts = session.sessionpresentation_set.filter(document__type__slug='draft')
# TODO FIXME Deleted materials shouldn't be in the sessionpresentation_set
for qs in [session.filtered_artifacts,session.filtered_slides,session.filtered_drafts]:
qs = [p for p in qs if p.document.get_state_slug(p.document.type_id)!='deleted']
session.type_counter.update([p.document.type.slug for p in qs])
# we somewhat arbitrarily use the group of the last session we get from
# get_sessions() above when checking can_manage_session_materials()
can_manage = can_manage_session_materials(request.user, session.group, session)
can_view_request = can_view_interim_request(meeting, request.user)
scheduled_sessions = [s for s in sessions if s.current_status == 'sched']
unscheduled_sessions = [s for s in sessions if s.current_status != 'sched']
pending_suggestions = None
if request.user.is_authenticated:
if can_manage:
pending_suggestions = session.slidesubmission_set.filter(status__slug='pending')
else:
pending_suggestions = session.slidesubmission_set.filter(status__slug='pending', submitter=request.user.person)
return render(request, "meeting/session_details.html",
{ 'scheduled_sessions':scheduled_sessions ,
'unscheduled_sessions':unscheduled_sessions ,
'pending_suggestions' : pending_suggestions,
'meeting' :meeting ,
'acronym' :acronym,
'is_materials_manager' : session.group.has_role(request.user, session.group.features.matman_roles),
'can_manage_materials' : can_manage,
'can_view_request': can_view_request,
'thisweek': datetime.date.today()-datetime.timedelta(days=7),
'now': datetime.datetime.now(),
'use_codimd': meeting.date >= settings.MEETING_USES_CODIMD_DATE,
})
class SessionDraftsForm(forms.Form):
drafts = SearchableDocumentsField(required=False)
def __init__(self, *args, **kwargs):
self.already_linked = kwargs.pop('already_linked')
super(self.__class__, self).__init__(*args, **kwargs)
def clean(self):
selected = self.cleaned_data['drafts']
problems = set(selected).intersection(set(self.already_linked))
if problems:
raise forms.ValidationError("Already linked: %s" % ', '.join([d.name for d in problems]))
return self.cleaned_data
def add_session_drafts(request, session_id, num):
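"""Link additional drafts to a session as session materials."""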
# num is redundant, but we're dragging it along as an artifact of where we are in the current URL structure
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
raise Http404
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
raise Http404
already_linked = [sp.document for sp in session.sessionpresentation_set.filter(document__type_id='draft')]
session_number = None
sessions = get_sessions(session.meeting.number,session.group.acronym)
if len(sessions) > 1:
session_number = 1 + sessions.index(session)
if request.method == 'POST':
form = SessionDraftsForm(request.POST,already_linked=already_linked)
if form.is_valid():
for draft in form.cleaned_data['drafts']:
session.sessionpresentation_set.create(document=draft,rev=None)
c = DocEvent(type="added_comment", doc=draft, rev=draft.rev, by=request.user.person)
c.desc = "Added to session: %s" % session
c.save()
return redirect('ietf.meeting.views.session_details', num=session.meeting.number, acronym=session.group.acronym)
else:
form = SessionDraftsForm(already_linked=already_linked)
return render(request, "meeting/add_session_drafts.html",
{ 'session': session,
'session_number': session_number,
'already_linked': session.sessionpresentation_set.filter(document__type_id='draft'),
'form': form,
})
class UploadBlueSheetForm(FileUploadForm):
def __init__(self, *args, **kwargs):
kwargs['doc_type'] = 'bluesheets'
super(UploadBlueSheetForm, self).__init__(*args, **kwargs )
def upload_session_bluesheets(request, session_id, num):
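"""Upload bluesheets for a session."""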
# num is redundant, but we're dragging it along as an artifact of where we are in the current URL structure
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to upload bluesheets for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
if session.meeting.type.slug == 'ietf' and not has_role(request.user, 'Secretariat'):
permission_denied(request, 'Restricted to role Secretariat')
session_number = None
sessions = get_sessions(session.meeting.number,session.group.acronym)
if len(sessions) > 1:
session_number = 1 + sessions.index(session)
if request.method == 'POST':
form = UploadBlueSheetForm(request.POST,request.FILES)
if form.is_valid():
file = request.FILES['file']
ota = session.official_timeslotassignment()
sess_time = ota and ota.timeslot.time
if not sess_time:
return HttpResponse("Cannot receive uploads for an unscheduled session. Please check the session ID.", status=410, content_type="text/plain")
save_error = save_bluesheet(request, session, file, encoding=form.file_encoding[file.name])
if save_error:
form.add_error(None, save_error)
else:
return redirect('ietf.meeting.views.session_details',num=num,acronym=session.group.acronym)
else:
form = UploadBlueSheetForm()
bluesheet_sp = session.sessionpresentation_set.filter(document__type='bluesheets').first()
return render(request, "meeting/upload_session_bluesheets.html",
{'session': session,
'session_number': session_number,
'bluesheet_sp' : bluesheet_sp,
'form': form,
})
def save_bluesheet(request, session, file, encoding='utf-8'):
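"""Create or revise the bluesheets document for a session and store the uploaded file; returns the error reported by handle_upload_file(), if any."""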
bluesheet_sp = session.sessionpresentation_set.filter(document__type='bluesheets').first()
_, ext = os.path.splitext(file.name)
if bluesheet_sp:
doc = bluesheet_sp.document
doc.rev = '%02d' % (int(doc.rev)+1)
bluesheet_sp.rev = doc.rev
bluesheet_sp.save()
else:
ota = session.official_timeslotassignment()
sess_time = ota and ota.timeslot.time
if session.meeting.type_id=='ietf':
name = 'bluesheets-%s-%s-%s' % (session.meeting.number,
session.group.acronym,
sess_time.strftime("%Y%m%d%H%M"))
title = 'Bluesheets IETF%s: %s : %s' % (session.meeting.number,
session.group.acronym,
sess_time.strftime("%a %H:%M"))
else:
name = 'bluesheets-%s-%s' % (session.meeting.number, sess_time.strftime("%Y%m%d%H%M"))
title = 'Bluesheets %s: %s' % (session.meeting.number, sess_time.strftime("%a %H:%M"))
doc = Document.objects.create(
name = name,
type_id = 'bluesheets',
title = title,
group = session.group,
rev = '00',
)
doc.states.add(State.objects.get(type_id='bluesheets',slug='active'))
DocAlias.objects.create(name=doc.name).docs.add(doc)
session.sessionpresentation_set.create(document=doc,rev='00')
filename = '%s-%s%s'% ( doc.name, doc.rev, ext)
doc.uploaded_filename = filename
e = NewRevisionDocEvent.objects.create(doc=doc, rev=doc.rev, by=request.user.person, type='new_revision', desc='New revision available: %s'%doc.rev)
save_error = handle_upload_file(file, filename, session.meeting, 'bluesheets', request=request, encoding=encoding)
if not save_error:
doc.save_with_history([e])
return save_error
class UploadMinutesForm(FileUploadForm):
apply_to_all = forms.BooleanField(label='Apply to all group sessions at this meeting',initial=True,required=False)
def __init__(self, show_apply_to_all_checkbox, *args, **kwargs):
kwargs['doc_type'] = 'minutes'
super(UploadMinutesForm, self).__init__(*args, **kwargs )
if not show_apply_to_all_checkbox:
self.fields.pop('apply_to_all')
def upload_session_minutes(request, session_id, num):
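"""Upload minutes for a session, optionally applying the document to all of the group's sessions at the meeting."""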
# num is redundant, but we're dragging it along as an artifact of where we are in the current URL structure
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to upload minutes for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
session_number = None
sessions = get_sessions(session.meeting.number,session.group.acronym)
show_apply_to_all_checkbox = len(sessions) > 1 if session.type_id == 'regular' else False
if len(sessions) > 1:
session_number = 1 + sessions.index(session)
minutes_sp = session.sessionpresentation_set.filter(document__type='minutes').first()
if request.method == 'POST':
form = UploadMinutesForm(show_apply_to_all_checkbox,request.POST,request.FILES)
if form.is_valid():
file = request.FILES['file']
_, ext = os.path.splitext(file.name)
apply_to_all = session.type_id == 'regular'
if show_apply_to_all_checkbox:
apply_to_all = form.cleaned_data['apply_to_all']
if minutes_sp:
doc = minutes_sp.document
doc.rev = '%02d' % (int(doc.rev)+1)
minutes_sp.rev = doc.rev
minutes_sp.save()
else:
ota = session.official_timeslotassignment()
sess_time = ota and ota.timeslot.time
if not sess_time:
return HttpResponse("Cannot receive uploads for an unscheduled session. Please check the session ID.", status=410, content_type="text/plain")
if session.meeting.type_id=='ietf':
name = 'minutes-%s-%s' % (session.meeting.number,
session.group.acronym)
title = 'Minutes IETF%s: %s' % (session.meeting.number,
session.group.acronym)
if not apply_to_all:
name += '-%s' % (sess_time.strftime("%Y%m%d%H%M"),)
title += ': %s' % (sess_time.strftime("%a %H:%M"),)
else:
name = 'minutes-%s-%s' % (session.meeting.number, sess_time.strftime("%Y%m%d%H%M"))
title = 'Minutes %s: %s' % (session.meeting.number, sess_time.strftime("%a %H:%M"))
if Document.objects.filter(name=name).exists():
doc = Document.objects.get(name=name)
doc.rev = '%02d' % (int(doc.rev)+1)
else:
doc = Document.objects.create(
name = name,
type_id = 'minutes',
title = title,
group = session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='minutes',slug='active'))
if session.sessionpresentation_set.filter(document=doc).exists():
sp = session.sessionpresentation_set.get(document=doc)
sp.rev = doc.rev
sp.save()
else:
session.sessionpresentation_set.create(document=doc,rev=doc.rev)
if apply_to_all:
for other_session in sessions:
if other_session != session:
other_session.sessionpresentation_set.filter(document__type='minutes').delete()
other_session.sessionpresentation_set.create(document=doc,rev=doc.rev)
filename = '%s-%s%s'% ( doc.name, doc.rev, ext)
doc.uploaded_filename = filename
e = NewRevisionDocEvent.objects.create(doc=doc, by=request.user.person, type='new_revision', desc='New revision available: %s'%doc.rev, rev=doc.rev)
# The way this function builds the filename, it will never trigger the file-delete path in handle_upload_file().
save_error = handle_upload_file(file, filename, session.meeting, 'minutes', request=request, encoding=form.file_encoding[file.name])
if save_error:
form.add_error(None, save_error)
else:
doc.save_with_history([e])
return redirect('ietf.meeting.views.session_details',num=num,acronym=session.group.acronym)
else:
form = UploadMinutesForm(show_apply_to_all_checkbox)
return render(request, "meeting/upload_session_minutes.html",
{'session': session,
'session_number': session_number,
'minutes_sp' : minutes_sp,
'form': form,
})
class UploadAgendaForm(FileUploadForm):
apply_to_all = forms.BooleanField(label='Apply to all group sessions at this meeting',initial=True,required=False)
def __init__(self, show_apply_to_all_checkbox, *args, **kwargs):
kwargs['doc_type'] = 'agenda'
super(UploadAgendaForm, self).__init__(*args, **kwargs )
if not show_apply_to_all_checkbox:
self.fields.pop('apply_to_all')
def upload_session_agenda(request, session_id, num):
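"""Upload an agenda for a session, optionally applying the document to all of the group's sessions at the meeting."""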
# num is redundant, but we're dragging it along as an artifact of where we are in the current URL structure
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to upload an agenda for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
session_number = None
sessions = get_sessions(session.meeting.number,session.group.acronym)
show_apply_to_all_checkbox = len(sessions) > 1 if session.type_id == 'regular' else False
if len(sessions) > 1:
session_number = 1 + sessions.index(session)
agenda_sp = session.sessionpresentation_set.filter(document__type='agenda').first()
if request.method == 'POST':
form = UploadAgendaForm(show_apply_to_all_checkbox,request.POST,request.FILES)
if form.is_valid():
file = request.FILES['file']
_, ext = os.path.splitext(file.name)
apply_to_all = session.type_id == 'regular'
if show_apply_to_all_checkbox:
apply_to_all = form.cleaned_data['apply_to_all']
if agenda_sp:
doc = agenda_sp.document
doc.rev = '%02d' % (int(doc.rev)+1)
agenda_sp.rev = doc.rev
agenda_sp.save()
else:
ota = session.official_timeslotassignment()
sess_time = ota and ota.timeslot.time
if not sess_time:
return HttpResponse("Cannot receive uploads for an unscheduled session. Please check the session ID.", status=410, content_type="text/plain")
if session.meeting.type_id=='ietf':
name = 'agenda-%s-%s' % (session.meeting.number,
session.group.acronym)
title = 'Agenda IETF%s: %s' % (session.meeting.number,
session.group.acronym)
if not apply_to_all:
name += '-%s' % (session.docname_token(),)
if sess_time:
title += ': %s' % (sess_time.strftime("%a %H:%M"),)
else:
name = 'agenda-%s-%s' % (session.meeting.number, session.docname_token())
title = 'Agenda %s' % (session.meeting.number, )
if sess_time:
title += ': %s' % (sess_time.strftime("%a %H:%M"),)
if Document.objects.filter(name=name).exists():
doc = Document.objects.get(name=name)
doc.rev = '%02d' % (int(doc.rev)+1)
else:
doc = Document.objects.create(
name = name,
type_id = 'agenda',
title = title,
group = session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='agenda',slug='active'))
if session.sessionpresentation_set.filter(document=doc).exists():
sp = session.sessionpresentation_set.get(document=doc)
sp.rev = doc.rev
sp.save()
else:
session.sessionpresentation_set.create(document=doc,rev=doc.rev)
if apply_to_all:
for other_session in sessions:
if other_session != session:
other_session.sessionpresentation_set.filter(document__type='agenda').delete()
other_session.sessionpresentation_set.create(document=doc,rev=doc.rev)
filename = '%s-%s%s'% ( doc.name, doc.rev, ext)
doc.uploaded_filename = filename
e = NewRevisionDocEvent.objects.create(doc=doc,by=request.user.person,type='new_revision',desc='New revision available: %s'%doc.rev,rev=doc.rev)
# The way this function builds the filename, it will never trigger the file-delete path in handle_upload_file().
save_error = handle_upload_file(file, filename, session.meeting, 'agenda', request=request, encoding=form.file_encoding[file.name])
if save_error:
form.add_error(None, save_error)
else:
doc.save_with_history([e])
return redirect('ietf.meeting.views.session_details',num=num,acronym=session.group.acronym)
else:
form = UploadAgendaForm(show_apply_to_all_checkbox, initial={'apply_to_all':session.type_id=='regular'})
return render(request, "meeting/upload_session_agenda.html",
{'session': session,
'session_number': session_number,
'agenda_sp' : agenda_sp,
'form': form,
})
class UploadSlidesForm(FileUploadForm):
title = forms.CharField(max_length=255)
apply_to_all = forms.BooleanField(label='Apply to all group sessions at this meeting',initial=False,required=False)
def __init__(self, session, show_apply_to_all_checkbox, *args, **kwargs):
self.session = session
kwargs['doc_type'] = 'slides'
super(UploadSlidesForm, self).__init__(*args, **kwargs )
if not show_apply_to_all_checkbox:
self.fields.pop('apply_to_all')
def clean_title(self):
title = self.cleaned_data['title']
# The current database tables only handle the Unicode BMP:
if ord(max(title)) > 0xffff:
raise forms.ValidationError("The title contains characters outside the Unicode BMP, which is not currently supported")
if self.session.meeting.type_id=='interim':
if re.search(r'-\d{2}$', title):
raise forms.ValidationError("Interim slides currently may not have a title that ends with something that looks like a revision number (-nn)")
return title
def upload_session_slides(request, session_id, num, name):
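"""Upload new or revised slides for a session, optionally applying the document to all of the group's sessions at the meeting."""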
# num is redundant, but we're dragging it along as an artifact of where we are in the current URL structure
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to upload slides for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
session_number = None
sessions = get_sessions(session.meeting.number,session.group.acronym)
show_apply_to_all_checkbox = len(sessions) > 1 if session.type_id == 'regular' else False
if len(sessions) > 1:
session_number = 1 + sessions.index(session)
slides = None
slides_sp = None
if name:
slides = Document.objects.filter(name=name).first()
if not (slides and slides.type_id=='slides'):
raise Http404
slides_sp = session.sessionpresentation_set.filter(document=slides).first()
if request.method == 'POST':
form = UploadSlidesForm(session, show_apply_to_all_checkbox,request.POST,request.FILES)
if form.is_valid():
file = request.FILES['file']
_, ext = os.path.splitext(file.name)
apply_to_all = session.type_id == 'regular'
if show_apply_to_all_checkbox:
apply_to_all = form.cleaned_data['apply_to_all']
if slides_sp:
doc = slides_sp.document
doc.rev = '%02d' % (int(doc.rev)+1)
doc.title = form.cleaned_data['title']
slides_sp.rev = doc.rev
slides_sp.save()
else:
title = form.cleaned_data['title']
if session.meeting.type_id=='ietf':
name = 'slides-%s-%s' % (session.meeting.number,
session.group.acronym)
if not apply_to_all:
name += '-%s' % (session.docname_token(),)
else:
name = 'slides-%s-%s' % (session.meeting.number, session.docname_token())
name = name + '-' + slugify(title).replace('_', '-')[:128]
if Document.objects.filter(name=name).exists():
doc = Document.objects.get(name=name)
doc.rev = '%02d' % (int(doc.rev)+1)
doc.title = form.cleaned_data['title']
else:
doc = Document.objects.create(
name = name,
type_id = 'slides',
title = title,
group = session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='slides',slug='active'))
doc.states.add(State.objects.get(type_id='reuse_policy',slug='single'))
if session.sessionpresentation_set.filter(document=doc).exists():
sp = session.sessionpresentation_set.get(document=doc)
sp.rev = doc.rev
sp.save()
else:
max_order = session.sessionpresentation_set.filter(document__type='slides').aggregate(Max('order'))['order__max'] or 0
session.sessionpresentation_set.create(document=doc,rev=doc.rev,order=max_order+1)
if apply_to_all:
for other_session in sessions:
if other_session != session and not other_session.sessionpresentation_set.filter(document=doc).exists():
max_order = other_session.sessionpresentation_set.filter(document__type='slides').aggregate(Max('order'))['order__max'] or 0
other_session.sessionpresentation_set.create(document=doc,rev=doc.rev,order=max_order+1)
filename = '%s-%s%s'% ( doc.name, doc.rev, ext)
doc.uploaded_filename = filename
e = NewRevisionDocEvent.objects.create(doc=doc,by=request.user.person,type='new_revision',desc='New revision available: %s'%doc.rev,rev=doc.rev)
# The way this function builds the filename, it will never trigger the file-delete path in handle_upload_file().
save_error = handle_upload_file(file, filename, session.meeting, 'slides', request=request, encoding=form.file_encoding[file.name])
if save_error:
form.add_error(None, save_error)
else:
doc.save_with_history([e])
post_process(doc)
return redirect('ietf.meeting.views.session_details',num=num,acronym=session.group.acronym)
else:
initial = {}
if slides:
initial = {'title':slides.title}
form = UploadSlidesForm(session, show_apply_to_all_checkbox, initial=initial)
return render(request, "meeting/upload_session_slides.html",
{'session': session,
'session_number': session_number,
'slides_sp' : slides_sp,
'form': form,
})
@login_required
def propose_session_slides(request, session_id, num):
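    '''Stage proposed slides for a session and send a notification asking for approval'''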
session = get_object_or_404(Session,pk=session_id)
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
session_number = None
sessions = get_sessions(session.meeting.number,session.group.acronym)
show_apply_to_all_checkbox = len(sessions) > 1 if session.type_id == 'regular' else False
if len(sessions) > 1:
session_number = 1 + sessions.index(session)
if request.method == 'POST':
form = UploadSlidesForm(session, show_apply_to_all_checkbox,request.POST,request.FILES)
if form.is_valid():
file = request.FILES['file']
_, ext = os.path.splitext(file.name)
apply_to_all = session.type_id == 'regular'
if show_apply_to_all_checkbox:
apply_to_all = form.cleaned_data['apply_to_all']
title = form.cleaned_data['title']
submission = SlideSubmission.objects.create(session = session, title = title, filename = '', apply_to_all = apply_to_all, submitter=request.user.person)
if session.meeting.type_id=='ietf':
name = 'slides-%s-%s' % (session.meeting.number,
session.group.acronym)
if not apply_to_all:
name += '-%s' % (session.docname_token(),)
else:
name = 'slides-%s-%s' % (session.meeting.number, session.docname_token())
name = name + '-' + slugify(title).replace('_', '-')[:128]
filename = '%s-ss%d%s'% (name, submission.id, ext)
destination = io.open(os.path.join(settings.SLIDE_STAGING_PATH, filename),'wb+')
for chunk in file.chunks():
destination.write(chunk)
destination.close()
submission.filename = filename
submission.save()
(to, cc) = gather_address_lists('slides_proposed', group=session.group).as_strings()
msg_txt = render_to_string("meeting/slides_proposed.txt", {
"to": to,
"cc": cc,
"submission": submission,
"settings": settings,
})
msg = infer_message(msg_txt)
msg.by = request.user.person
msg.save()
send_mail_message(request, msg)
return redirect('ietf.meeting.views.session_details',num=num,acronym=session.group.acronym)
else:
initial = {}
form = UploadSlidesForm(session, show_apply_to_all_checkbox, initial=initial)
return render(request, "meeting/propose_session_slides.html",
{'session': session,
'session_number': session_number,
'form': form,
})
def remove_sessionpresentation(request, session_id, num, name):
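    '''Remove a material document from a session'''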
sp = get_object_or_404(SessionPresentation,session_id=session_id,document__name=name)
session = sp.session
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to manage materials for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
if request.method == 'POST':
session.sessionpresentation_set.filter(pk=sp.pk).delete()
c = DocEvent(type="added_comment", doc=sp.document, rev=sp.document.rev, by=request.user.person)
c.desc = "Removed from session: %s" % (session)
c.save()
return redirect('ietf.meeting.views.session_details', num=session.meeting.number, acronym=session.group.acronym)
return render(request,'meeting/remove_sessionpresentation.html', {'sp': sp })
def ajax_add_slides_to_session(request, session_id, num):
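    '''Ajax view to add an existing slides document to a session at the supplied order'''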
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to upload slides for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
if request.method != 'POST' or not request.POST:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'No data submitted or not POST' }),content_type='application/json')
order_str = request.POST.get('order', None)
try:
order = int(order_str)
except (ValueError, TypeError):
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied order is not valid' }),content_type='application/json')
if order < 1 or order > session.sessionpresentation_set.filter(document__type_id='slides').count() + 1 :
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied order is not valid' }),content_type='application/json')
name = request.POST.get('name', None)
doc = Document.objects.filter(name=name).first()
if not doc:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied name is not valid' }),content_type='application/json')
if not session.sessionpresentation_set.filter(document=doc).exists():
condition_slide_order(session)
session.sessionpresentation_set.filter(document__type_id='slides', order__gte=order).update(order=F('order')+1)
session.sessionpresentation_set.create(document=doc,rev=doc.rev,order=order)
DocEvent.objects.create(type="added_comment", doc=doc, rev=doc.rev, by=request.user.person, desc="Added to session: %s" % session)
return HttpResponse(json.dumps({'success':True}), content_type='application/json')
def ajax_remove_slides_from_session(request, session_id, num):
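    '''Ajax view to remove a slides document from a session'''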
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to upload slides for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
if request.method != 'POST' or not request.POST:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'No data submitted or not POST' }),content_type='application/json')
oldIndex_str = request.POST.get('oldIndex', None)
try:
oldIndex = int(oldIndex_str)
except (ValueError, TypeError):
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied index is not valid' }),content_type='application/json')
if oldIndex < 1 or oldIndex > session.sessionpresentation_set.filter(document__type_id='slides').count() :
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied index is not valid' }),content_type='application/json')
name = request.POST.get('name', None)
doc = Document.objects.filter(name=name).first()
if not doc:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied name is not valid' }),content_type='application/json')
condition_slide_order(session)
affected_presentations = session.sessionpresentation_set.filter(document=doc).first()
if affected_presentations:
if affected_presentations.order == oldIndex:
affected_presentations.delete()
session.sessionpresentation_set.filter(document__type_id='slides', order__gt=oldIndex).update(order=F('order')-1)
DocEvent.objects.create(type="added_comment", doc=doc, rev=doc.rev, by=request.user.person, desc="Removed from session: %s" % session)
return HttpResponse(json.dumps({'success':True}), content_type='application/json')
else:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Name does not match index' }),content_type='application/json')
else:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'SessionPresentation not found' }),content_type='application/json')
def ajax_reorder_slides_in_session(request, session_id, num):
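    '''Ajax view to move a slides document to a new order within a session'''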
session = get_object_or_404(Session,pk=session_id)
if not session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to upload slides for this session.")
if session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
if request.method != 'POST' or not request.POST:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'No data submitted or not POST' }),content_type='application/json')
num_slides_in_session = session.sessionpresentation_set.filter(document__type_id='slides').count()
oldIndex_str = request.POST.get('oldIndex', None)
try:
oldIndex = int(oldIndex_str)
except (ValueError, TypeError):
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied index is not valid' }),content_type='application/json')
if oldIndex < 1 or oldIndex > num_slides_in_session :
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied index is not valid' }),content_type='application/json')
newIndex_str = request.POST.get('newIndex', None)
try:
newIndex = int(newIndex_str)
except (ValueError, TypeError):
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied index is not valid' }),content_type='application/json')
if newIndex < 1 or newIndex > num_slides_in_session :
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied index is not valid' }),content_type='application/json')
if newIndex == oldIndex:
return HttpResponse(json.dumps({ 'success' : False, 'error' : 'Supplied index is not valid' }),content_type='application/json')
condition_slide_order(session)
sp = session.sessionpresentation_set.get(order=oldIndex)
if oldIndex < newIndex:
session.sessionpresentation_set.filter(order__gt=oldIndex, order__lte=newIndex).update(order=F('order')-1)
else:
session.sessionpresentation_set.filter(order__gte=newIndex, order__lt=oldIndex).update(order=F('order')+1)
sp.order = newIndex
sp.save()
return HttpResponse(json.dumps({'success':True}), content_type='application/json')
@role_required('Secretariat')
def make_schedule_official(request, num, owner, name):
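    '''Make the named schedule the official schedule for the meeting'''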
meeting = get_meeting(num)
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
if schedule is None:
raise Http404
if request.method == 'POST':
if not (schedule.public and schedule.visible):
schedule.public = True
schedule.visible = True
schedule.save()
if schedule.base and not (schedule.base.public and schedule.base.visible):
schedule.base.public = True
schedule.base.visible = True
schedule.base.save()
meeting.schedule = schedule
meeting.save()
return HttpResponseRedirect(reverse('ietf.meeting.views.list_schedules',kwargs={'num':num}))
if not schedule.public:
messages.warning(request,"This schedule will be made public as it is made official.")
if not schedule.visible:
messages.warning(request,"This schedule will be made visible as it is made official.")
if schedule.base:
if not schedule.base.public:
messages.warning(request,"The base schedule will be made public as it is made official.")
if not schedule.base.visible:
messages.warning(request,"The base schedule will be made visible as it is made official.")
return render(request, "meeting/make_schedule_official.html",
{ 'schedule' : schedule,
'meeting' : meeting,
}
)
@role_required('Secretariat','Area Director')
def delete_schedule(request, num, owner, name):
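    '''Delete a schedule, unless it is the official schedule or the base of other schedules'''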
meeting = get_meeting(num)
person = get_person_by_email(owner)
schedule = get_schedule_by_name(meeting, person, name)
# FIXME: we ought to put these checks in a function and only show
# the delete button if the checks pass
if schedule == meeting.schedule:
permission_denied(request, 'You may not delete the official schedule for %s'%meeting)
if Schedule.objects.filter(base=schedule).exists():
return HttpResponseForbidden('You may not delete a schedule serving as the base for other schedules')
if not ( has_role(request.user, 'Secretariat') or person.user == request.user ):
permission_denied(request, "You may not delete other user's schedules")
if request.method == 'POST':
# remove schedule from origin tree
replacement_origin = schedule.origin
Schedule.objects.filter(origin=schedule).update(origin=replacement_origin)
schedule.delete()
return HttpResponseRedirect(reverse('ietf.meeting.views.list_schedules',kwargs={'num':num}))
return render(request, "meeting/delete_schedule.html",
{ 'schedule' : schedule,
'meeting' : meeting,
}
)
# -------------------------------------------------
# Interim Views
# -------------------------------------------------
def ajax_get_utc(request):
'''Ajax view that takes arguments time, timezone, date and returns UTC data'''
time = request.GET.get('time')
timezone = request.GET.get('timezone')
date = request.GET.get('date')
time_re = re.compile(r'^\d{2}:\d{2}$')
# validate input
if not time_re.match(time) or not date:
return HttpResponse(json.dumps({'error': True}),
content_type='application/json')
hour, minute = time.split(':')
if not (int(hour) <= 23 and int(minute) <= 59):
return HttpResponse(json.dumps({'error': True}),
content_type='application/json')
year, month, day = date.split('-')
dt = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute))
tz = pytz.timezone(timezone)
aware_dt = tz.localize(dt, is_dst=None)
utc_dt = aware_dt.astimezone(pytz.utc)
utc = utc_dt.strftime('%H:%M')
# calculate utc day offset
naive_utc_dt = utc_dt.replace(tzinfo=None)
utc_day_offset = (naive_utc_dt.date() - dt.date()).days
html = "<span>{utc} UTC</span>".format(utc=utc)
if utc_day_offset != 0:
html = html + "<span class='day-offset'> {0:+d} Day</span>".format(utc_day_offset)
context_data = {'timezone': timezone,
'time': time,
'utc': utc,
'utc_day_offset': utc_day_offset,
'html': html}
return HttpResponse(json.dumps(context_data),
content_type='application/json')
def interim_announce(request):
'''View which shows interim meeting requests awaiting announcement'''
meetings = data_for_meetings_overview(Meeting.objects.filter(type='interim').order_by('date'), interim_status='scheda')
menu_entries = get_interim_menu_entries(request)
selected_menu_entry = 'announce'
return render(request, "meeting/interim_announce.html", {
'menu_entries': menu_entries,
'selected_menu_entry': selected_menu_entry,
'meetings': meetings})
@role_required('Secretariat',)
def interim_send_announcement(request, number):
'''View for sending the announcement of a new interim meeting'''
meeting = get_object_or_404(Meeting, number=number)
group = meeting.session_set.first().group
if request.method == 'POST':
form = InterimAnnounceForm(request.POST,
initial=get_announcement_initial(meeting))
if form.is_valid():
message = form.save(user=request.user)
message.related_groups.add(group)
for session in meeting.session_set.not_canceled():
SchedulingEvent.objects.create(
session=session,
status=SessionStatusName.objects.get(slug='sched'),
by=request.user.person,
)
send_mail_message(request, message)
messages.success(request, 'Interim meeting announcement sent')
return redirect(interim_announce)
form = InterimAnnounceForm(initial=get_announcement_initial(meeting))
return render(request, "meeting/interim_send_announcement.html", {
'meeting': meeting,
'form': form})
@role_required('Secretariat',)
def interim_skip_announcement(request, number):
'''View to change status of interim meeting to Scheduled without
first announcing. Only applicable to IRTF groups.
'''
meeting = get_object_or_404(Meeting, number=number)
if request.method == 'POST':
for session in meeting.session_set.not_canceled():
SchedulingEvent.objects.create(
session=session,
status=SessionStatusName.objects.get(slug='sched'),
by=request.user.person,
)
messages.success(request, 'Interim meeting scheduled. No announcement sent.')
return redirect(interim_announce)
return render(request, "meeting/interim_skip_announce.html", {
'meeting': meeting})
def interim_pending(request):
'''View which shows interim meeting requests pending approval'''
meetings = data_for_meetings_overview(Meeting.objects.filter(type='interim').order_by('date'), interim_status='apprw')
menu_entries = get_interim_menu_entries(request)
selected_menu_entry = 'pending'
for meeting in meetings:
if can_approve_interim_request(meeting, request.user):
meeting.can_approve = True
return render(request, "meeting/interim_pending.html", {
'menu_entries': menu_entries,
'selected_menu_entry': selected_menu_entry,
'meetings': meetings})
@login_required
def interim_request(request):
    '''View for requesting an interim meeting'''
    if not can_manage_some_groups(request.user):
        permission_denied(request, "You don't have permission to request any interims")
SessionFormset = inlineformset_factory(
Meeting,
Session,
form=InterimSessionModelForm,
formset=InterimSessionInlineFormSet,
can_delete=False, extra=2)
if request.method == 'POST':
form = InterimMeetingModelForm(request, data=request.POST)
formset = SessionFormset(instance=Meeting(), data=request.POST)
if form.is_valid() and formset.is_valid():
group = form.cleaned_data.get('group')
is_approved = form.cleaned_data.get('approved', False)
is_virtual = form.is_virtual()
meeting_type = form.cleaned_data.get('meeting_type')
requires_approval = not ( is_approved or ( is_virtual and not settings.VIRTUAL_INTERIMS_REQUIRE_APPROVAL ))
# pre create meeting
if meeting_type in ('single', 'multi-day'):
meeting = form.save(date=get_earliest_session_date(formset))
# need to use curry here to pass custom variable to form init
SessionFormset.form.__init__ = curry(
InterimSessionModelForm.__init__,
user=request.user,
group=group,
requires_approval=requires_approval)
formset = SessionFormset(instance=meeting, data=request.POST)
formset.is_valid()
formset.save()
sessions_post_save(request, formset)
if requires_approval:
send_interim_approval_request(meetings=[meeting])
else:
send_interim_approval(request.user, meeting=meeting)
if not has_role(request.user, 'Secretariat'):
send_interim_announcement_request(meeting=meeting)
            # Series require special handling: each session gets its own
            # meeting object. We won't see this on edit because series are
            # subsequently dealt with individually.
elif meeting_type == 'series':
series = []
SessionFormset.form.__init__ = curry(
InterimSessionModelForm.__init__,
user=request.user,
group=group,
requires_approval=requires_approval)
formset = SessionFormset(instance=Meeting(), data=request.POST)
formset.is_valid() # re-validate
for session_form in formset.forms:
if not session_form.has_changed():
continue
# create meeting
form = InterimMeetingModelForm(request, data=request.POST)
form.is_valid()
meeting = form.save(date=session_form.cleaned_data['date'])
                    # create and save the session
session = session_form.save(commit=False)
session.meeting = meeting
session.save()
series.append(meeting)
sessions_post_save(request, [session_form])
if requires_approval:
send_interim_approval_request(meetings=series)
else:
send_interim_approval(request.user, meeting=meeting)
if not has_role(request.user, 'Secretariat'):
send_interim_announcement_request(meeting=meeting)
messages.success(request, 'Interim meeting request submitted')
return redirect(upcoming)
else:
initial = {'meeting_type': 'single', 'group': request.GET.get('group', '')}
form = InterimMeetingModelForm(request=request,
initial=initial)
formset = SessionFormset()
return render(request, "meeting/interim_request.html", {
"form": form,
"formset": formset})
@login_required
def interim_request_cancel(request, number):
'''View for cancelling an interim meeting request'''
meeting = get_object_or_404(Meeting, number=number)
first_session = meeting.session_set.first()
group = first_session.group
if not can_manage_group(request.user, group):
permission_denied(request, "You do not have permissions to cancel this meeting request")
session_status = current_session_status(first_session)
if request.method == 'POST':
form = InterimCancelForm(request.POST)
if form.is_valid():
if 'comments' in form.changed_data:
meeting.session_set.update(agenda_note=form.cleaned_data.get('comments'))
was_scheduled = session_status.slug == 'sched'
result_status = SessionStatusName.objects.get(slug='canceled' if was_scheduled else 'canceledpa')
for session in meeting.session_set.not_canceled():
SchedulingEvent.objects.create(
session=session,
status=result_status,
by=request.user.person,
)
if was_scheduled:
send_interim_meeting_cancellation_notice(meeting)
messages.success(request, 'Interim meeting cancelled')
return redirect(upcoming)
else:
form = InterimCancelForm(initial={'group': group.acronym, 'date': meeting.date})
return render(request, "meeting/interim_request_cancel.html", {
"form": form,
"meeting": meeting,
"session_status": session_status,
})
@login_required
def interim_request_session_cancel(request, sessionid):
    '''View for cancelling a single session of an interim meeting request'''
session = get_object_or_404(Session, pk=sessionid)
group = session.group
if not can_manage_group(request.user, group):
permission_denied(request, "You do not have permissions to cancel this session")
session_status = current_session_status(session)
if request.method == 'POST':
form = InterimCancelForm(request.POST)
if form.is_valid():
remaining_sessions = session.meeting.session_set.with_current_status().exclude(
current_status__in=['canceled', 'canceledpa']
)
if remaining_sessions.count() <= 1:
return HttpResponse('Cannot cancel only remaining session. Cancel the request instead.',
status=409)
if 'comments' in form.changed_data:
session.agenda_note=form.cleaned_data.get('comments')
session.save()
was_scheduled = session_status.slug == 'sched'
result_status = SessionStatusName.objects.get(slug='canceled' if was_scheduled else 'canceledpa')
SchedulingEvent.objects.create(
session=session,
status=result_status,
by=request.user.person,
)
if was_scheduled:
send_interim_session_cancellation_notice(session)
messages.success(request, 'Interim meeting session cancelled')
return redirect(interim_request_details, number=session.meeting.number)
else:
session_time = session.official_timeslotassignment().timeslot.time
form = InterimCancelForm(initial={'group': group.acronym, 'date': session_time.date()})
return render(request, "meeting/interim_request_cancel.html", {
"form": form,
"session": session,
"session_status": session_status,
})
@login_required
def interim_request_details(request, number):
'''View details of an interim meeting request'''
meeting = get_object_or_404(Meeting, number=number)
sessions_not_canceled = meeting.session_set.not_canceled()
first_session = meeting.session_set.first() # first, whether or not canceled
group = first_session.group
if not can_manage_group(request.user, group):
permission_denied(request, "You do not have permissions to manage this meeting request")
can_edit = can_edit_interim_request(meeting, request.user)
can_approve = can_approve_interim_request(meeting, request.user)
if request.method == 'POST':
if request.POST.get('approve') and can_approve_interim_request(meeting, request.user):
for session in sessions_not_canceled:
SchedulingEvent.objects.create(
session=session,
status=SessionStatusName.objects.get(slug='scheda'),
by=request.user.person,
)
messages.success(request, 'Interim meeting approved')
if has_role(request.user, 'Secretariat'):
return redirect(interim_send_announcement, number=number)
else:
send_interim_announcement_request(meeting)
return redirect(interim_pending)
if request.POST.get('disapprove') and can_approve_interim_request(meeting, request.user):
for session in sessions_not_canceled:
SchedulingEvent.objects.create(
session=session,
status=SessionStatusName.objects.get(slug='disappr'),
by=request.user.person,
)
messages.success(request, 'Interim meeting disapproved')
return redirect(interim_pending)
# Determine meeting status from non-canceled sessions, if any.
# N.b., meeting_status may be None after either of these code paths,
# though I am not sure what circumstances would cause this.
if sessions_not_canceled.count() > 0:
meeting_status = current_session_status(sessions_not_canceled.first())
else:
meeting_status = current_session_status(first_session)
meeting_assignments = SchedTimeSessAssignment.objects.filter(
schedule__in=[meeting.schedule, meeting.schedule.base if meeting.schedule else None]
).select_related(
'session', 'timeslot'
)
for ma in meeting_assignments:
ma.status = current_session_status(ma.session)
ma.can_be_canceled = ma.status.slug in ('sched', 'scheda', 'apprw')
return render(request, "meeting/interim_request_details.html", {
"meeting": meeting,
"meeting_assignments": meeting_assignments,
"group": group,
"requester": session_requested_by(first_session),
"meeting_status": meeting_status or SessionStatusName.objects.get(slug='canceled'),
"can_edit": can_edit,
"can_approve": can_approve})
@login_required
def interim_request_edit(request, number):
    '''Edit details of an interim meeting request'''
meeting = get_object_or_404(Meeting, number=number)
if not can_edit_interim_request(meeting, request.user):
permission_denied(request, "You do not have permissions to edit this meeting request")
SessionFormset = inlineformset_factory(
Meeting,
Session,
form=InterimSessionModelForm,
can_delete=False,
extra=1)
if request.method == 'POST':
form = InterimMeetingModelForm(request=request, instance=meeting,
data=request.POST)
group = Group.objects.get(pk=form.data['group'])
is_approved = is_interim_meeting_approved(meeting)
SessionFormset.form.__init__ = curry(
InterimSessionModelForm.__init__,
user=request.user,
group=group,
requires_approval= not is_approved)
formset = SessionFormset(instance=meeting, data=request.POST)
if form.is_valid() and formset.is_valid():
meeting = form.save(date=get_earliest_session_date(formset))
formset.save()
sessions_post_save(request, formset)
message = 'Interim meeting request saved'
meeting_is_scheduled = add_event_info_to_session_qs(meeting.session_set).filter(current_status='sched').exists()
if (form.has_changed() or formset.has_changed()) and meeting_is_scheduled:
send_interim_change_notice(request, meeting)
message = message + ' and change announcement sent'
messages.success(request, message)
return redirect(interim_request_details, number=number)
else:
form = InterimMeetingModelForm(request=request, instance=meeting)
formset = SessionFormset(instance=meeting)
return render(request, "meeting/interim_request_edit.html", {
"meeting": meeting,
"form": form,
"formset": formset})
@cache_page(60*60)
def past(request):
'''List of past meetings'''
today = datetime.datetime.today()
meetings = data_for_meetings_overview(Meeting.objects.filter(date__lte=today).order_by('-date'))
return render(request, 'meeting/past.html', {
'meetings': meetings,
})
def upcoming(request):
'''List of upcoming meetings'''
today = datetime.date.today()
# Get ietf meetings starting 7 days ago, and interim meetings starting today
ietf_meetings = Meeting.objects.filter(type_id='ietf', date__gte=today-datetime.timedelta(days=7))
for m in ietf_meetings:
m.end = m.date + datetime.timedelta(days=m.days-1) # subtract 1 to avoid counting an extra day
interim_sessions = add_event_info_to_session_qs(
Session.objects.filter(
meeting__type_id='interim',
timeslotassignments__schedule=F('meeting__schedule'),
timeslotassignments__timeslot__time__gte=today
)
).filter(current_status__in=('sched','canceled'))
# get groups for group UI display - same algorithm as in agenda(), but
# using group / parent instead of historic_group / historic_parent
groups = [s.group for s in interim_sessions
if s.group
and is_regular_agenda_filter_group(s.group)
and s.group.parent]
group_parents = {g.parent for g in groups if g.parent}
seen = set()
for p in group_parents:
p.group_list = []
for g in groups:
if g.acronym not in seen and g.parent.acronym == p.acronym:
p.group_list.append(g)
seen.add(g.acronym)
# only one category
filter_categories = [[
dict(
label=p.acronym,
keyword=p.acronym.lower(),
children=[dict(
label=g.acronym,
keyword=g.acronym.lower(),
is_bof=g.is_bof(),
) for g in p.group_list]
) for p in group_parents
]]
for session in interim_sessions:
session.historic_group = session.group
session.filter_keywords = filter_keywords_for_session(session)
entries = list(ietf_meetings)
entries.extend(list(interim_sessions))
entries.sort(key = lambda o: pytz.utc.localize(datetime.datetime.combine(o.date, datetime.datetime.min.time())) if isinstance(o,Meeting) else o.official_timeslotassignment().timeslot.utc_start_time())
for o in entries:
if isinstance(o, Meeting):
o.start_timestamp = int(pytz.utc.localize(datetime.datetime.combine(o.date, datetime.time.min)).timestamp())
o.end_timestamp = int(pytz.utc.localize(datetime.datetime.combine(o.end, datetime.time.max)).timestamp())
else:
o.start_timestamp = int(o.official_timeslotassignment().timeslot.utc_start_time().timestamp())
o.end_timestamp = int(o.official_timeslotassignment().timeslot.utc_end_time().timestamp())
# add menu entries
menu_entries = get_interim_menu_entries(request)
selected_menu_entry = 'upcoming'
# add menu actions
actions = []
if can_request_interim_meeting(request.user):
actions.append(dict(
label='Request new interim meeting',
url=reverse('ietf.meeting.views.interim_request'),
append_filter=False)
)
actions.append(dict(
label='Download as .ics',
url=reverse('ietf.meeting.views.upcoming_ical'),
append_filter=True)
)
actions.append(dict(
label='Subscribe with webcal',
url='webcal://'+request.get_host()+reverse('ietf.meeting.views.upcoming_ical'),
append_filter=True)
)
return render(request, 'meeting/upcoming.html', {
'entries': entries,
'filter_categories': filter_categories,
'menu_actions': actions,
'menu_entries': menu_entries,
'selected_menu_entry': selected_menu_entry,
'now': datetime.datetime.now(),
        'use_codimd': datetime.date.today() >= settings.MEETING_USES_CODIMD_DATE,
})
def upcoming_ical(request):
"""Return Upcoming meetings in iCalendar file
Filters by wg name and session type.
"""
try:
filter_params = parse_agenda_filter_params(request.GET)
except ValueError as e:
return HttpResponseBadRequest(str(e))
today = datetime.date.today()
# get meetings starting 7 days ago -- we'll filter out sessions in the past further down
meetings = data_for_meetings_overview(Meeting.objects.filter(date__gte=today-datetime.timedelta(days=7)).prefetch_related('schedule').order_by('date'))
assignments = list(SchedTimeSessAssignment.objects.filter(
schedule__in=[m.schedule_id for m in meetings] + [m.schedule.base_id for m in meetings if m.schedule],
session__in=[s.pk for m in meetings for s in m.sessions if m.type_id != 'ietf'],
timeslot__time__gte=today,
).order_by(
'schedule__meeting__date', 'session__type', 'timeslot__time'
).select_related(
'session__group', 'session__group__parent', 'timeslot', 'schedule', 'schedule__meeting'
).distinct())
tag_assignments_with_filter_keywords(assignments)
# apply filters
if filter_params is not None:
assignments = [a for a in assignments if should_include_assignment(filter_params, a)]
# we already collected sessions with current_status, so reuse those
sessions = {s.pk: s for m in meetings for s in m.sessions}
for a in assignments:
if a.session_id is not None:
a.session = sessions.get(a.session_id) or a.session
a.session.ical_status = ical_session_status(a)
# handle IETFs separately
ietfs = [m for m in meetings if m.type_id == 'ietf']
preprocess_meeting_important_dates(ietfs)
# icalendar response file should have '\r\n' line endings per RFC5545
response = render_to_string('meeting/upcoming.ics', {
'vtimezones': ''.join(sorted(list({meeting.vtimezone() for meeting in meetings if meeting.vtimezone()}))),
'assignments': assignments,
'ietfs': ietfs,
}, request=request)
response = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", response)
response = HttpResponse(response, content_type='text/calendar')
response['Content-Disposition'] = 'attachment; filename="upcoming.ics"'
return response
def upcoming_json(request):
'''Return Upcoming meetings in json format'''
today = datetime.date.today()
# get meetings starting 7 days ago -- we'll filter out sessions in the past further down
meetings = data_for_meetings_overview(Meeting.objects.filter(date__gte=today-datetime.timedelta(days=7)).order_by('date'))
data = {}
for m in meetings:
data[m.number] = {
'date': m.date.strftime("%Y-%m-%d"),
}
response = HttpResponse(json.dumps(data, indent=2, sort_keys=False), content_type='application/json;charset=%s'%settings.DEFAULT_CHARSET)
return response
def floor_plan(request, num=None, floor=None, ):
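    '''Display the floor plans for a meeting, optionally restricted to a single floor'''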
meeting = get_meeting(num)
schedule = meeting.schedule
floors = FloorPlan.objects.filter(meeting=meeting).order_by('order')
if floor:
floors = [ f for f in floors if xslugify(f.name) == floor ]
for floor in floors:
try:
floor.image.width
except FileNotFoundError:
raise Http404('Missing floorplan image for %s' % floor)
return render(request, 'meeting/floor-plan.html', {
"meeting": meeting,
"schedule": schedule,
"number": num,
"floors": floors,
})
def proceedings(request, num=None):
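    '''Display the proceedings for a meeting'''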
meeting = get_meeting(num)
if (meeting.number.isdigit() and int(meeting.number) <= 64):
return HttpResponseRedirect( 'https://www.ietf.org/proceedings/%s' % num )
if not meeting.schedule or not meeting.schedule.assignments.exists():
kwargs = dict()
if num:
kwargs['num'] = num
return redirect('ietf.meeting.views.materials', **kwargs)
begin_date = meeting.get_submission_start_date()
cut_off_date = meeting.get_submission_cut_off_date()
cor_cut_off_date = meeting.get_submission_correction_date()
now = datetime.date.today()
schedule = get_schedule(meeting, None)
sessions = add_event_info_to_session_qs(
Session.objects.filter(meeting__number=meeting.number)
).filter(
Q(timeslotassignments__schedule__in=[schedule, schedule.base if schedule else None]) | Q(current_status='notmeet')
).select_related().order_by('-current_status')
plenaries = sessions.filter(name__icontains='plenary').exclude(current_status='notmeet')
ietf = sessions.filter(group__parent__type__slug = 'area').exclude(group__acronym='edu')
irtf = sessions.filter(group__parent__acronym = 'irtf')
training = sessions.filter(group__acronym__in=['edu','iaoc'], type_id__in=['regular', 'other', ]).exclude(current_status='notmeet')
iab = sessions.filter(group__parent__acronym = 'iab').exclude(current_status='notmeet')
cache_version = Document.objects.filter(session__meeting__number=meeting.number).aggregate(Max('time'))["time__max"]
ietf_areas = []
for area, sessions in itertools.groupby(sorted(ietf, key=lambda s: (s.group.parent.acronym, s.group.acronym)), key=lambda s: s.group.parent):
sessions = list(sessions)
meeting_groups = set(s.group_id for s in sessions if s.current_status != 'notmeet')
meeting_sessions = []
not_meeting_sessions = []
for s in sessions:
if s.current_status == 'notmeet' and s.group_id not in meeting_groups:
not_meeting_sessions.append(s)
else:
meeting_sessions.append(s)
ietf_areas.append((area, meeting_sessions, not_meeting_sessions))
return render(request, "meeting/proceedings.html", {
'meeting': meeting,
'plenaries': plenaries, 'ietf': ietf, 'training': training, 'irtf': irtf, 'iab': iab,
'ietf_areas': ietf_areas,
'cut_off_date': cut_off_date,
'cor_cut_off_date': cor_cut_off_date,
'submission_started': now > begin_date,
'cache_version': cache_version,
})
@role_required('Secretariat')
def finalize_proceedings(request, num=None):
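    '''Finalize the proceedings for a meeting'''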
meeting = get_meeting(num)
if (meeting.number.isdigit() and int(meeting.number) <= 64) or not meeting.schedule or not meeting.schedule.assignments.exists() or meeting.proceedings_final:
raise Http404
if request.method=='POST':
finalize(meeting)
return HttpResponseRedirect(reverse('ietf.meeting.views.proceedings',kwargs={'num':meeting.number}))
return render(request, "meeting/finalize.html", {'meeting':meeting,})
def proceedings_acknowledgements(request, num=None):
'''Display Acknowledgements for meeting'''
if not (num and num.isdigit()):
raise Http404
meeting = get_meeting(num)
if int(meeting.number) < settings.NEW_PROCEEDINGS_START:
return HttpResponseRedirect( 'https://www.ietf.org/proceedings/%s/acknowledgement.html' % num )
return render(request, "meeting/proceedings_acknowledgements.html", {
'meeting': meeting,
})
def proceedings_attendees(request, num=None):
'''Display list of meeting attendees'''
if not (num and num.isdigit()):
raise Http404
meeting = get_meeting(num)
if int(meeting.number) < settings.NEW_PROCEEDINGS_START:
return HttpResponseRedirect( 'https://www.ietf.org/proceedings/%s/attendees.html' % num )
overview_template = '/meeting/proceedings/%s/attendees.html' % meeting.number
try:
template = render_to_string(overview_template, {})
except TemplateDoesNotExist:
raise Http404
return render(request, "meeting/proceedings_attendees.html", {
'meeting': meeting,
'template': template,
})
def proceedings_overview(request, num=None):
    '''Display the overview page for a given meeting'''
if not (num and num.isdigit()):
raise Http404
meeting = get_meeting(num)
if int(meeting.number) < settings.NEW_PROCEEDINGS_START:
return HttpResponseRedirect( 'https://www.ietf.org/proceedings/%s/overview.html' % num )
overview_template = '/meeting/proceedings/%s/overview.rst' % meeting.number
try:
template = render_to_string(overview_template, {})
except TemplateDoesNotExist:
raise Http404
return render(request, "meeting/proceedings_overview.html", {
'meeting': meeting,
'template': template,
})
@cache_page( 60 * 60 )
def proceedings_progress_report(request, num=None):
'''Display Progress Report (stats since last meeting)'''
if not (num and num.isdigit()):
raise Http404
meeting = get_meeting(num)
if int(meeting.number) < settings.NEW_PROCEEDINGS_START:
return HttpResponseRedirect( 'https://www.ietf.org/proceedings/%s/progress-report.html' % num )
sdate = meeting.previous_meeting().date
edate = meeting.date
context = get_progress_stats(sdate,edate)
context['meeting'] = meeting
return render(request, "meeting/proceedings_progress_report.html", context)
class OldUploadRedirect(RedirectView):
def get_redirect_url(self, **kwargs):
return reverse_lazy('ietf.meeting.views.session_details',kwargs=self.kwargs)
@csrf_exempt
def api_import_recordings(request, number):
'''REST API to check for recording files and import'''
if request.method == 'POST':
meeting = get_meeting(number)
import_audio_files(meeting)
return HttpResponse(status=201)
else:
return HttpResponse(status=405)
@require_api_key
@role_required('Recording Manager')
@csrf_exempt
def api_set_session_video_url(request):
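    '''REST API to set the video recording URL for a session'''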
def err(code, text):
return HttpResponse(text, status=code, content_type='text/plain')
if request.method == 'POST':
# parameters:
# apikey: the poster's personal API key
# meeting: '101', or 'interim-2018-quic-02'
# group: 'quic' or 'plenary'
# item: '1', '2', '3' (the group's first, second, third etc.
# session during the week)
# url: The recording url (on YouTube, or whatever)
user = request.user.person
for item in ['meeting', 'group', 'item', 'url',]:
value = request.POST.get(item)
if not value:
return err(400, "Missing %s parameter" % item)
number = request.POST.get('meeting')
sessions = Session.objects.filter(meeting__number=number)
if not sessions.exists():
return err(400, "No sessions found for meeting '%s'" % (number, ))
acronym = request.POST.get('group')
sessions = sessions.filter(group__acronym=acronym)
if not sessions.exists():
return err(400, "No sessions found in meeting '%s' for group '%s'" % (number, acronym))
session_times = [ (s.official_timeslotassignment().timeslot.time, s.id, s) for s in sessions if s.official_timeslotassignment() ]
session_times.sort()
item = request.POST.get('item')
if not item.isdigit():
return err(400, "Expected a numeric value for 'item', found '%s'" % (item, ))
n = int(item)-1 # change 1-based to 0-based
try:
time, __, session = session_times[n]
except IndexError:
return err(400, "No item '%s' found in list of sessions for group" % (item, ))
url = request.POST.get('url')
try:
URLValidator()(url)
except ValidationError:
return err(400, "Invalid url value: '%s'" % (url, ))
recordings = [ (r.name, r.title, r) for r in session.recordings() if 'video' in r.title.lower() ]
if recordings:
r = recordings[-1][-1]
if r.external_url != url:
e = DocEvent.objects.create(doc=r, rev=r.rev, type="added_comment", by=request.user.person,
desc="External url changed from %s to %s" % (r.external_url, url))
r.external_url = url
r.save_with_history([e])
else:
return err(400, "URL is the same")
else:
time = session.official_timeslotassignment().timeslot.time
title = 'Video recording for %s on %s at %s' % (acronym, time.date(), time.time())
create_recording(session, url, title=title, user=user)
else:
return err(405, "Method not allowed")
return HttpResponse("Done", status=200, content_type='text/plain')
@require_api_key
@role_required('Recording Manager', 'Secretariat')
@csrf_exempt
def api_upload_bluesheet(request):
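    '''REST API to upload a bluesheet for a session'''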
def err(code, text):
return HttpResponse(text, status=code, content_type='text/plain')
if request.method == 'POST':
# parameters:
# apikey: the poster's personal API key
# meeting: number as string, i.e., '101', or 'interim-2018-quic-02'
# group: acronym or special, i.e., 'quic' or 'plenary'
# item: '1', '2', '3' (the group's first, second, third etc.
# session during the week)
# bluesheet: json blob with
# [{'name': 'Name', 'affiliation': 'Organization', }, ...]
for item in ['meeting', 'group', 'item', 'bluesheet',]:
value = request.POST.get(item)
if not value:
return err(400, "Missing %s parameter" % item)
number = request.POST.get('meeting')
sessions = Session.objects.filter(meeting__number=number)
if not sessions.exists():
return err(400, "No sessions found for meeting '%s'" % (number, ))
acronym = request.POST.get('group')
sessions = sessions.filter(group__acronym=acronym)
if not sessions.exists():
return err(400, "No sessions found in meeting '%s' for group '%s'" % (number, acronym))
session_times = [ (s.official_timeslotassignment().timeslot.time, s.id, s) for s in sessions if s.official_timeslotassignment() ]
session_times.sort()
item = request.POST.get('item')
if not item.isdigit():
return err(400, "Expected a numeric value for 'item', found '%s'" % (item, ))
n = int(item)-1 # change 1-based to 0-based
try:
time, __, session = session_times[n]
except IndexError:
return err(400, "No item '%s' found in list of sessions for group" % (item, ))
bjson = request.POST.get('bluesheet')
try:
data = json.loads(bjson)
except json.decoder.JSONDecodeError:
return err(400, "Invalid json value: '%s'" % (bjson, ))
text = render_to_string('meeting/bluesheet.txt', {
'data': data,
'session': session,
})
fd, name = tempfile.mkstemp(suffix=".txt", text=True)
os.close(fd)
with open(name, "w") as file:
file.write(text)
with open(name, "br") as file:
save_err = save_bluesheet(request, session, file)
if save_err:
return err(400, save_err)
else:
return err(405, "Method not allowed")
return HttpResponse("Done", status=200, content_type='text/plain')
def important_dates(request, num=None, output_format=None):
assert num is None or num.isdigit()
preview_roles = ['Area Director', 'Secretariat', 'IETF Chair', 'IAD', ]
meeting = get_ietf_meeting(num)
if not meeting:
raise Http404
base_num = int(meeting.number)
user = request.user
today = datetime.date.today()
meetings = []
if meeting.show_important_dates or meeting.date < today:
meetings.append(meeting)
for i in range(1,3):
future_meeting = get_ietf_meeting(base_num+i)
if future_meeting and ( future_meeting.show_important_dates
or (user and user.is_authenticated and has_role(user, preview_roles))):
meetings.append(future_meeting)
if output_format == 'ics':
preprocess_meeting_important_dates(meetings)
ics = render_to_string('meeting/important_dates.ics', {
'meetings': meetings,
}, request=request)
# icalendar response file should have '\r\n' line endings per RFC5545
response = HttpResponse(re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", ics), content_type='text/calendar')
response['Content-Disposition'] = 'attachment; filename="important-dates.ics"'
return response
return render(request, 'meeting/important-dates.html', {
'meetings': meetings
})
TimeSlotTypeForm = modelform_factory(TimeSlot, fields=('type',))
@role_required('Secretariat')
def edit_timeslot_type(request, num, slot_id):
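    '''Edit the type of a timeslot'''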
timeslot = get_object_or_404(TimeSlot,id=slot_id)
meeting = get_object_or_404(Meeting,number=num)
if timeslot.meeting!=meeting:
raise Http404()
if request.method=='POST':
form = TimeSlotTypeForm(instance=timeslot,data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('ietf.meeting.views.edit_timeslots',kwargs={'num':num}))
else:
form = TimeSlotTypeForm(instance=timeslot)
sessions = timeslot.sessions.filter(timeslotassignments__schedule__in=[meeting.schedule, meeting.schedule.base if meeting.schedule else None])
return render(request, 'meeting/edit_timeslot_type.html', {'timeslot':timeslot,'form':form,'sessions':sessions})
@role_required('Secretariat')
def request_minutes(request, num=None):
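    '''Email a request for missing session minutes to the WG chairs'''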
meeting = get_ietf_meeting(num)
if request.method=='POST':
form = RequestMinutesForm(data=request.POST)
if form.is_valid():
send_mail_text(request,
to=form.cleaned_data.get('to'),
frm=request.user.person.email_address(),
subject=form.cleaned_data.get('subject'),
txt=form.cleaned_data.get('body'),
cc=form.cleaned_data.get('cc'),
)
return HttpResponseRedirect(reverse('ietf.meeting.views.materials',kwargs={'num':num}))
else:
needs_minutes = set()
session_qs = add_event_info_to_session_qs(
Session.objects.filter(
timeslotassignments__schedule__meeting=meeting,
timeslotassignments__schedule__meeting__schedule=F('timeslotassignments__schedule'),
group__type__in=['wg','rg','ag','rag','program'],
)
).filter(~Q(current_status='canceled')).select_related('group', 'group__parent')
for session in session_qs:
if not session.all_meeting_minutes():
group = session.group
if group.parent and group.parent.type_id in ('area','irtf'):
needs_minutes.add(group)
needs_minutes = list(needs_minutes)
needs_minutes.sort(key=lambda g: ('zzz' if g.parent.acronym == 'irtf' else g.parent.acronym)+":"+g.acronym)
body_context = {'meeting':meeting,
'needs_minutes':needs_minutes,
'settings':settings,
}
body = render_to_string('meeting/request_minutes.txt', body_context)
initial = {'to': 'wgchairs@ietf.org',
'cc': 'irsg@irtf.org',
'subject': 'Request for IETF WG and Bof Session Minutes',
'body': body,
}
form = RequestMinutesForm(initial=initial)
context = {'meeting':meeting, 'form': form}
return render(request, 'meeting/request_minutes.html', context)
class ApproveSlidesForm(forms.Form):
title = forms.CharField(max_length=255)
apply_to_all = forms.BooleanField(label='Apply to all group sessions at this meeting',initial=False,required=False)
def __init__(self, show_apply_to_all_checkbox, *args, **kwargs):
super(ApproveSlidesForm, self).__init__(*args, **kwargs )
if not show_apply_to_all_checkbox:
self.fields.pop('apply_to_all')
@login_required
def approve_proposed_slides(request, slidesubmission_id, num):
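    '''Approve or disapprove a proposed slide submission for a session'''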
submission = get_object_or_404(SlideSubmission,pk=slidesubmission_id)
if not submission.session.can_manage_materials(request.user):
permission_denied(request, "You don't have permission to manage slides for this session.")
if submission.session.is_material_submission_cutoff() and not has_role(request.user, "Secretariat"):
permission_denied(request, "The materials cutoff for this session has passed. Contact the secretariat for further action.")
session_number = None
sessions = get_sessions(submission.session.meeting.number,submission.session.group.acronym)
show_apply_to_all_checkbox = len(sessions) > 1 if submission.session.type_id == 'regular' else False
if len(sessions) > 1:
session_number = 1 + sessions.index(submission.session)
name, _ = os.path.splitext(submission.filename)
name = name[:name.rfind('-ss')]
existing_doc = Document.objects.filter(name=name).first()
if request.method == 'POST' and submission.status.slug == 'pending':
form = ApproveSlidesForm(show_apply_to_all_checkbox, request.POST)
if form.is_valid():
apply_to_all = submission.session.type_id == 'regular'
if show_apply_to_all_checkbox:
apply_to_all = form.cleaned_data['apply_to_all']
if request.POST.get('approve'):
# Ensure that we have a file to approve. The system gets cranky otherwise.
if submission.filename is None or submission.filename == '' or not os.path.isfile(submission.staged_filepath()):
return HttpResponseNotFound("The slides you attempted to approve could not be found. Please disapprove and delete them instead.")
title = form.cleaned_data['title']
if existing_doc:
doc = Document.objects.get(name=name)
doc.rev = '%02d' % (int(doc.rev)+1)
doc.title = form.cleaned_data['title']
else:
doc = Document.objects.create(
name = name,
type_id = 'slides',
title = title,
group = submission.session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='slides',slug='active'))
doc.states.add(State.objects.get(type_id='reuse_policy',slug='single'))
if submission.session.sessionpresentation_set.filter(document=doc).exists():
sp = submission.session.sessionpresentation_set.get(document=doc)
sp.rev = doc.rev
sp.save()
else:
max_order = submission.session.sessionpresentation_set.filter(document__type='slides').aggregate(Max('order'))['order__max'] or 0
submission.session.sessionpresentation_set.create(document=doc,rev=doc.rev,order=max_order+1)
if apply_to_all:
for other_session in sessions:
if other_session != submission.session and not other_session.sessionpresentation_set.filter(document=doc).exists():
max_order = other_session.sessionpresentation_set.filter(document__type='slides').aggregate(Max('order'))['order__max'] or 0
other_session.sessionpresentation_set.create(document=doc,rev=doc.rev,order=max_order+1)
sub_name, sub_ext = os.path.splitext(submission.filename)
target_filename = '%s-%s%s' % (sub_name[:sub_name.rfind('-ss')],doc.rev,sub_ext)
doc.uploaded_filename = target_filename
e = NewRevisionDocEvent.objects.create(doc=doc,by=submission.submitter,type='new_revision',desc='New revision available: %s'%doc.rev,rev=doc.rev)
doc.save_with_history([e])
path = os.path.join(submission.session.meeting.get_materials_path(),'slides')
if not os.path.exists(path):
os.makedirs(path)
os.rename(submission.staged_filepath(), os.path.join(path, target_filename))
post_process(doc)
acronym = submission.session.group.acronym
submission.status = SlideSubmissionStatusName.objects.get(slug='approved')
submission.doc = doc
submission.save()
return redirect('ietf.meeting.views.session_details',num=num,acronym=acronym)
elif request.POST.get('disapprove'):
# Errors in processing a submit request sometimes result
# in a SlideSubmission object without a file. Handle
# this case and keep processing the 'disapprove' even if
# the filename doesn't exist.
try:
                    if submission.filename is not None and submission.filename != '':
os.unlink(submission.staged_filepath())
except (FileNotFoundError, IsADirectoryError):
pass
acronym = submission.session.group.acronym
submission.status = SlideSubmissionStatusName.objects.get(slug='rejected')
submission.save()
return redirect('ietf.meeting.views.session_details',num=num,acronym=acronym)
else:
pass
elif not submission.status.slug == 'pending':
return render(request, "meeting/previously_approved_slides.html",
{'submission': submission })
else:
initial = {
'title': submission.title,
'apply_to_all' : submission.apply_to_all,
}
form = ApproveSlidesForm(show_apply_to_all_checkbox, initial=initial )
return render(request, "meeting/approve_proposed_slides.html",
{'submission': submission,
'session_number': session_number,
'existing_doc' : existing_doc,
'form': form,
})
| 44.933549 | 245 | 0.636053 |
79493d01d972ab1f761866d708904a4411d0572f
| 5,670 |
py
|
Python
|
megalista_dataflow/uploaders/google_analytics/google_analytics_data_import_eraser_test.py
|
omnicomdatahousecl/megalista
|
a8085608263b592a9ace144809d6352b5f24e58d
|
[
"Apache-2.0"
] | 44 |
2021-03-10T15:41:50.000Z
|
2022-02-28T11:21:37.000Z
|
megalista_dataflow/uploaders/google_analytics/google_analytics_data_import_eraser_test.py
|
QPC-database/megalista
|
0d74da9129886969ee1ddeb2c59ead15a98eb99a
|
[
"Apache-2.0"
] | 18 |
2021-03-11T12:17:38.000Z
|
2022-03-11T20:46:54.000Z
|
megalista_dataflow/uploaders/google_analytics/google_analytics_data_import_eraser_test.py
|
QPC-database/megalista
|
0d74da9129886969ee1ddeb2c59ead15a98eb99a
|
[
"Apache-2.0"
] | 25 |
2021-03-10T00:53:12.000Z
|
2022-03-29T16:09:58.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from apache_beam.options.value_provider import StaticValueProvider
from models.oauth_credentials import OAuthCredentials
from models.execution import Execution, SourceType, DestinationType, Source, AccountConfig, Destination, Batch
from uploaders.google_analytics.google_analytics_data_import_eraser import GoogleAnalyticsDataImportEraser
@pytest.fixture
def eraser(mocker):
client_id = StaticValueProvider(str, "id")
secret = StaticValueProvider(str, "secret")
access = StaticValueProvider(str, "access")
refresh = StaticValueProvider(str, "refresh")
credentials = OAuthCredentials(client_id, secret, access, refresh)
return GoogleAnalyticsDataImportEraser(credentials)
def test_analytics_has_not_data_sources(mocker, eraser, caplog):
service = mocker.MagicMock()
mocker.patch.object(eraser, '_get_analytics_service')
eraser._get_analytics_service.return_value = service
mocker.patch.object(eraser, '_is_table_empty')
eraser._is_table_empty.return_value = False
service.management().customDataSources().list().execute.return_value = {
'items': []
}
execution = Execution(AccountConfig('', False, '', '', ''),
Source('orig1', SourceType.BIG_QUERY, ['dt1', 'buyers']),
Destination('dest1', DestinationType.GA_DATA_IMPORT, ['web_property', 'data_import_name']))
# Act
try:
next(eraser.process(Batch(execution, [])))
except StopIteration:
pass
assert 'data_import_name - data import not found, please configure it in Google Analytics' in caplog.text
def test_data_source_not_found(mocker, eraser, caplog):
service = mocker.MagicMock()
mocker.patch.object(eraser, '_get_analytics_service')
eraser._get_analytics_service.return_value = service
mocker.patch.object(eraser, '_is_table_empty')
eraser._is_table_empty.return_value = False
service.management().customDataSources().list().execute.return_value = {
'items': [{'id': 1, 'name': 'wrong_name'}]
}
execution = Execution(AccountConfig('', False, '', '', ''),
Source('orig1', SourceType.BIG_QUERY, ['dt1', 'buyers']),
Destination('dest1', DestinationType.GA_DATA_IMPORT, ['web_property', 'data_import_name']))
# Act
try:
next(eraser.process(Batch(execution, [])))
except StopIteration:
pass
assert 'data_import_name - data import not found, please configure it in Google Analytics' in caplog.text
def test_no_files_found(mocker, eraser):
service = mocker.MagicMock()
mocker.patch.object(eraser, '_get_analytics_service')
eraser._get_analytics_service.return_value = service
mocker.patch.object(eraser, '_is_table_empty')
eraser._is_table_empty.return_value = False
service.management().customDataSources().list().execute.return_value = {
'items': [{'id': 1, 'name': 'data_import_name'},
{'id': 2, 'name': 'data_import_name2'}]
}
execution = Execution(AccountConfig('', False, '', '', ''),
Source('orig1', SourceType.BIG_QUERY, ['dt1', 'buyers']),
Destination('dest1', DestinationType.GA_DATA_IMPORT, ['web_property', 'data_import_name']))
# Add mock to side effect of list uploads
service.management().uploads().list().execute.return_value = {'items': []}
# Add mock to side effect of deleteUploadData
delete_call_mock = mocker.MagicMock()
service.management().uploads().deleteUploadData.side_effect = delete_call_mock
# Act
next(eraser.process(Batch(execution, [])))
    # Should not be called, since there are no uploaded files to delete
delete_call_mock.assert_not_called()
def test_files_deleted(mocker, eraser):
service = mocker.MagicMock()
mocker.patch.object(eraser, '_get_analytics_service')
eraser._get_analytics_service.return_value = service
mocker.patch.object(eraser, '_is_table_empty')
eraser._is_table_empty.return_value = False
service.management().customDataSources().list().execute.return_value = {
'items': [{'id': 1, 'name': 'data_import_name'},
{'id': 2, 'name': 'data_import_name2'}]
}
execution = Execution(AccountConfig('', False, '', '', ''),
Source('orig1', SourceType.BIG_QUERY, ['dt1', 'buyers']),
Destination('dest1', DestinationType.GA_DATA_IMPORT, ['web_property', 'data_import_name']))
# Add mock to side effect of list uploads
service.management().uploads().list().execute.return_value = {'items': [{'id': 'ab'}, {'id': 'cd'}]}
# Add mock to side effect of deleteUploadData
delete_call_mock = mocker.MagicMock()
service.management().uploads().deleteUploadData.side_effect = delete_call_mock
# Act
next(eraser.process(Batch(execution, [])))
# Called once
delete_call_mock.assert_called_once()
# Intercept args called
_, kwargs = delete_call_mock.call_args
# Check if really sent values from custom field
ids = kwargs['body']
# assert
| 37.058824 | 117 | 0.688889 |
79493d6223ba52e57662715f5c5e48a170182722
| 2,547 |
py
|
Python
|
ArcaeaAssetsUpdater/assets_updater.py
|
Chendihe4975/ArcaeaAssetsUpdater
|
7c567f7606265a4709bae7b019ea1b1635f15aa4
|
[
"MIT"
] | 1 |
2022-03-14T06:34:04.000Z
|
2022-03-14T06:34:04.000Z
|
ArcaeaAssetsUpdater/assets_updater.py
|
Chendihe4975/ArcaeaAssetsUpdater
|
7c567f7606265a4709bae7b019ea1b1635f15aa4
|
[
"MIT"
] | null | null | null |
ArcaeaAssetsUpdater/assets_updater.py
|
Chendihe4975/ArcaeaAssetsUpdater
|
7c567f7606265a4709bae7b019ea1b1635f15aa4
|
[
"MIT"
] | 1 |
2022-03-14T12:37:24.000Z
|
2022-03-14T12:37:24.000Z
|
"""
- Author: DiheChen
- Date: 2021-08-14 23:42:42
- LastEditTime: 2021-08-21 00:04:28
- LastEditors: DiheChen
- Description: None
- GitHub: https://github.com/Chendihe4975
"""
from os import path
from sys import platform
from zipfile import ZipFile
import ujson as json
from aiohttp import ClientSession
from config import Config
if platform == "win32":
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
class ArcaeaAssetsUpdater:
work_path = path.abspath(path.join(path.dirname(__file__), "data"))
version_info = path.join(work_path, f"version.json")
def __init__(self) -> None:
pass
@staticmethod
def get_local_version_info():
with open(ArcaeaAssetsUpdater.version_info, "r") as file:
return json.loads(file.read())["value"]["version"]
@staticmethod
def mark_version_info(data: dict):
with open(ArcaeaAssetsUpdater.version_info, "w") as f:
f.write(json.dumps(data, indent=4))
return None
@staticmethod
async def download_file(force_download: bool = False):
async with ClientSession() as session:
async with session.get("https://webapi.lowiro.com/webapi/serve/static/bin/arcaea/apk",
proxy=Config.proxy, verify_ssl=False) as resp:
if resp.ok:
j = await resp.json()
if not force_download and j["value"]["version"] == ArcaeaAssetsUpdater.get_local_version_info():
return False
ArcaeaAssetsUpdater.mark_version_info(j)
async with session.get(j["value"]["url"], proxy=Config.proxy, verify_ssl=False) as resp:
with open(path.join(ArcaeaAssetsUpdater.work_path, f"arcaea_{j['value']['version']}.apk"),
'wb') as res:
res.write(await resp.read())
return True
@staticmethod
async def unzip_file():
zip_file = ZipFile(path.join(
ArcaeaAssetsUpdater.work_path, f"arcaea_{ArcaeaAssetsUpdater.get_local_version_info()}.apk"))
file_list = zip_file.namelist()
for f in file_list:
if f.startswith("assets"):
zip_file.extract(f, ArcaeaAssetsUpdater.work_path)
return True
@staticmethod
async def force_update():
if await ArcaeaAssetsUpdater.download_file(force_download=True):
await ArcaeaAssetsUpdater.unzip_file()
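# Hypothetical usage sketch, not part of the original module: drives the updater
# from a script entry point using only the class methods defined above. Note that
# running it performs a real download from lowiro's API.
async def _demo_update() -> None:
    changed = await ArcaeaAssetsUpdater.download_file(force_download=False)
    if changed:
        await ArcaeaAssetsUpdater.unzip_file()
if __name__ == "__main__":
    import asyncio
    asyncio.run(_demo_update())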
| 36.385714 | 116 | 0.628583 |
79493d813acd27717db6ffb9cb83b1cd3e3d3dcf
| 145 |
py
|
Python
|
app/euler/__init__.py
|
Joey-Wondersign/Staffjoy-suite-Joey
|
b6d0d87b8e60e6b866810ebeed631fb02fadad48
|
[
"MIT"
] | 890 |
2017-02-25T07:11:09.000Z
|
2022-03-08T05:49:20.000Z
|
app/euler/__init__.py
|
Joey-Wondersign/Staffjoy-suite-Joey
|
b6d0d87b8e60e6b866810ebeed631fb02fadad48
|
[
"MIT"
] | 11 |
2017-02-25T18:07:11.000Z
|
2020-10-19T13:09:41.000Z
|
app/euler/__init__.py
|
nfriedly/suite
|
c58c772d98d1476cad0531b8a296f27ad2ab945c
|
[
"MIT"
] | 276 |
2017-02-25T09:01:23.000Z
|
2022-03-19T02:24:02.000Z
|
from flask import Blueprint
euler = Blueprint(
'euler', __name__, template_folder='templates', static_folder='static')
from . import views
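# Hypothetical registration sketch, not part of the original package: shows how this
# blueprint would typically be attached to an application; the app object and URL
# prefix are assumptions, not taken from this repository.
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(euler, url_prefix='/euler')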
| 20.714286 | 75 | 0.751724 |
79493dbd596c655225f141ebf893bcbe5422a420
| 230 |
py
|
Python
|
users/reducer/nlp_loader.py
|
SionHu/Fairvision
|
b02c9111c52146be3b3590601d1deba48e69c813
|
[
"Apache-2.0"
] | 6 |
2019-04-08T23:06:21.000Z
|
2020-07-15T18:22:50.000Z
|
users/reducer/nlp_loader.py
|
moyazzz/Crowdsourcing
|
107827c8b7689ec1a847e38aff0b7f6747091c97
|
[
"Apache-2.0"
] | 8 |
2019-03-08T02:47:08.000Z
|
2019-07-26T18:27:25.000Z
|
users/reducer/nlp_loader.py
|
moyazzz/Crowdsourcing
|
107827c8b7689ec1a847e38aff0b7f6747091c97
|
[
"Apache-2.0"
] | 8 |
2019-05-17T02:04:45.000Z
|
2020-06-29T01:10:18.000Z
|
from en_core_web_md import load
import spacy
nlp = load() # Single load in memory till killed
if not isinstance(nlp, spacy.lang.en.English):
raise TypeError("Model given is not of type {}.".format("spacy.lang.en.English"))
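# Hypothetical usage sketch, not part of the original module: the sample sentence is
# illustrative and only exercises the `nlp` pipeline loaded above through standard
# spaCy Doc/Token attributes.
#   doc = nlp("Crowdsourced labels need careful aggregation.")
#   tokens = [(token.text, token.pos_) for token in doc]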
| 28.75 | 85 | 0.734783 |
79493e35fd4ad54066e9551e4d83f376f5da364f
| 512 |
py
|
Python
|
day10/test_lib.py
|
heijp06/AoC-2021
|
f6afead5e1fe9a839d608a5792f84e54803742c1
|
[
"MIT"
] | null | null | null |
day10/test_lib.py
|
heijp06/AoC-2021
|
f6afead5e1fe9a839d608a5792f84e54803742c1
|
[
"MIT"
] | null | null | null |
day10/test_lib.py
|
heijp06/AoC-2021
|
f6afead5e1fe9a839d608a5792f84e54803742c1
|
[
"MIT"
] | null | null | null |
from lib import part1, part2
def test_part1():
assert part1(data) == 26397
def test_part2():
assert part2(data) == 288957
def test_part2_simple():
assert part2(["("]) == 1
data = [
"[({(<(())[]>[[{[]{<()<>>",
"[(()[<>])]({[<{<<[]>>(",
"{([(<{}[<>[]}>{[]{[(<()>",
"(((({<>}<{<{<>}{[]{[]{}",
"[[<[([]))<([[{}[[()]]]",
"[{[{({}]{}}([{[{{{}}([]",
"{<[[]]>}<{[{[{[]{()[[[]",
"[<(<(<(<{}))><([]([]()",
"<{([([[(<>()){}]>(<<{{",
"<{([{{}}[<[[[<>{}]]]>[]]",
]
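# Hypothetical reference sketch, not part of the original test module: one possible
# implementation of the bracket-matching and scoring logic that lib.part1 and
# lib.part2 presumably provide, written here only to make the expected values above
# concrete. Names are underscore-prefixed so the imported functions are not shadowed.
_PAIRS = {"(": ")", "[": "]", "{": "}", "<": ">"}
_ERROR_SCORES = {")": 3, "]": 57, "}": 1197, ">": 25137}
_COMPLETION_SCORES = {")": 1, "]": 2, "}": 3, ">": 4}
def _check(line):
    stack = []
    for ch in line:
        if ch in _PAIRS:
            stack.append(_PAIRS[ch])  # remember the closer this opener requires
        elif not stack or ch != stack.pop():
            return ch, None  # corrupted line: first illegal closing character
    return None, stack  # incomplete line: expected closers, innermost last
def _reference_part1(lines):
    return sum(_ERROR_SCORES[bad] for bad, _ in map(_check, lines) if bad)
def _reference_part2(lines):
    totals = []
    for bad, stack in map(_check, lines):
        if bad is None:
            score = 0
            for ch in reversed(stack):
                score = score * 5 + _COMPLETION_SCORES[ch]
            totals.append(score)
    return sorted(totals)[len(totals) // 2]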
| 18.285714 | 32 | 0.246094 |
79493ee453662ad087b11f36db6d7dfe18f6af02
| 6,479 |
py
|
Python
|
scripts/make_thumbnails.py
|
Pandinosaurus/geojs
|
c38b3c91a597db84bbc74c2c915bb525a82aedc1
|
[
"Apache-2.0"
] | 365 |
2015-01-28T12:07:22.000Z
|
2022-03-27T14:17:10.000Z
|
scripts/make_thumbnails.py
|
Pandinosaurus/geojs
|
c38b3c91a597db84bbc74c2c915bb525a82aedc1
|
[
"Apache-2.0"
] | 699 |
2015-01-05T21:22:40.000Z
|
2022-03-30T15:58:55.000Z
|
scripts/make_thumbnails.py
|
Pandinosaurus/geojs
|
c38b3c91a597db84bbc74c2c915bb525a82aedc1
|
[
"Apache-2.0"
] | 74 |
2015-02-23T14:08:13.000Z
|
2022-03-17T23:37:05.000Z
|
#!/usr/bin/env python
# NOTE: If this doesn't work, it may be related to a policy in
# /etc/ImageMagick-6/policy.xml
# Specifically, disable
# <policy domain="coder" rights="none" pattern="PS" />
# by removing it or commenting it out.
import json
import os
import psutil
import signal
import six
import subprocess
import sys
import time
OriginalSize = (1200, 900)
ExtraSpace = 1 # 1, otherwise we get a black border on the bottom and right
NavbarHeight = 60
FinalSize = (800, 600)
InitialDelay = 15 # in seconds
MaxDelay = 30 # in seconds
Quality = 90
OutputFile = 'thumb.jpg'
InputList = ["examples", "tutorials"]
BrowserCommand = [
'xvfb-run', '-s', '-ac -screen 0 %dx%dx24' % (
OriginalSize[0] + ExtraSpace, OriginalSize[1] + ExtraSpace + NavbarHeight),
'google-chrome', '--kiosk', '--no-pings', '--device-scale-factor=1',
'--incognito', '--start-fullscreen', '--no-default-browser-check',
'--user-data-dir=/tmp/chrome_geojs_thumbnails', '--no-first-run',
'--disable-default-apps', '--disable-popup-blocking',
'--disable-translate', '--disable-background-timer-throttling',
'--disable-renderer-backgrounding',
'--disable-device-discovery-notifications',
'--window-position=0,0',
]
BrowserCommandSize = [
'--window-size=%d,%d' % (OriginalSize[0] + ExtraSpace, OriginalSize[1] + ExtraSpace),
]
BrowserCommandSizeIgnoreNavbar = [
'--window-size=%d,%d' % (
OriginalSize[0] + ExtraSpace, OriginalSize[1] + ExtraSpace + NavbarHeight),
]
BrowserUrl = 'http://127.0.0.1:30100/%s'
ImageCommand = (
'DISPLAY=:99.0 import -window root -crop %dx%d+0+0 +repage - | '
'convert - -resize %dx%d -quality %d ' % (
OriginalSize[0], OriginalSize[1], FinalSize[0], FinalSize[1], Quality))
ImageCommandIgnoreNavbar = (
'DISPLAY=:99.0 import -window root -crop %dx%d+0+%d +repage - | '
'convert - -resize %dx%d -quality %d ' % (
OriginalSize[0], OriginalSize[1], NavbarHeight, FinalSize[0], FinalSize[1], Quality))
def process_item(path, opts):
output = (open('/tmp/thumbnail.out', 'a')
if opts.get('verbose', 0) >= 1 else open(os.devnull, 'w'))
data = json.load(open(path))
if data.get('disabled') and not opts.get('all'):
return
dest = os.path.join(os.path.dirname(path), OutputFile)
if os.path.exists(dest) and not opts.get('force'):
return
originalSize = 0
if os.path.exists(dest):
originalSize = os.path.getsize(dest)
sys.stdout.write('\r%s %d' % (path, originalSize))
sys.stdout.flush()
if opts.get('simulate'):
dest = os.path.join('/tmp', os.path.basename(os.path.dirname(
os.path.dirname(path))) + '_' + os.path.basename(os.path.dirname(
path)) + '_' + OutputFile)
if os.path.exists(dest):
os.unlink(dest)
cmd = list(BrowserCommand)
imgcmd = ImageCommand
if 'example.json' in path and not data.get('hideNavbar'):
cmd.extend(BrowserCommandSizeIgnoreNavbar)
imgcmd = ImageCommandIgnoreNavbar
else:
cmd.extend(BrowserCommandSize)
url = BrowserUrl % os.path.dirname(path)
if data.get('thumbquery'):
url += '?' + data['thumbquery']
cmd.append(url)
output.write('--> %r\n' % (cmd, ))
output.write(' %s\n' % (' '.join([six.moves.shlex_quote(arg) for arg in cmd])))
proc = subprocess.Popen(cmd, shell=False, stdout=output, stderr=output)
delay = opts.get('delay', InitialDelay)
startTime = time.time()
time.sleep(delay)
lastSize = 0
while True:
output.write('--> %r\n' % (imgcmd + six.moves.shlex_quote(dest), ))
subprocess.Popen(
imgcmd + six.moves.shlex_quote(dest), shell=True,
stdout=output, stderr=output).wait()
newSize = os.path.getsize(dest)
if newSize and newSize == lastSize:
break
if time.time() - startTime > opts.get('maxdelay', MaxDelay):
break
lastSize = newSize
sys.stdout.write('\r%s %d %d ' % (path, originalSize, newSize))
sys.stdout.flush()
time.sleep(0.5)
for child in psutil.Process(proc.pid).children(recursive=True):
try:
child.send_signal(signal.SIGINT)
except psutil.NoSuchProcess:
pass
os.kill(proc.pid, signal.SIGINT)
proc.wait()
sys.stdout.write('\n')
if __name__ == '__main__': # noqa
opts = {'force': False, 'verbose': 0}
for arg in sys.argv[1:]:
if arg in ('-a', '--all'):
opts['all'] = True
elif arg.startswith('--delay='):
opts['delay'] = float(arg.split('=', 1)[1])
elif arg == '--force':
opts['force'] = True
elif arg.startswith('--maxdelay='):
opts['maxdelay'] = float(arg.split('=', 1)[1])
elif arg.startswith('--only='):
opts['only'] = arg.split('=', 1)[1]
elif arg in ('-s', '--simulate'):
opts['simulate'] = True
elif arg in ('-v', '--verbose'):
opts['verbose'] += 1
else:
opts['help'] = True
if opts.get('help'):
print("""
Regenerate thumbnails for examples and tutorials.
Syntax: make_thumbnails.py --force --simulate --only=(substr) --all
--delay=(seconds) --maxdelay=(seconds)
Run in the root geojs directory.
--all or -a generates thumbnails for disabled examples, too.
--delay is the duration after the web browser is started before a thumbnail
snapshot might be taken. The thumbnail is only taken after the webpage hasn't
changed for a short duration.
--force regenerates all thumbnails. Otherwise, only missing thumbnails are
created.
--maxdelay is the longest to wait before taking the snapshot. This will happen
even if the webpage is still changing.
--only will only process examples or tutorials whose name contains the
specified substring.
--simulate or -s determines the size of thumbnails that would be created but
doesn't make them.
""")
sys.exit(0)
for inputdir in InputList:
for root, dirs, files in os.walk(inputdir):
dirs.sort()
for dir in dirs:
for name in ['example.json', 'tutorial.json']:
path = os.path.join(root, dir, name)
if opts.get('only') and not opts['only'] in path:
continue
if os.path.exists(path):
process_item(path, opts)
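# Hypothetical invocation sketch, not part of the original script: flag values are
# illustrative and follow the help text above; xvfb-run, google-chrome and
# ImageMagick must be installed, and the pages must be served on
# http://127.0.0.1:30100 before running.
#   python scripts/make_thumbnails.py --force --only=tutorials --delay=20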
| 38.111765 | 93 | 0.610742 |
79494094896a8ed29e1d74d2603e605bdb8d2f6b
| 12,822 |
py
|
Python
|
tornado/CENTER.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
tornado/CENTER.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
tornado/CENTER.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import sys
import traceback
from core import dbmysql
from core.err_code import NO_AUTH_SKEY, UNACCP_PARAS, SYSCALL_ERR, OCT_SUCCESS
from core.log import ERROR, DEBUG, INFO
from models.Common import DEFAULT_ACCOUNT_ID
from utils.commonUtil import getUuid, isSystemWindows
from utils.httpUtil import buildReply, getArgObj, buildFailureReply, appendBaseArg
from utils.sessionUtil import getSession
from views.api.dispatch import doDispatching, IGNORE_SESSION_APIS
sys.path.append("../")
import tornado
import tornado.httpclient
import tornado.ioloop
import tornado.web
import tornado.gen
import tornado.websocket
import tornado.options
from conf.config import *
from views.api.center.api import API_MODULE_LIST, API_PREFIX, PARAM_TYPE_INT
LISTEN_PORT = 8080
LISTEN_ADDR = "0.0.0.0"
API_PROTOS = {}
API_VIEW_LIST = {}
TEMPLATE_NOT_FOUND = "pagenotfound.html"
TEMPLATE_ROBOT = "robot.html"
TEMPLATE_DASHBOARD = "dashboard.html"
TEMPLATE_CONFIG = "config.html"
TEMPLATE_CONTACT = "contact.html"
TEMPLATE_GPOUP = "group.html"
TEMPLATE_GPOUP_MEMBER = "group_member.html"
TEMPLATE_GPOUP_MESSAGE = "group_message.html"
TEMPLATE_MESSAGE = "message.html"
TEMPLATE_LIST = {
"index": TEMPLATE_DASHBOARD,
"robot": TEMPLATE_ROBOT,
"config": TEMPLATE_CONFIG,
"contact": TEMPLATE_CONTACT,
"group": TEMPLATE_GPOUP,
"group_member": TEMPLATE_GPOUP_MEMBER,
"message": TEMPLATE_MESSAGE,
"group_message": TEMPLATE_GPOUP_MESSAGE,
}
def getTemplate(module="index"):
return TEMPLATE_LIST.get(module) or (TEMPLATE_NOT_FOUND)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/login/", LoginHandler),
(r"/logout/", LogoutHandler),
(r"/app/(.*)", AppHandler),
(r"/app/(.*)/(.*)", AppDirHandler),
(r"/api/", ApiHandler),
(r"/api/test/", ApiTestHandler),
(r"/files/upload/", FileUploadHandler),
(r"/config/(.*)", tornado.web.StaticFileHandler, {"path": SystemConf.tmpPath}),
]
settings = dict(
cookie_secret="61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "ng"),
xsrf_cookies=False,
)
tornado.web.Application.__init__(self, handlers, **settings)
class LogoutHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
cookie = self.get_cookie("usercookie", "")
if cookie:
pass
self.clear_cookie("usercookie")
self.redirect("/login/")
class LoginHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
error = self.get_argument("error", "")
if error:
prompt = "用户名不在,或密码不匹配!"
else:
prompt = ""
self.render("login.html", ACTION="error", PROMPT=prompt)
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
argObj = getArgObj(self.request)
paras = {
"account": self.get_argument("username"),
"password": self.get_argument("password"),
"role": 7,
"accountId": DEFAULT_ACCOUNT_ID
}
argObj["api"] = "octlink.tundra.v1.account.APILoginByAccount"
argObj["paras"] = paras
self.db = dbmysql.mysqldb()
session = getSession(self.db, sessionId="00000000000000000000000000000000")
del self.db
argObj["session"] = session
retObj = doDispatching(argObj, session, API_PROTOS)
if retObj["RetCode"] != OCT_SUCCESS:
ERROR("login error %s" % str(retObj))
self.redirect("/login/?error=true")
else:
sessionObj = retObj["RetObj"]["session"]
self.set_cookie("usercookie", sessionObj["id"])
self.set_cookie("username", retObj["RetObj"]["name"])
self.set_cookie("userid", retObj["RetObj"]["id"])
self.redirect("/")
class AppHandler(tornado.web.RequestHandler):
def get(self, filepath=None):
query = self.request.query
if (query):
self.redirect("/static/app/%s?%s" % (filepath, query))
else:
self.redirect("/static/app/%s" % filepath)
class AppDirHandler(tornado.web.RequestHandler):
def get(self, subpath=None, filepath=None):
query = self.request.query
if (query):
self.redirect("/static/app/%s/%s?%s" % (subpath, filepath, query))
else:
self.redirect("/static/app/%s/%s" % (subpath, filepath))
class MainHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
cookie = self.get_cookie("usercookie", "")
username = self.get_cookie("username", "")
userid = self.get_cookie("userid", "")
if not cookie:
self.redirect("/login/")
return
self.db = dbmysql.mysqldb()
session = getSession(self.db, sessionId=cookie)
del self.db
if not session:
self.redirect("/login/")
return
self.render("index.html")
class ApiTestHandler(tornado.web.RequestHandler):
result = {
"moduleSelected": "account",
"apiSelected": "octlink.wcrobot.v1.account.APILoginByAccount",
"request": "{}",
"reply": "{}",
"paras": "{}"
}
@tornado.web.asynchronous
def get(self):
self.render("testapi.html", moduleList=API_VIEW_LIST,
moduleListStr=json.dumps(API_VIEW_LIST, indent=4),
result=self.result,
resultStr=json.dumps(self.result, indent=4, ensure_ascii=False))
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self, *args, **kwargs):
argObj = getArgObj(self.request)
api = argObj["api"]
paras = argObj["paras"]
        is_async = False  # note: 'async' is a reserved keyword in Python 3.7+ and cannot be used as a name
        if paras["timeout"] != 0:
            is_async = True
        api_body = {
            "api": api,
            "paras": paras,
            "async": is_async,
"session": {
"uuid": "00000000000000000000000000000000"
}
}
self.result["paras"] = argObj["paras"]
self.result["moduleSelected"] = argObj["module"]
self.result["apiSelected"] = argObj["api"]
self.result["request"] = json.dumps(argObj, indent=4, ensure_ascii=False)
client = tornado.httpclient.AsyncHTTPClient()
url = "http://%s:%d/api/" % ("127.0.0.1", RUNNING_PORT)
ERROR("%sfff" % url)
response = yield client.fetch(url, method="POST", request_timeout=10, connect_timeout=10,
body=json.dumps(api_body))
self.on_response(response)
def on_response(self, resp):
body = json.loads(str(resp.body, encoding="utf-8"))
if body == None:
result = buildFailureReply(SYSCALL_ERR)
self.result["reply"] = json.dumps(result, indent=4, ensure_ascii=False)
self.write(json.dumps(result, indent=4, ensure_ascii=False))
else:
self.result["reply"] = json.dumps(body, indent=4, ensure_ascii=False)
self.write(body)
self.redirect("/api/test/")
def getSessionId(argObj):
session = argObj.get("session")
if (session):
return session.get("uuid")
else:
return None
UPLOAD_API_MAP = {
"APISystemUpgrade": "octlink.center.v5.upgrade.APISystemUpgrade",
"APIUploadLicense": "octlink.center.v5.license.APIUploadLicense"
}
class FileUploadHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
self.db = dbmysql.mysqldb()
if isSystemWindows():
filePath = "var/tmp/" + getUuid()
else:
filePath = "/tmp/" + getUuid()
# get the request file to cache path
try:
file_metas = self.request.files['file']
except:
file_metas = self.request.files['filename']
for meta in file_metas:
with open(filePath, 'wb') as up:
up.write(meta['body'])
argObj = appendBaseArg({}, self.request)
argObj["paras"]["role"] = 7
argObj["paras"]["accountId"] = DEFAULT_ACCOUNT_ID
api_key = self.get_argument("api", None)
if (not api_key):
self.write(buildFailureReply(UNACCP_PARAS, errorMsg="api key error"))
self.finish()
return
argObj["paras"]["filePath"] = filePath
argObj["api"] = UPLOAD_API_MAP.get(api_key)
if (not argObj["api"]):
self.write(buildFailureReply(UNACCP_PARAS, errorMsg=api_key))
self.finish()
return
session = getSession(self.db, sessionId="00000000000000000000000000000000")
argObj["session"] = session
retObj = doDispatching(argObj, session, API_PROTOS)
self.write(buildReply(retObj))
self.finish()
class ApiHandler(tornado.web.RequestHandler):
SUPPORTED_METHODS = ("POST")
db = None
def __init__(self, application, request, **kwargs):
super(ApiHandler, self).__init__(application, request, **kwargs)
self.db = dbmysql.mysqldb()
def checkSession(self, argObj):
apiName = argObj.get("api")
if (apiName.split(".")[-1] in IGNORE_SESSION_APIS):
DEBUG("User login API, no need check session")
return (True, {})
sessionId = getSessionId(argObj)
if (not sessionId):
return (False, {})
DEBUG("got session id %s" % sessionId)
sessionObj = getSession(self.db, sessionId)
if not sessionObj:
return (False, {})
return (True, sessionObj)
def getAccountInfo(self, session):
if session.get("cookie"):
role = session["cookie"]["role"] or 7
accountId = session["cookie"]["id"] or DEFAULT_ACCOUNT_ID
else:
role = 7
accountId = DEFAULT_ACCOUNT_ID
return role, accountId
@tornado.web.asynchronous
def post(self, *args, **kwargs):
argObj = getArgObj(self.request)
# import time
# yield tornado.gen.Task(tornado.ioloop.IOLoop.instance().add_timeout, time.time() + 10)
if (not argObj.get("api")):
ERROR("not a valid api, no api exist")
self.write(buildFailureReply(UNACCP_PARAS))
self.finish()
return
(status, session) = self.checkSession(argObj)
if (not status):
ERROR("check session failed %s " % str(argObj))
self.write(buildFailureReply(NO_AUTH_SKEY))
self.finish()
return
(role, accountId) = self.getAccountInfo(session)
argObj["paras"]["role"] = role
# IF accountId Specified, just use it
if not argObj["paras"].get("accountId"):
argObj["paras"]["accountId"] = accountId
retObj = doDispatching(argObj, session, API_PROTOS)
self.write(buildReply(retObj))
self.finish()
def runWebServer(addr, port):
tornado.options.parse_command_line()
app = Application()
app.listen(port, addr)
tornado.ioloop.IOLoop.instance().start()
def loadFunction(apiProto):
serviceName = apiProto["serviceName"]
if (not serviceName):
apiProto["func"] = None
return True
funcName = serviceName.split(".")[-1]
modulePath = serviceName.split(".")[:-1]
try:
service = __import__("modules." + ".".join(modulePath), fromlist=["from modules import"])
except Exception as e:
print(('Import module failed. [%s]' % funcName))
print(('Import module failed. [%s]' % e))
print(('Import module failed. [%s]' % traceback.format_exc()))
return False
if hasattr(service, funcName):
funcObj = getattr(service, funcName)
apiProto["func"] = funcObj
else:
print(('There is no %s in %s' % (funcName, modulePath)))
del service
return False
return True
def loadAPIs():
global API_PROTOS
for moduleName in API_MODULE_LIST:
module = __import__("views.api.center.api_" + moduleName, fromlist=["from views import"])
for (k, v) in list(module.funcList.items()):
key = API_PREFIX + "." + moduleName + "." + k
if (not loadFunction(v)):
print("load function error")
return False
API_PROTOS[key] = v
print("Loaded all APIs OK!")
return True
def loadViewAPIs():
def copy_paras(paras):
copyed_paras = {}
for (k, v) in list(paras.items()):
copyed_paras[k] = v
append_extra_paras(copyed_paras)
return copyed_paras
def append_extra_paras(paras):
if (not paras.get("paras")):
paras["timeout"] = {
"default": 0,
"type": PARAM_TYPE_INT,
"desc": "Timeout Value",
"descCN": "超时时间,0表示同步调用",
}
global API_VIEW_LIST
for moduleName in API_MODULE_LIST:
API_VIEW_LIST[moduleName] = []
module = __import__("views.api.center.api_" + moduleName, fromlist=["from views import"])
for (k, v) in list(module.funcList.items()):
key = API_PREFIX + "." + moduleName + "." + k
apiProto = {
"name": v["name"],
"key": key,
"paras": copy_paras(v.get("paras") or {})
}
API_VIEW_LIST[moduleName].append(apiProto)
print("Loaded all APIs OK!")
def init():
if (not loadAPIs()):
return False
loadViewAPIs()
return True
# def startApiEngine():
# _thread.start_new_thread(apiEngine, ("API Engine Thread", 20))
if __name__ == "__main__":
if (float(tornado.version.split(".")[0]) < 3.0):
print(("Version of tornado [%s] is too low, we need 3.0 above" % (tornado.version)))
sys.exit(1)
if (not init()):
print("init Center API Engine Failed")
exit(1)
if (len(sys.argv) != 3):
addr = LISTEN_ADDR
port = LISTEN_PORT
else:
addr = sys.argv[1]
port = int(sys.argv[2])
global RUNNING_PORT
RUNNING_PORT = port
print("To start to run webServer in %s:%d" % (addr, port))
INFO("To start to run webServer in %s:%d" % (addr, port))
runWebServer(addr, port)
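# Hypothetical client sketch, not part of the original module: posts a request in the
# envelope that ApiHandler.post() reads via getArgObj(), assuming that helper parses a
# JSON body. The account values are illustrative; the api name and session uuid are
# the ones used by ApiTestHandler above.
#   import requests
#   requests.post("http://127.0.0.1:8080/api/", json={
#       "api": "octlink.wcrobot.v1.account.APILoginByAccount",
#       "paras": {"account": "admin", "password": "secret"},
#       "session": {"uuid": "00000000000000000000000000000000"},
#   })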
| 25.339921 | 91 | 0.685073 |
7949415fe15386852d43de95c7f2bc04e9365f91
| 2,487 |
py
|
Python
|
samples/openapi3/client/petstore/python-experimental/petstore_api/model/triangle_interface.py
|
chanjarster/openapi-generator
|
f92f8f1e58c59a429295509d8c5df32b100cde22
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/model/triangle_interface.py
|
chanjarster/openapi-generator
|
f92f8f1e58c59a429295509d8c5df32b100cde22
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/model/triangle_interface.py
|
chanjarster/openapi-generator
|
f92f8f1e58c59a429295509d8c5df32b100cde22
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class TriangleInterface(
AnyTypeSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
_required_property_names = set((
'shapeType',
'triangleType',
))
class shapeType(
_SchemaEnumMaker(
enum_value_to_name={
"Triangle": "TRIANGLE",
}
),
StrSchema
):
@classmethod
@property
def TRIANGLE(cls):
return cls("Triangle")
triangleType = StrSchema
def __new__(
cls,
*args: typing.Union[dict, frozendict, str, date, datetime, int, float, decimal.Decimal, None, list, tuple, bytes],
shapeType: shapeType,
triangleType: triangleType,
_configuration: typing.Optional[Configuration] = None,
**kwargs: typing.Type[Schema],
) -> 'TriangleInterface':
return super().__new__(
cls,
*args,
shapeType=shapeType,
triangleType=triangleType,
_configuration=_configuration,
**kwargs,
)
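# Hypothetical construction sketch, not part of the generated module: shapeType is
# constrained to "Triangle" by the enum above, while "EquilateralTriangle" is only an
# illustrative value for the free-form triangleType string.
#   interface = TriangleInterface(shapeType="Triangle", triangleType="EquilateralTriangle")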
| 22.00885 | 174 | 0.618416 |
79494260afb9153a1f9d94dd9508adb533b44866
| 2,619 |
py
|
Python
|
recepmod/tests/test_Main.py
|
thanatosmin/FcgR-binding
|
db54bff479d326b41c74a78d3ca2755a8b7fc7e6
|
[
"MIT"
] | 1 |
2019-08-22T14:56:30.000Z
|
2019-08-22T14:56:30.000Z
|
recepmod/tests/test_Main.py
|
meyer-lab/FcgR-binding
|
db54bff479d326b41c74a78d3ca2755a8b7fc7e6
|
[
"MIT"
] | 32 |
2017-01-11T15:50:20.000Z
|
2019-08-25T19:04:56.000Z
|
recepmod/tests/test_Main.py
|
thanatosmin/FcgR-binding
|
db54bff479d326b41c74a78d3ca2755a8b7fc7e6
|
[
"MIT"
] | 1 |
2018-11-21T22:33:54.000Z
|
2018-11-21T22:33:54.000Z
|
import random
import unittest
import numpy as np
from scipy.stats import norm
from ..StoneModel import StoneModel, ReqFuncSolver, logpdf_sum, StoneMod
def get_random_vars():
kai = random.random()
kx = random.random()
vi = random.randint(1, 30)
R = random.random()
Li = random.random()
return (kai, kx, vi, R, Li)
class TestStoneMethods(unittest.TestCase):
def setUp(self):
self.M = StoneModel()
self.Mold = StoneModel(False)
def test_reqFuncSolver(self):
kai, kx, vi, R, Li = get_random_vars()
diffFunAnon = lambda x: R-(10**x)*(1+vi*Li*kai*(1+kx*(10**x))**(vi-1))
output = ReqFuncSolver(R, kai, Li, vi, kx)
self.assertTrue(abs(diffFunAnon(np.log10(output))) < 1E-8)
self.assertTrue(np.isnan(ReqFuncSolver(R, kai, Li, -10, kx)))
def test_StoneMod(self):
# This test should check that the model output satisfies Rbound = Rtot - Req
kai, kx, vi, R, Li = get_random_vars()
StoneRet = StoneMod(np.log10(R),kai,vi,kx,Li,fullOutput = True)
Req = ReqFuncSolver(R,kai,Li,vi,kx)
self.assertAlmostEqual(R, Req + StoneRet[1], delta = R/1000)
# Test that monovalent ligand follows the binding curve
def test_StoneModTwo(self):
# logR,Ka,v,logKx,L0
# Sweep across ligand concentration
for i in range(100):
L = i / 100.0
StoneRet = StoneMod(0.0, 1.0, 1, 3.0, L)
self.assertAlmostEqual(StoneRet[0], L / (1 + L), delta = 0.0001)
def test_dataImport_kaBruhns(self):
self.assertTrue(self.M.kaBruhns.shape == (6,4))
def test_dataImport_tnpbsa(self):
self.assertTrue(self.M.tnpbsa.shape == (2,))
def test_dataImport_Rquant(self):
self.assertTrue(len(self.M.Rquant) == 6)
def test_Stone_names(self):
self.assertEqual(len(self.M.pNames), self.M.start.shape[0])
self.assertEqual(len(self.Mold.pNames), self.Mold.start.shape[0])
def test_dataImport_mfiAdjMean(self):
self.assertTrue(self.M.mfiAdjMean.shape == (24, 8))
self.assertTrue(self.Mold.mfiAdjMean.shape == (24, 8))
def test_NormalErrorCoef(self):
retVal = self.M.NormalErrorCoef(self.M.start)
self.assertFalse(np.isnan(retVal))
self.assertFalse(np.isinf(retVal))
# Test that our hand-coded logpdf matches the results of SciPy
def test_logpdf(self):
vecIn = np.array([0.01, 0.2, 0.3, 0.4])
        self.assertAlmostEqual(norm.logpdf(vecIn, 0.2, 1).sum(), logpdf_sum(vecIn, 0.2, 1), delta=0.000001)  # assertAlmostEqual's third positional argument is 'places' (an int), so the tolerance must be passed as delta
if __name__ == '__main__':
unittest.main()
| 30.811765 | 101 | 0.635739 |
7949430ebebb1f1280aa46ae43271742c2c75b37
| 10,604 |
py
|
Python
|
api/config.py
|
nitheesh-aot/queue-management
|
f36fd23d92cd1d49fb6fa7f57f799cfa62a1064f
|
[
"Apache-2.0"
] | null | null | null |
api/config.py
|
nitheesh-aot/queue-management
|
f36fd23d92cd1d49fb6fa7f57f799cfa62a1064f
|
[
"Apache-2.0"
] | null | null | null |
api/config.py
|
nitheesh-aot/queue-management
|
f36fd23d92cd1d49fb6fa7f57f799cfa62a1064f
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import dotenv
from pprint import pprint
# Load all the environment variables from a .env file located in some directory above.
dotenv.load_dotenv(dotenv.find_dotenv())
config = {
"production": "config.ProductionConfig",
"prod": "config.ProductionConfig",
"test": "config.TestConfig",
"development": "config.DevelopmentConfig",
"dev": "config.DevelopmentConfig",
"localhost": "config.LocalConfig",
"default": "config.LocalConfig"
}
class BaseConfig(object):
# Set up miscellaneous environment variables.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_TRACK_MODIFICATIONS = False
    TESTING = True
    DEBUG = False
    # Set up logging
    LOGGING_LEVEL = DEBUG  # note: this refers to the class attribute DEBUG above (False), not logging.DEBUG
LOGGING_FORMAT = '[%(asctime)s] %(levelname)-8s (%(name)s) <%(module)s.py>.%(funcName)s: %(message)s'
PRINT_ENABLE = (os.getenv("PRINT_ENABLE","FALSE")).upper() == "TRUE"
SOCKET_STRING = os.getenv('LOG_SOCKETIO', 'WARNING')
ENGINE_STRING = os.getenv('LOG_ENGINEIO', 'WARNING')
# Set up OIDC variables.
SECRET_KEY = os.getenv('SECRET_KEY')
OIDC_OPENID_REALM = os.getenv('OIDC_OPENID_REALM','nest')
OIDC_CLIENT_SECRETS = os.getenv('OIDC_SECRETS_FILE','client_secrets/secrets.json')
OIDC_USER_INFO_ENABLED = True
OIDC_SCOPES = ['openid', 'email', 'profile']
# Set up session and communication variables.
REMEMBER_COOKIE_DURATION = 86400
SESSION_COOKIE_DOMAIN = os.getenv('SERVER_NAME', '')
CORS_ALLOWED_ORIGINS = ["https://" + SESSION_COOKIE_DOMAIN]
# Set up RabbitMQ variables.
ACTIVE_MQ_USER = os.getenv('ACTIVE_MQ_USER', '')
ACTIVE_MQ_PASSWORD = os.getenv('ACTIVE_MQ_PASSWORD', '')
ACTIVE_MQ_HOST = os.getenv('ACTIVE_MQ_HOST', '')
ACTIVE_MQ_PORT = os.getenv('ACTIVE_MQ_PORT', '')
ACTIVE_MQ_URL = 'amqp://{amq_user}:{amq_password}@{amq_host}:{amq_port}'.format(
amq_user=ACTIVE_MQ_USER,
amq_password=ACTIVE_MQ_PASSWORD,
amq_host=ACTIVE_MQ_HOST,
amq_port=ACTIVE_MQ_PORT
)
MARSHMALLOW_SCHEMA_DEFAULT_JIT = "toastedmarshmallow.Jit"
DB_LONG_RUNNING_QUERY = float(os.getenv("DATABASE_LONG_RUNNING_QUERY", '0.5'))
DB_ENGINE = os.getenv('DATABASE_ENGINE', '')
DB_USER = os.getenv('DATABASE_USERNAME', '')
DB_PASSWORD = os.getenv('DATABASE_PASSWORD','')
DB_NAME = os.getenv('DATABASE_NAME','')
DB_HOST = os.getenv('DATABASE_HOST','')
DB_PORT = os.getenv('DATABASE_PORT','')
DB_POOL_TIMEOUT = os.getenv('DATABASE_TIMEOUT_STRING', '')
DB_CONNECT_TIMEOUT = os.getenv('DATABASE_CONNECT_TIMEOUT_STRING', '')
SQLALCHEMY_DATABASE_URI = '{engine}://{user}:{password}@{host}:{port}/{name}'.format(
engine=DB_ENGINE,
user=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT,
name=DB_NAME,
)
SQLALCHEMY_DATABASE_URI_DISPLAY = '{engine}://{user}:<password>@{host}:{port}/{name}'.format(
engine=DB_ENGINE,
user=DB_USER,
host=DB_HOST,
port=DB_PORT,
name=DB_NAME,
)
# Get SQLAlchemy environment variables.
pool_size = int(os.getenv('SQLALCHEMY_POOL_SIZE', '9'))
max_overflow = int(os.getenv('SQLALCHEMY_MAX_OVERFLOW', '18'))
# db_timeout = int(os.getenv('SQLALCHEMY_TIMEOUT', '10'))
# Karims settings
# SQLALCHEMY_ENGINE_OPTIONS = {
# 'pool_size': pool_size,
# 'max_overflow': max_overflow,
# 'pool_pre_ping': True,
# 'pool_timeout': 5,
# 'pool_recycle': 3600,
# 'connect_args': {
# 'connect_timeout': 3
# }
# }
# Try to set some options to avoid long delays.
# SQLALCHEMY_ENGINE_OPTIONS = {
# 'pool_size' : pool_size,
# 'max_overflow' : max_overflow,
# 'pool_pre_ping' : True,
# 'pool_timeout': DB_POOL_TIMEOUT,
# 'pool_recycle': 3600,
# 'connect_args': {
# 'connect_timeout': DB_CONNECT_TIMEOUT,
# 'options' : '-c statement_timeout=1000'
# }
# }
# Get SQLAlchemy environment variables.
pool_size = int(os.getenv('SQLALCHEMY_POOL_SIZE', '9'))
pool_timeout = os.getenv('SQLALCHEMY_POOL_TIMEOUT', '')
connect_timeout_string = os.getenv('SQLALCHEMY_CONNECT_TIMEOUT', '')
max_overflow = int(os.getenv('SQLALCHEMY_MAX_OVERFLOW', '18'))
pool_pre_ping = (os.getenv('SQLALCHEMY_POOL_PRE_PING', 'False')).upper() == "TRUE"
# Try to set some options to avoid long delays.
SQLALCHEMY_ENGINE_OPTIONS = {
'pool_size': pool_size,
'max_overflow': max_overflow,
'pool_pre_ping': pool_pre_ping
}
if pool_timeout != "":
SQLALCHEMY_ENGINE_OPTIONS['pool_timeout'] = pool_timeout
if connect_timeout_string != "":
connect_timeout = int(connect_timeout_string)
# Determine which database engine being used, to use correct syntax.
if "PG8000" in DB_ENGINE.upper():
SQLALCHEMY_ENGINE_OPTIONS['connect_args'] = {'timeout': connect_timeout}
# SQLALCHEMY_ENGINE_OPTIONS['connect_args'] = {'timeout': connect_timeout, 'tcp_user_timeout': 500 }
else:
# SQLALCHEMY_ENGINE_OPTIONS['connect_args'] = { 'connect_timeout': connect_timeout, 'tcp_user_timeout': 500 }
SQLALCHEMY_ENGINE_OPTIONS['connect_args'] = {'connect_timeout': connect_timeout}
print("==> SQLALCHEMY_ENGINE_OPTIONS (Engine: " + DB_ENGINE)
pprint(SQLALCHEMY_ENGINE_OPTIONS)
# Set echo appropriately.
if (os.getenv('SQLALCHEMY_ECHO', "False")).upper() == "TRUE":
SQLALCHEMY_ECHO=True
else:
SQLALCHEMY_ECHO=False
THEQ_FEEDBACK = (os.getenv('THEQ_FEEDBACK','')).upper().replace(" ","").split(",")
SLACK_URL = os.getenv('SLACK_URL', '')
ROCKET_CHAT_URL = os.getenv('ROCKET_CHAT_URL')
SERVICENOW_INSTANCE = os.getenv('SERVICENOW_INSTANCE', '')
SERVICENOW_USER = os.getenv('SERVICENOW_USER', '')
SERVICENOW_PASSWORD = os.getenv('SERVICENOW_PASSWORD', '')
SERVICENOW_TABLE = os.getenv('SERVICENOW_TABLE', '')
SERVICENOW_TENANT = os.getenv('SERVICENOW_TENANT', '')
SERVICENOW_ASSIGN_GROUP = os.getenv('SERVICENOW_ASSIGN_GROUP', '')
VIDEO_PATH = os.getenv('VIDEO_PATH', '')
BACK_OFFICE_DISPLAY = os.getenv("BACK_OFFICE_DISPLAY", "BackOffice")
RECURRING_FEATURE_FLAG = os.getenv("RECURRING_FEATURE_FLAG", "On")
#print(parse_dsn(("postgresql://localhost:5000?connect_timeout=10")))
#quote_ident("connect_timeout", scope)
class LocalConfig(BaseConfig):
DEBUG = True
TESTING = False
ENV = 'dev'
USE_HTTPS = False
PREFERRED_URL_SCHEME = 'http'
ACTIVE_MQ_URL = ''
# For running rabbitmq locally, use the line below.
# ACTIVE_MQ_URL = 'amqp://guest:guest@localhost:5672'
SESSION_COOKIE_DOMAIN = None
CORS_ALLOWED_ORIGINS = ["http://localhost:8080"]
SECRET_KEY = "pancakes"
LOCALHOST_DB_IP = "127.0.0.1"
class DevelopmentConfig(BaseConfig):
DEBUG = True
REDIS_DEBUG = True
TESTING = False
ENV = 'dev'
USE_HTTPS = True
PREFERRED_URL_SCHEME = 'https'
class TestConfig(BaseConfig):
DEBUG = True
REDIS_DEBUG = True
TESTING = False
ENV = 'test'
USE_HTTPS = True
PREFERRED_URL_SCHEME = 'https'
class ProductionConfig(BaseConfig):
DEBUG = True
REDIS_DEBUG = True
TESTING = False
ENV = 'production'
USE_HTTPS = True
PREFERRED_URL_SCHEME = 'https'
def configure_app(app):
# Do basic configuration from config objects and files.
config_name = os.getenv('FLASK_CONFIGURATION', 'default')
app.config.from_object(config[config_name])
app.config.from_pyfile('config.cfg', silent=True)
# Set up various variables used later.
app.config['SOCKET_FLAG'] = logging.DEBUG == debug_string_to_debug_level(app.config['SOCKET_STRING'])
app.config['ENGINE_FLAG'] = logging.DEBUG == debug_string_to_debug_level(app.config['ENGINE_STRING'])
# Set up basic logging for the application.
log_string = os.getenv('LOG_ROOT', "WARNING").upper()
log_level = debug_string_to_debug_level(log_string)
logging.basicConfig(format=app.config['LOGGING_FORMAT'], level=log_level)
temp_logger = logging.getLogger()
if app.config['PRINT_ENABLE']:
print("==> Root logger of '" + temp_logger.name + "' set to level: " + log_string)
def configure_logging(app):
# Set up defaults.
print_flag = app.config['PRINT_ENABLE']
basic_string = os.getenv('LOG_BASIC', "WARNING").upper()
basic_level = 0
if basic_string != "DEFAULT":
basic_level = debug_string_to_debug_level(basic_string)
if print_flag:
print("==> List of loggers being specifically set:")
for name in logging.root.manager.loggerDict:
env_name = make_env_name(name)
log_string = os.getenv(env_name, "None")
if (log_string != "None"):
if print_flag:
print(" --> Logger " + name + " set to level = " + log_string)
module_logger = logging.getLogger(name)
log_level = debug_string_to_debug_level(log_string)
module_logger.setLevel(log_level)
elif (basic_string != "DEFAULT"):
if print_flag:
print(" --> Logger " + name + " set to level LOG_BASIC level of " + basic_string)
module_logger = logging.getLogger(name)
module_logger.setLevel(basic_level)
if print_flag:
print("")
def make_env_name(name):
return ("LOG_" + name.upper().replace('.', '_'))
def debug_string_to_debug_level(debug_string):
input_string = debug_string.lower()
result = -20
if input_string == 'critical':
result = logging.CRITICAL
elif input_string == 'error':
result = logging.ERROR
elif input_string == 'warning':
result = logging.WARNING
elif input_string == 'info':
result = logging.INFO
elif input_string == 'debug':
result = logging.DEBUG
elif input_string == 'notset':
result = logging.NOTSET
elif input_string == '':
result = -10
return result
def debug_level_to_debug_string(debug_level):
if debug_level == logging.CRITICAL:
result = "CRITICAL"
elif debug_level == logging.ERROR:
result = "ERROR"
elif debug_level == logging.WARNING:
result = "WARNING"
elif debug_level == logging.INFO:
result = "INFO"
elif debug_level == logging.DEBUG:
result = "DEBUG"
elif debug_level == logging.NOTSET:
result = "NOTSET"
return result
| 34.767213 | 121 | 0.658902 |
7949432425259a38de25f612f9502de068eefb6d
| 2,241 |
py
|
Python
|
tests/io_mod/test_fileloadermixin.py
|
MarcoJHB/ploomber
|
4849ef6915572f7934392443b4faf138172b9596
|
[
"Apache-2.0"
] | 2,141 |
2020-02-14T02:34:34.000Z
|
2022-03-31T22:43:20.000Z
|
tests/io_mod/test_fileloadermixin.py
|
MarcoJHB/ploomber
|
4849ef6915572f7934392443b4faf138172b9596
|
[
"Apache-2.0"
] | 660 |
2020-02-06T16:15:57.000Z
|
2022-03-31T22:55:01.000Z
|
tests/io_mod/test_fileloadermixin.py
|
MarcoJHB/ploomber
|
4849ef6915572f7934392443b4faf138172b9596
|
[
"Apache-2.0"
] | 122 |
2020-02-14T18:53:05.000Z
|
2022-03-27T22:33:24.000Z
|
from unittest.mock import Mock
import pytest
import pandas as pd
from ploomber import DAG
from ploomber.tasks import SQLDump, NotebookRunner, PythonCallable
from ploomber.products import File
def test_unsupported_extension():
task = SQLDump('SELECT * FROM table',
File('my_file.json'),
DAG(),
name='task',
client=Mock())
with pytest.raises(NotImplementedError):
task.load()
@pytest.mark.parametrize('product, kwargs', [
[File('my_file.csv'), dict()],
[File('my_file.csv'), dict(sep=',')],
],
ids=['simple', 'with-kwargs'])
def test_sqldump(product, kwargs, tmp_directory):
df = pd.DataFrame({'a': [1, 2, 3]})
df.to_csv('my_file.csv', index=False)
task = SQLDump('SELECT * FROM table',
product,
DAG(),
name='task',
client=Mock())
loaded = task.load(**kwargs)
assert df.equals(loaded)
def test_notebookrunner(tmp_directory):
df = pd.DataFrame({'a': [1, 2, 3]})
df.to_csv('my_file.csv', index=False)
task = NotebookRunner('# +tags=["parameters"]', {
'nb': File('nb.ipynb'),
'data': File('my_file.csv')
},
DAG(),
name='task',
ext_in='py')
loaded = task.load('data')
assert df.equals(loaded)
# PythonCallable does not use FileLoaderMixin directly because it gives
# preference to task.unserializer, but if it doesn't exist, it uses
# FileLoaderMixin internal API
@pytest.mark.parametrize(
'product, kwargs', [
[File('my_file.csv'), dict()],
[File('my_file.csv'), dict(sep=',')],
[{
'a': File('my_file.csv'),
'b': File('another.csv')
}, dict(key='a')],
],
ids=['simple', 'with-kwargs', 'with-multiple-products'])
def test_pythoncallable(tmp_directory, product, kwargs):
df = pd.DataFrame({'a': [1, 2, 3]})
df.to_csv('my_file.csv', index=False)
def callable_(product):
pass
task = PythonCallable(callable_, product, DAG(), name='task')
loaded = task.load(**kwargs)
assert df.equals(loaded)
| 27.329268 | 71 | 0.561803 |
79494419a87a7121aa5a7ca9fe0e9b555c373790
| 492 |
py
|
Python
|
robots/small/strategies/2019/zuta/init.py
|
memristor/mep2
|
bc5cddacba3d740f791f3454b8cb51bda83ce202
|
[
"MIT"
] | 5 |
2018-11-27T15:15:00.000Z
|
2022-02-10T21:44:13.000Z
|
robots/small/strategies/2019/zuta/init.py
|
memristor/mep2
|
bc5cddacba3d740f791f3454b8cb51bda83ce202
|
[
"MIT"
] | 2 |
2018-10-20T15:48:40.000Z
|
2018-11-20T05:11:33.000Z
|
robots/small/strategies/2019/zuta/init.py
|
memristor/mep2
|
bc5cddacba3d740f791f3454b8cb51bda83ce202
|
[
"MIT"
] | 1 |
2020-02-07T12:44:47.000Z
|
2020-02-07T12:44:47.000Z
|
State.PS_veliki=_State(False,name='PS_veliki',shared=True)
State.PS_mali=_State(False,name='PS_mali',shared=True)
from core.Util import *
State.goldenium_activated = _State(0)
State.goldenium_picked = _State(0)
State.back = _State(0)
State.must_stuck = _State(0)
def run():
r.conf_set('send_status_interval', 10)
State.color = 'zuta'
State.startpos = (1195, -610)
r.setpos(1500-175-138,-1000+310+80,90)
napgold(0)
nazgold(0)
rrucica(0)
lrucica(0)
rfliper(0)
lfliper(0)
r.speed(50)
| 24.6 | 58 | 0.735772 |
794944c0bfa6a0364d93e1e619befcdaf173ced1
| 403 |
py
|
Python
|
omaha_server/omaha/migrations/0030_version_is_critical.py
|
makar21/omaha-server
|
b84cdf6e67d9106e7a86b447204de4f82397b019
|
[
"Apache-2.0"
] | 8 |
2018-06-25T07:20:17.000Z
|
2021-02-07T20:01:04.000Z
|
omaha_server/omaha/migrations/0030_version_is_critical.py
|
makar21/omaha-server
|
b84cdf6e67d9106e7a86b447204de4f82397b019
|
[
"Apache-2.0"
] | 8 |
2018-06-22T21:56:27.000Z
|
2020-06-25T15:22:56.000Z
|
omaha_server/omaha/migrations/0030_version_is_critical.py
|
dentalwings/omaha-server
|
3d8e18c8f4aac4eb16445c0f3160ed1fc2fc8de5
|
[
"Apache-2.0"
] | 11 |
2019-01-22T01:36:42.000Z
|
2022-03-09T01:41:32.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-02-28 07:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('omaha', '0029_merge'),
]
operations = [
migrations.AddField(
model_name='version',
name='is_critical',
field=models.BooleanField(default=False),
),
]
| 19.190476 | 53 | 0.578164 |
794946404ed0f55a2ac6b54dc67c2f8afc1a58e0
| 583 |
py
|
Python
|
accounts/migrations/0018_auto_20201116_1849.py
|
simplymarco/Sistema-de-Controle-de-Processos-Titulados
|
f4e3bc7260611b848096d5fbfb1f72be9d440f1f
|
[
"MIT"
] | null | null | null |
accounts/migrations/0018_auto_20201116_1849.py
|
simplymarco/Sistema-de-Controle-de-Processos-Titulados
|
f4e3bc7260611b848096d5fbfb1f72be9d440f1f
|
[
"MIT"
] | null | null | null |
accounts/migrations/0018_auto_20201116_1849.py
|
simplymarco/Sistema-de-Controle-de-Processos-Titulados
|
f4e3bc7260611b848096d5fbfb1f72be9d440f1f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2020-11-16 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0017_auto_20201112_1823'),
]
operations = [
migrations.AddField(
model_name='processo',
name='pdf',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
migrations.AlterField(
model_name='processo',
name='interessado',
field=models.ManyToManyField(to='accounts.Interessado'),
),
]
| 24.291667 | 73 | 0.588336 |
79494776ae44f3d3fe82741d9c341356ed28d775
| 976 |
py
|
Python
|
addPixel.py
|
krishna1401/Digital-Image-Processing
|
47a4da4bef9d08708ac84174b0fcd0ced6a8b5e2
|
[
"MIT"
] | 1 |
2021-11-28T12:06:11.000Z
|
2021-11-28T12:06:11.000Z
|
addPixel.py
|
krishna1401/Digital-Image-Processing
|
47a4da4bef9d08708ac84174b0fcd0ced6a8b5e2
|
[
"MIT"
] | null | null | null |
addPixel.py
|
krishna1401/Digital-Image-Processing
|
47a4da4bef9d08708ac84174b0fcd0ced6a8b5e2
|
[
"MIT"
] | null | null | null |
# Program to add a user-entered value to each colour code of the image
import cv2
import numpy as nm
def addValueToPixel(image, value):
#Objective: Add Value to each Pixel of an Image
#Input: Original Image and Value
#Output: Resultant Image
height = image.shape[0]
width = image.shape[1]
for i in range(0, height):
for j in range(0, width):
            # OpenCV stores pixels in B, G, R order; the variable names below do not
            # match that order, but the same offset is added to every channel, so the
            # result is unchanged.
            R, B, G = image[i, j]
#Updating Red Color
R += value
R = R if R <= 255 else R-255
#Updating Blue Color
B += value
B = B if B <= 255 else B-255
#Updating Green Color
G += value
G = G if G <= 255 else G-255
image[i, j] = [R, B, G]
return image
img = cv2.imread('image.jpeg')
value = int(input('Enter the Value to be added to Each Pixel : '))  # input() returns a string in Python 3, so cast before arithmetic
output = addValueToPixel(img,value)
cv2.imshow('image',output)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 25.684211 | 67 | 0.561475 |
794947b03d7f8fd2f236bc5692bc0a8813387359
| 4,809 |
py
|
Python
|
marvin/triage.py
|
ryantm/marvin-mk2
|
9a6670337d258e0e5e0207a88ce72aae2881ed90
|
[
"MIT"
] | null | null | null |
marvin/triage.py
|
ryantm/marvin-mk2
|
9a6670337d258e0e5e0207a88ce72aae2881ed90
|
[
"MIT"
] | null | null | null |
marvin/triage.py
|
ryantm/marvin-mk2
|
9a6670337d258e0e5e0207a88ce72aae2881ed90
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from datetime import timezone
from typing import Any
from gidgethub.aiohttp import GitHubAPI
from marvin import gh_util
from marvin import team
from marvin.command_router import CommandRouter
from marvin.gh_util import set_issue_status
command_router = CommandRouter()
AWAITING_REVIEWER_TIMEOUT_SECONDS = 60 * 60 * 24 * 3 # three days
AWAITING_MERGER_TIMEOUT_SECONDS = 60 * 60 * 24 * 3 # three days
async def timeout_awaiting_reviewer(
gh: GitHubAPI, token: str, repository_name: str
) -> None:
print("Timing out awaiting_reviewer PRs")
search_results = gh_util.search_issues(
gh,
token,
query_parameters=[
f"repo:{repository_name}",
"is:open",
"is:pr",
"label:awaiting_reviewer",
"label:marvin",
"sort:updated-asc", # stale first
],
)
async for issue in search_results:
last_updated = datetime.strptime(issue["updated_at"], "%Y-%m-%dT%H:%M:%S%z")
age = datetime.now(timezone.utc) - last_updated
if age.total_seconds() < AWAITING_REVIEWER_TIMEOUT_SECONDS:
break
print(
f"awaiting_reviewer -> needs_reviewer: #{issue['number']} ({issue['title']})"
)
await set_issue_status(issue, "needs_reviewer", gh, token)
async def timeout_awaiting_merger(
gh: GitHubAPI, token: str, repository_name: str
) -> None:
print("Timing out awaiting_merger PRs")
search_results = gh_util.search_issues(
gh,
token,
query_parameters=[
f"repo:{repository_name}",
"is:open",
"is:pr",
"label:awaiting_merger",
"label:marvin",
"sort:updated-asc", # stale first
],
)
async for issue in search_results:
last_updated = datetime.strptime(issue["updated_at"], "%Y-%m-%dT%H:%M:%S%z")
age = datetime.now(timezone.utc) - last_updated
if age.total_seconds() < AWAITING_MERGER_TIMEOUT_SECONDS:
break
print(f"awaiting_merger -> needs_merger: #{issue['number']} ({issue['title']})")
await set_issue_status(issue, "needs_merger", gh, token)
async def assign_mergers(gh: GitHubAPI, token: str, repository_name: str) -> None:
print("Assigning mergers to needs_merger PRs")
search_results = gh_util.search_issues(
gh,
token,
query_parameters=[
f"repo:{repository_name}",
"is:open",
"is:pr",
"label:needs_merger",
"label:marvin",
"sort:created-asc", # oldest first
],
)
async for issue in search_results:
reviewer = await team.get_reviewer(
gh, token, issue, merge_permission_needed=True
)
if reviewer is not None:
print(f"Requesting review (merge) from {reviewer} for #{issue['number']}.")
await gh_util.request_review(
issue["pull_request"]["url"], reviewer, gh, token
)
await set_issue_status(issue, "awaiting_merger", gh, token)
else:
print(f"No reviewer with merge permission found for #{issue['number']}.")
async def assign_reviewers(gh: GitHubAPI, token: str, repository_name: str) -> None:
print("Assigning reviewers to needs_reviewer PRs")
search_results = gh_util.search_issues(
gh,
token,
query_parameters=[
f"repo:{repository_name}",
"is:open",
"is:pr",
"label:needs_reviewer",
"label:marvin",
"sort:created-asc", # oldest first
],
)
async for issue in search_results:
reviewer = await team.get_reviewer(
gh, token, issue, merge_permission_needed=False
)
if reviewer is not None:
print(f"Requesting review from {reviewer} for #{issue['number']}.")
await gh_util.request_review(
issue["pull_request"]["url"], reviewer, gh, token
)
await set_issue_status(issue, "awaiting_reviewer", gh, token)
else:
print(f"No reviewer found for #{issue['number']}.")
@command_router.register_command("/marvin triage")
async def run_triage(gh: GitHubAPI, token: str, **kwargs: Any) -> None:
repositories = await gh_util.get_installation_repositories(gh, token)
for repository in repositories:
repository_name = repository["full_name"]
print(f"Running triage on {repository_name}")
await timeout_awaiting_reviewer(gh, token, repository_name)
await timeout_awaiting_merger(gh, token, repository_name)
await assign_mergers(gh, token, repository_name)
await assign_reviewers(gh, token, repository_name)
| 34.847826 | 89 | 0.61884 |
7949482ee6ca52246777e8a80ab3dd857d23495b
| 7,488 |
py
|
Python
|
dynamodbusermanager/export.py
|
dacut/dynamodb-user-manager
|
13fab16bfd5dc4019a73f3dd5fc5f62001d52c0c
|
[
"Apache-2.0"
] | null | null | null |
dynamodbusermanager/export.py
|
dacut/dynamodb-user-manager
|
13fab16bfd5dc4019a73f3dd5fc5f62001d52c0c
|
[
"Apache-2.0"
] | null | null | null |
dynamodbusermanager/export.py
|
dacut/dynamodb-user-manager
|
13fab16bfd5dc4019a73f3dd5fc5f62001d52c0c
|
[
"Apache-2.0"
] | null | null | null |
"""\
Utility for exporting user/group files to DynamoDB
Usage: {argv0} [options]
Options:
-h | --help
Show this usage information.
--passwd <filename> | --password <filename>
Read password file from <filename> instead of {PASSWD_FILE}.
--no-passwd | --no-password
Skip reading the password file. If specified, this also skips reading
the shadow file and exporting any users.
--shadow <filename>
Read shadow password file from <filename> instead of {SHADOW_FILE}.
--no-shadow
Skip reading the shadow password file.
--group <filename>
        Read group file from <filename> instead of {GROUP_FILE}.
--no-group
Skip reading the group file. If specified, this also skips reading the
gshadow file and exporting any groups.
--gshadow <filename>
Read group shadow password file from <filename> instead of
{GSHADOW_FILE}.
--no-gshadow
Skip reading the group shadow password file.
    --users-table <name>
        Write users to this DynamoDB table.
    --groups-table <name>
        Write groups to this DynamoDB table.
--region <region>
Use the specified AWS region.
--profile <profile>
Use the specified profile from ~/.aws/credentials.
"""
# pylint: disable=C0103,R0912,R0914,R0915
from getopt import getopt, GetoptError
from sys import argv, stdout, stderr
from time import time
from typing import Any, Dict, Optional, Sequence, TextIO
from boto3.session import Session
import botocore # pylint: disable=W0611
from botocore.exceptions import ClientError
from .constants import GROUP_FILE, GSHADOW_FILE, PASSWD_FILE, SHADOW_FILE
from .group import Group
from .shadow import ShadowDatabase
from .user import User
def user_to_dynamodb_item(user: User) -> Dict[str, Any]:
"""
Convert a User to a dict item for insertion into DynamoDB.
"""
item = {
"Name": {"S": user.name},
"UID": {"N": str(user.uid)},
"GID": {"N": str(user.gid)},
}
# Empty strings cannot be stored in DynamoDB.
if user.home:
item["Home"] = {"S": user.home}
if user.shell:
item["Shell"] = {"S": user.shell}
if user.real_name:
item["RealName"] = {"S": user.real_name}
if user.password:
item["Password"] = {"S": user.password}
if user.last_password_change_date is not None:
item["LastPasswordChangeDate"] = {
"S": user.last_password_change_date.isoformat()
}
if user.password_age_min_days is not None:
item["PasswordAgeMinDays"] = {"N": str(user.password_age_min_days)}
if user.password_age_max_days is not None:
item["PasswordAgeMaxDays"] = {"N": str(user.password_age_max_days)}
if user.password_warn_days is not None:
item["PasswordWarnDays"] = {"N": str(user.password_warn_days)}
if user.account_expire_date is not None:
item["AccountExpireDate"] = {"S": user.account_expire_date.isoformat()}
if user.ssh_public_keys:
item["SSHPublicKeys"] = {"SS": list(user.ssh_public_keys)}
return item
def group_to_dynamodb_item(group: Group) -> Dict[str, Any]:
"""
Convert a Group to a dict item for insertion into DynamoDB.
"""
item = {
"Name": {"S": group.name},
"GID": {"N": str(group.gid)},
}
if group.password is not None:
item["Password"] = {"S": group.password}
if group.administrators:
item["Administrators"] = {"SS": list(group.administrators)}
if group.members:
item["Members"] = {"SS": list(group.members)}
return item
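# Illustration only, not part of the original module: for a group named "wheel" with
# GID 10, one member "alice", and no password or administrators, the function above
# would produce
#   {"Name": {"S": "wheel"}, "GID": {"N": "10"}, "Members": {"SS": ["alice"]}}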
def main(args: Optional[Sequence[str]] = None) -> int:
"""
main(args: Optional[Sequence[str]]) -> int
Main entry point of the export program.
"""
passwd_file: Optional[str] = PASSWD_FILE
shadow_file: Optional[str] = SHADOW_FILE
group_file: Optional[str] = GROUP_FILE
gshadow_file: Optional[str] = GSHADOW_FILE
users_table: Optional[str] = None
groups_table: Optional[str] = None
boto_kw: Dict[str, Any] = {}
if args is None:
args = argv[1:]
try:
opts, args = getopt(
list(args), "h",
["help", "passwd=", "password=", "no-passwd", "no-password",
"shadow=", "no-shadow", "group=", "gshadow=", "no-gshadow",
"users-table=", "groups-table=", "region=", "profile="])
for opt, val in opts:
if opt in ("-h", "--help",):
usage(stdout)
return 0
if opt in ("--passwd", "--password",):
passwd_file = val
if opt in ("--no-passwd", "--no-password",):
passwd_file = None
if opt == "--shadow":
shadow_file = val
if opt == "--no-shadow":
shadow_file = None
if opt == "--group":
group_file = val
if opt == "--no-group":
group_file = None
if opt == "--gshadow":
gshadow_file = val
if opt == "--no-gshadow":
gshadow_file = None
if opt == "--users-table":
users_table = val
if opt == "--groups-table":
groups_table = val
if opt == "--region":
boto_kw["region_name"] = val
if opt == "--profile":
boto_kw["profile_name"] = val
except GetoptError as e:
print(str(e), file=stderr)
return 1
if args:
print(f"Unknown argument {args[0]}", file=stderr)
usage()
return 1
start = time()
shadow_db = ShadowDatabase(skip_load=True)
if passwd_file is not None:
shadow_db.load_passwd_file(passwd_file)
if shadow_file is not None:
        shadow_db.load_gshadow_file(shadow_file)  # note: the shadow file is passed to the gshadow loader here; a shadow-specific loader may be intended
if group_file is not None:
shadow_db.load_group_file(group_file)
if gshadow_file is not None:
shadow_db.load_gshadow_file(gshadow_file)
elapsed = "%.2f" % (time() - start)
print(f"Imported {len(shadow_db.users)} user(s) and "
f"{len(shadow_db.groups)} group(s) in {elapsed} s.")
ddb = Session(**boto_kw).client("dynamodb")
users_exported = groups_exported = 0
start = time()
if passwd_file is not None and users_table is not None:
for user in shadow_db.users.values():
item = user_to_dynamodb_item(user)
try:
ddb.put_item(Item=item, TableName=users_table)
users_exported += 1
except ClientError as e:
print(f"Failed to export {user.name}: item={item}: {e}",
file=stderr)
if group_file is not None and groups_table is not None:
for group in shadow_db.groups.values():
item = group_to_dynamodb_item(group)
try:
ddb.put_item(Item=item, TableName=groups_table)
groups_exported += 1
except ClientError as e:
print(f"Failed to export {group.name}: item={item}: {e}",
file=stderr)
elapsed = "%.2f" % (time() - start)
print(f"Exported {users_exported} user(s) and {groups_exported} "
f"group(s) in {elapsed} s.")
return 0
def usage(fd: TextIO = stderr) -> None:
"""
Print usage information to the specified descriptor.
"""
fd.write(__doc__.format(argv0=argv[0]))
fd.flush()
| 29.832669 | 79 | 0.588542 |
794948c61591c26efba63c85529362d49b124e23
| 5,387 |
py
|
Python
|
GBDT/gbdt.py
|
cyckun/GBDT_Simple_Tutorial
|
20a8d1f53453291e55f14b268fa5c4f225d68d1c
|
[
"Apache-2.0"
] | null | null | null |
GBDT/gbdt.py
|
cyckun/GBDT_Simple_Tutorial
|
20a8d1f53453291e55f14b268fa5c4f225d68d1c
|
[
"Apache-2.0"
] | null | null | null |
GBDT/gbdt.py
|
cyckun/GBDT_Simple_Tutorial
|
20a8d1f53453291e55f14b268fa5c4f225d68d1c
|
[
"Apache-2.0"
] | null | null | null |
"""
Created on :2019/03/28
@author: Freeman, feverfc1994
"""
import abc
import math
import logging
import pandas as pd
from GBDT.decision_tree import Tree
from mpyc.statistics import (mean, variance, stdev, pvariance, pstdev,
mode, median, median_low, median_high)
from mpyc.runtime import mpc
import numpy as np
def scale_to_int(f):
if issubclass(secnum, mpc.Integer):
scale = lambda a: secnum(int(round(a * f))) # force Python integers
else:
scale = lambda a: secnum(float(a)) # force Python floats
# print("scale = ", np.vectorize(scale))
return np.vectorize(scale)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
secnum = mpc.SecFlt()
class SquaresError:
def __init__(self):
#self.mpc = mpc.run(mpc.start())
pass
def initialize_f_0(self, data):
#print("type:", type(data['label']))
# data['f_0'] = data['label'].mean()
#data['f_0'] = mean((data['label']))
data['f_0'] = mean(data['label'].values.tolist())
return mean(data['label'].values.tolist())
def calculate_residual(self, data, iter):
print("enter calculate:;")
res_name = 'res_' + str(iter)
f_prev_name = 'f_' + str(iter - 1)
label_list = data['label'].values.tolist()
f_prev_name_list = data[f_prev_name].values.tolist()
print("type fo fpreo list", type(f_prev_name_list[0]), f_prev_name)
new_list = []
for i in range(len(label_list)):
ele = mpc.sub(label_list[i], f_prev_name_list[i])
new_list.append(ele)
data[res_name] = pd.Series((ele for ele in new_list))
def update_f_m(self, data, trees, iter, learning_rate, logger):
f_prev_name = 'f_' + str(iter - 1)
f_m_name = 'f_' + str(iter)
data[f_m_name] = data[f_prev_name]
print(f_m_name)
print("t;dfd:", type(data[f_m_name].values.tolist()[0]))
for leaf_node in trees[iter].leaf_nodes:
# print("xxx:",type(data.loc[leaf_node.data_index, f_m_name]),data.loc[leaf_node.data_index, f_m_name])
tmp = data.loc[leaf_node.data_index, f_m_name]
# data.loc[leaf_node.data_index, f_m_name] = mpc.run(mpc.output(tmp.values.tolist()[0])) + learning_rate * leaf_node.predict_value # cipher and plain
tmp1 = scale_to_int(2)(learning_rate * leaf_node.predict_value)
data.loc[leaf_node.data_index, f_m_name] = mpc.add(tmp.values.tolist()[0], tmp1) # cipher and plain
        # print the train loss of each tree
self.get_train_loss(data['label'], data[f_m_name], iter, logger)
print("data f_m_nme type:", type(data[f_m_name].values.tolist()[0]))
def update_leaf_values(self, targets, y):
tmp = targets.values.tolist()
return mpc.run(mpc.output(mean(tmp)))
#return targets.mean()
def get_train_loss(self, y, f, iter, logger):
#loss = ((y - f) ** 2).mean()
loss = mpc.SecInt(1)
        # logger.info(('Tree %d: mse_loss:%.4f' % (iter, loss)))
class GradientBoostingRegressor:
def __init__(self, learning_rate, n_trees, max_depth,
min_samples_split=2, is_log=False, is_plot=False):
self.loss = SquaresError()
self.learning_rate = learning_rate
self.n_trees = n_trees
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.features = None
self.trees = {}
self.f_0 = {}
self.is_log = is_log
self.is_plot = is_plot
def fit(self, data):
"""
        :param data: pandas.DataFrame, the feature data of the training set
"""
        # drop the leading id column and the trailing label column to keep only the feature names
        self.features = list(data.columns)[1: -1]  # note: feature names stay in plaintext
        # initialize f_0(x)
        # for the squared loss, the initial f_0(x) is simply the mean of y
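        # A worked example of this step: if the label column held
        # [1.0, 2.0, 3.0], every row would start from f_0 = mean(y) = 2.0
        # before the first tree is fit.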
# print("type pp-- =", type(data))
self.f_0 = self.loss.initialize_f_0(data)
        # for m = 1, 2, ..., M
logger.handlers[0].setLevel(logging.INFO if self.is_log else logging.CRITICAL)
for iter in range(1, self.n_trees + 1):
if len(logger.handlers) > 1:
logger.removeHandler(logger.handlers[-1])
fh = logging.FileHandler('results/NO.{}_tree.log'.format(iter), mode='w', encoding='utf-8')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
            # compute the negative gradient -- for squared error this is just the residual
            logger.info(('-----------------------------Building tree %d-----------------------------' % iter))
self.loss.calculate_residual(data, iter)
target_name = 'res_' + str(iter)
self.trees[iter] = Tree(data, self.max_depth, self.min_samples_split,
self.features, self.loss, target_name, logger)
self.loss.update_f_m(data, self.trees, iter, self.learning_rate, logger)
def predict(self, data):
data['f_0'] = self.f_0
for iter in range(1, self.n_trees + 1):
f_prev_name = 'f_' + str(iter - 1)
f_m_name = 'f_' + str(iter)
data[f_m_name] = data[f_prev_name] + \
self.learning_rate * \
data.apply(lambda x: self.trees[iter].root_node.get_predict_value(x), axis=1)
data['predict_value'] = data[f_m_name]
| 39.036232 | 162 | 0.597178 |
794948d95e5e99942c9469bae18505499025d469
| 9,127 |
py
|
Python
|
owtf/api/handlers/targets.py
|
Udbhavbisarya23/owtf
|
27623937677caf975569f8de8af7983ca57611bc
|
[
"BSD-3-Clause"
] | 3 |
2019-09-28T14:10:02.000Z
|
2022-01-10T04:04:17.000Z
|
owtf/api/handlers/targets.py
|
justdvnsh/owtf
|
3a543b4eb2a7ad67155eb96dd2d99efbc181498d
|
[
"BSD-3-Clause"
] | 3 |
2021-03-26T00:33:28.000Z
|
2022-02-13T21:08:52.000Z
|
owtf/api/handlers/targets.py
|
justdvnsh/owtf
|
3a543b4eb2a7ad67155eb96dd2d99efbc181498d
|
[
"BSD-3-Clause"
] | 2 |
2019-06-10T02:43:25.000Z
|
2020-10-31T15:45:57.000Z
|
"""
owtf.api.handlers.targets
~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from owtf.api.handlers.base import APIRequestHandler
from owtf.lib import exceptions
from owtf.lib.exceptions import InvalidTargetReference, APIError
from owtf.managers.target import (
add_targets,
delete_target,
get_target_config_by_id,
get_target_config_dicts,
get_targets_by_severity_count,
search_target_configs,
update_target,
)
__all__ = ["TargetConfigSearchHandler", "TargetSeverityChartHandler", "TargetConfigHandler"]
class TargetConfigHandler(APIRequestHandler):
"""Manage target config data."""
SUPPORTED_METHODS = ["GET", "POST", "PUT", "PATCH", "DELETE"]
def get(self, target_id=None):
"""Get target config data by id or fetch all target configs.
**Example request**:
.. sourcecode:: http
GET /api/v1/targets/2 HTTP/1.1
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
{
"status": "success",
"data": {
"top_url": "https://google.com:443",
"top_domain": "com",
"target_url": "https://google.com",
"max_user_rank": 0,
"url_scheme": "https",
"host_path": "google.com",
"ip_url": "https://172.217.10.238",
"host_ip": "172.217.10.238",
"max_owtf_rank": -1,
"port_number": "443",
"host_name": "google.com",
"alternative_ips": "['172.217.10.238']",
"scope": true,
"id": 2
}
}
"""
try:
# If no target_id, means /target is accessed with or without filters
if not target_id:
# Get all filter data here, so that it can be passed
filter_data = dict(self.request.arguments)
self.success(get_target_config_dicts(self.session, filter_data))
else:
self.success(get_target_config_by_id(self.session, target_id))
except InvalidTargetReference:
raise APIError(400, "Invalid target reference provided")
def post(self, target_id=None):
"""Add a target to the current session.
**Example request**:
.. sourcecode:: http
POST /api/v1/targets/ HTTP/1.1
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 Created
Content-Length: 0
Content-Type: application/json
{
"status": "success",
"data": null
}
"""
if (target_id) or (not self.get_argument("target_url", default=None)): # How can one post using an id xD
raise APIError(400, "Incorrect query parameters")
try:
add_targets(self.session, dict(self.request.arguments)["target_url"])
self.set_status(201) # Stands for "201 Created"
self.success(None)
except exceptions.DBIntegrityException:
raise APIError(400, "An unknown exception occurred when performing a DB operation")
except exceptions.UnresolvableTargetException:
raise APIError(400, "The target url can not be resolved")
def put(self, target_id=None):
return self.patch(target_id)
def patch(self, target_id=None):
"""Update a target.
**Example request**:
.. sourcecode:: http
PATCH /api/v1/targets/1 HTTP/1.1
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": "success",
"data": null
}
"""
if not target_id or not self.request.arguments:
raise APIError(400, "Incorrect query parameters")
try:
patch_data = dict(self.request.arguments)
update_target(self.session, patch_data, id=target_id)
self.success(None)
except InvalidTargetReference:
raise APIError(400, "Invalid target reference provided")
def delete(self, target_id=None):
"""Delete a target.
**Example request**:
.. sourcecode:: http
DELETE /api/v1/targets/4 HTTP/1.1
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": "success",
"data": null
}
"""
if not target_id:
raise APIError(400, "Missing target_id")
try:
delete_target(self.session, id=target_id)
self.success(None)
except InvalidTargetReference:
raise APIError(400, "Invalid target reference provided")
class TargetConfigSearchHandler(APIRequestHandler):
"""Filter targets."""
SUPPORTED_METHODS = ["GET"]
def get(self):
"""Get target config data based on user filter.
**Example request**:
.. sourcecode:: http
GET /api/v1/targets/search/?limit=100&offset=0&target_url=google HTTP/1.1
Accept: application/json, text/javascript, */*; q=0.01
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
{
"status": "success",
"data": {
"records_total": 4,
"records_filtered": 2,
"data": [
{
"top_url": "https://google.com:443",
"top_domain": "com",
"target_url": "https://google.com",
"max_user_rank": -1,
"url_scheme": "https",
"host_path": "google.com",
"ip_url": "https://172.217.10.238",
"host_ip": "172.217.10.238",
"max_owtf_rank": -1,
"port_number": "443",
"host_name": "google.com",
"alternative_ips": "['172.217.10.238']",
"scope": true,
"id": 2
},
{
"top_url": "http://google.com:80",
"top_domain": "com",
"target_url": "http://google.com",
"max_user_rank": -1,
"url_scheme": "http",
"host_path": "google.com",
"ip_url": "http://172.217.10.238",
"host_ip": "172.217.10.238",
"max_owtf_rank": -1,
"port_number": "80",
"host_name": "google.com",
"alternative_ips": "['172.217.10.238']",
"scope": true,
"id": 1
}
]
}
}
"""
try:
filter_data = dict(self.request.arguments)
filter_data["search"] = True
self.success(search_target_configs(self.session, filter_data=filter_data))
except exceptions.InvalidParameterType:
raise APIError(400, "Invalid parameter type provided")
class TargetSeverityChartHandler(APIRequestHandler):
"""Get targets with severity."""
SUPPORTED_METHODS = ["GET"]
def get(self):
"""Get data for target severity chart.
**Example request**:
.. sourcecode:: http
GET /api/targets/severitychart/ HTTP/1.1
X-Requested-With: XMLHttpRequest
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
{
"status": "success",
"data": {
"data": [
{
"color": "#A9A9A9",
"id": 0,
"value": 100,
"label": "Not Ranked"
}
]
}
}
"""
try:
self.success(get_targets_by_severity_count(self.session))
except exceptions.InvalidParameterType:
raise APIError(400, "Invalid parameter type provided")
| 31.912587 | 113 | 0.486469 |
79494a7c77cbde989c2d9348c367accd2829c647
| 5,638 |
py
|
Python
|
pandas/tests/indexes/multi/test_conversion.py
|
Nikoleta-v3/pandas
|
58a59bd606911f11a4f679df18a00cfc25536c30
|
[
"BSD-3-Clause"
] | 1 |
2018-11-11T22:18:13.000Z
|
2018-11-11T22:18:13.000Z
|
pandas/tests/indexes/multi/test_conversion.py
|
Nikoleta-v3/pandas
|
58a59bd606911f11a4f679df18a00cfc25536c30
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/indexes/multi/test_conversion.py
|
Nikoleta-v3/pandas
|
58a59bd606911f11a4f679df18a00cfc25536c30
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, MultiIndex, date_range
from pandas.compat import range
def test_tolist(idx):
result = idx.tolist()
exp = list(idx.values)
assert result == exp
def test_to_frame():
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
# See GH-22580
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False, name=['first', 'second'])
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame(name=['first', 'second'])
expected.index = index
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
msg = "'name' must be a list / sequence of column names."
with pytest.raises(TypeError, match=msg):
index.to_frame(name='first')
msg = "'name' should have same length as number of levels on index."
with pytest.raises(ValueError, match=msg):
index.to_frame(name=['first'])
# Tests for datetime index
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
# See GH-22580
result = index.to_frame(index=False, name=['first', 'second'])
expected = DataFrame(
{'first': np.repeat(np.arange(5, dtype='int64'), 3),
'second': np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
result = index.to_frame(name=['first', 'second'])
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical():
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_roundtrip_pickle_with_tz():
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_pickle(indices):
unpickled = tm.round_trip_pickle(indices)
assert indices.equals(unpickled)
original_name, indices.name = indices.name, 'foo'
unpickled = tm.round_trip_pickle(indices)
assert indices.equals(unpickled)
indices.name = original_name
def test_to_series(idx):
# assert that we are creating a copy of the index
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_to_series_with_arguments(idx):
# GH18699
# index kwarg
s = idx.to_series(index=idx)
assert s.values is not idx.values
assert s.index is idx
assert s.name == idx.name
# name kwarg
idx = idx
s = idx.to_series(name='__test')
assert s.values is not idx.values
assert s.index is not idx
assert s.name != idx.name
| 32.589595 | 75 | 0.581589 |
79494bf12d4787946448d0e543c2663e098c56d4
| 2,748 |
py
|
Python
|
web_blog/app.py
|
Shubhraaaj/Blogging
|
b57d3bceed2c9ac661f0a3eafc8c2b948a5ee3ce
|
[
"Apache-2.0"
] | null | null | null |
web_blog/app.py
|
Shubhraaaj/Blogging
|
b57d3bceed2c9ac661f0a3eafc8c2b948a5ee3ce
|
[
"Apache-2.0"
] | null | null | null |
web_blog/app.py
|
Shubhraaaj/Blogging
|
b57d3bceed2c9ac661f0a3eafc8c2b948a5ee3ce
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template, request, session, make_response
# '__main__
from web_blog.common.database import Database
from web_blog.models.blog import Blog
from web_blog.models.post import Post
from web_blog.models.user import User
app = Flask(__name__)
app.secret_key = "jose"
@app.route('/')
def home_template():
return render_template('home.html')
@app.route('/login')
def login_template():
return render_template('login.html')
@app.route('/register')
def register_template():
return render_template('register.html')
@app.before_first_request
def initialize_database():
Database.initialize()
@app.route('/auth/login', methods=['POST'])
def login_user():
email = request.form['email']
password = request.form['password']
if User.login_valid(email, password):
User.login(email)
else:
session['email'] = None
return render_template("profile.html", email=session['email'])
@app.route('/auth/register', methods=['POST'])
def register_user():
email = request.form['email']
password = request.form['password']
User.register(email, password)
return render_template("profile.html", email=session['email'])
@app.route('/blogs/<string:user_id>')
@app.route('/blogs')
def user_blogs(user_id=None):
if user_id is not None:
user = User.get_by_id(user_id)
else:
user = User.get_by_email(session['email'])
blogs = user.get_blogs()
return render_template("user_blogs.html", blogs=blogs, email=user.email)
@app.route('/blogs/new', methods=['POST', 'GET'])
def create_new_blog():
if request.method == 'GET':
return render_template('new_blog.html')
else:
title = request.form['title']
description = request.form['description']
user = User.get_by_email(session['email'])
new_blog = Blog(user.email, title, description, user._id)
new_blog.save_to_mongo()
return make_response(user_blogs(user._id))
@app.route('/posts/<string:blog_id>')
def blog_posts(blog_id):
blog = Blog.get_from_mongo(blog_id)
posts = blog.get_posts()
return render_template('posts.html', posts=posts, blog_name=blog.title, blog_id=blog._id)
@app.route('/posts/new/<string:blog_id>', methods=['POST', 'GET'])
def create_new_post(blog_id):
if request.method == 'GET':
return render_template('new_post.html', blog_id=blog_id)
else:
title = request.form['title']
content = request.form['content']
user = User.get_by_email(session['email'])
new_post = Post(blog_id, title, content, user.email)
new_post.save_to_mongo()
return make_response(blog_posts(blog_id))
if __name__ == '__main__':
app.run(port=4995, debug=True)
| 28.040816 | 93 | 0.684498 |
79494c140d955e52372323f701243f5187695e96
| 6,515 |
py
|
Python
|
PySimple/page2.py
|
snehashri08/riscv-config-gui
|
664c5f01459da27d018815c42a9c718d00248b82
|
[
"BSD-3-Clause"
] | null | null | null |
PySimple/page2.py
|
snehashri08/riscv-config-gui
|
664c5f01459da27d018815c42a9c718d00248b82
|
[
"BSD-3-Clause"
] | null | null | null |
PySimple/page2.py
|
snehashri08/riscv-config-gui
|
664c5f01459da27d018815c42a9c718d00248b82
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import logging
import shutil
import PySimpleGUI as sg
import riscv_config.checker as riscv_config
import riscv_config.utils as utils
from collections import OrderedDict
import ruamel
from ruamel.yaml import YAML
import yaml as pyyaml
import re
yaml = YAML(typ="rt")
yaml.default_flow_style = False
yaml.allow_unicode = True
global ispec, wdir, isa_yaml, rvxlen
global csr_name, values, sub_field, window, console_frame
from collections import OrderedDict
def submit():
isa_checked_file = riscv_config.check_isa_specs(ispec, wdir, True)
def update_fields():
    '''
    Function to update all changes made by the user.'''
for k in list(set(isa_yaml['hart0'][csr_name].keys())-set(['rv32', 'rv64'])):
if k in {'address', 'reset-val'}:
isa_yaml['hart0'][csr_name][k]=int(values['-'+csr_name+'_'+k+'-'])
else:
isa_yaml['hart0'][csr_name][k]=values['-'+csr_name+'_'+k+'-']
if sub_field ==[]:
for k in list(set(isa_yaml['hart0'][csr_name][rvxlen].keys()) - set(['fields','accessible'])):
update = values['-'+csr_name+'_'+k+'-']
if k in {'msb', 'lsb'}:
update = int(update)
elif k == 'type':
update=eval(update)
elif update == '':
update=None
else:
update = update
isa_yaml['hart0'][csr_name][rvxlen][k]= update
else:
for sub in sub_field:
for k in list(set(isa_yaml['hart0'][csr_name][rvxlen][sub].keys()) - set(['fields'])):
update = values['-'+csr_name+'_'+sub+k+'-']
if k in {'msb', 'lsb'}:
update = int(update)
elif k == 'type':
if update=='':
update=None
else:
update=eval(update)
elif k == 'implemented':
if update=='1':
update=True
else:
update=False
elif update == '':
update=None
else:
update = update
isa_yaml['hart0'][csr_name][rvxlen][sub][k]=update
isa_yaml['hart0'][csr_name][rvxlen]['accessible']=values['-accessible_'+csr_name+'-']
f=open(ispec, 'w')
utils.dump_yaml(isa_yaml, f)
def inner(csr):
global csr_name, sub_field
csr_name=csr
sub_field=list(set(isa_yaml['hart0'][csr][rvxlen].keys()) - set(['fields', 'msb', 'lsb', 'accessible', 'shadow', 'shadow_type','type']))
if sub_field == [] :
rr= [[sg.Text(k), sg.InputText(isa_yaml['hart0'][csr][rvxlen][k], key='-'+csr+'_'+k+'-', tooltip=str(isa_yaml['hart0'][csr][rvxlen][k])) ] for k in list(set(isa_yaml['hart0'][csr][rvxlen].keys()) - set(['fields', 'type', 'accessible'])) ]
if 'type' in isa_yaml['hart0'][csr][rvxlen].keys():
_type= [[sg.Text('type'), sg.InputText(eval(str(isa_yaml['hart0'][csr][rvxlen]['type']).replace('ordereddict','dict')), key='-'+csr+'_type-', tooltip=str(isa_yaml['hart0'][csr][rvxlen]['type'])) ]]
else:
_type=[[]]
else :
for sub in sub_field:
if not isa_yaml['hart0'][csr][rvxlen][sub]['implemented'] and isa_yaml['hart0'][csr][rvxlen][sub]['shadow']==None:
isa_yaml['hart0'][csr_name][rvxlen][sub]['type']= None
f=open(ispec, 'w')
utils.dump_yaml(isa_yaml, f)
if isa_yaml['hart0'][csr][rvxlen][sub]['shadow']==None:
rr=[[sg.Column([[sg.Frame(sub, [[sg.Text(k), sg.InputText(isa_yaml['hart0'][csr][rvxlen][sub][k], key='-'+csr+'_'+sub+k+'-', tooltip=str(isa_yaml['hart0'][csr][rvxlen][sub][k])) ] for k in list(set(isa_yaml['hart0'][csr][rvxlen][sub].keys()) -set(['fields','type'])) ]+ [[sg.Text('type'), sg.InputText(eval(str(isa_yaml['hart0'][csr][rvxlen][sub]['type']).replace('ordereddict','dict')), key='-'+csr+'_'+sub+'type-', tooltip=str(isa_yaml['hart0'][csr][rvxlen][sub]['type'])) ]]) ] for sub in sub_field] , pad=(0,0), scrollable=True, key = "Columnmmm", size=(400,500))]]
_type= [[]]
else:
rr=[[sg.Column([[sg.Frame(sub, [[sg.Text(k), sg.InputText(isa_yaml['hart0'][csr][rvxlen][sub][k], key='-'+csr+'_'+sub+k+'-', tooltip=str(isa_yaml['hart0'][csr][rvxlen][sub][k])) ] for k in list(set(isa_yaml['hart0'][csr][rvxlen][sub].keys()) -set(['fields','type'])) ]) ] for sub in sub_field] , pad=(0,0), scrollable=True, key = "Columnmmm", size=(400,500))]]
_type= [[]]
return [[sg.Text(k), sg.InputText(isa_yaml['hart0'][csr][k], key='-'+csr+'_'+k+'-', tooltip=str(isa_yaml['hart0'][csr][k])) ] for k in list(set(isa_yaml['hart0'][csr].keys())-set(['rv32', 'rv64'])) ] + rr +_type
def print_csrs(csr):
    '''
    Print the required number of CSRs.'''
col1 = sg.Column([[sg.Frame(csr, inner(csr))]], pad=(0,0), scrollable=True, key = "Column", size=(450,500))
return col1
def page1(csr):
global values
col3 = sg.Column([[sg.Frame('Actions:',
[[sg.Column([[sg.Checkbox('Accessible', default=True, key='-accessible_'+csr+'-'), sg.Button('Save'), ]],
size=(450,45), pad=(0,0))]])]], pad=(0,0))
col1=print_csrs(csr)
layout=[[col1],
[col3]]
window = sg.Window('PAGE1', layout)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
break
if event == 'Save':
update_fields()
window.close()
def index():
global isa_yaml, rvxlen, console_frame
with open(ispec, 'r') as file:
isa_yaml = yaml.load(file)
rvxlen= 'rv'+str(isa_yaml['hart0']['supported_xlen'][0])
csr=[]
for field in isa_yaml['hart0']:
if isinstance(isa_yaml['hart0'][field], dict) and (isa_yaml['hart0'][field][rvxlen]['accessible']==True):
csr.append(str(field))
csr.sort()
col2=[[sg.Button(str(csr[(j*10)+(i+1)]), size=(12,0)) for i in range(0,10)] for j in range(0, int(len(csr)/10))]
col5 = sg.Column([[sg.Frame('Actions:',
[[sg.Column([[sg.Button('Run in Riscv-Config'), sg.Button('Clear') ]],
size=(450,45), pad=(0,0))]])]], pad=(0,0))
layout=[[col2],
[col5]]
window1 = sg.Window('LIST OF CSRS', layout)
while True:
event, values = window1.read()
if event == 'Run in Riscv-Config':
submit()
elif event == 'Clear':
window['-MLl-'].Update(' ')
else:
page1(str(event))
if event == sg.WIN_CLOSED:
break
def gui_page2(isa_spec, work_dir):
global pagenum
global ispec, wdir
ispec=isa_spec
wdir=work_dir
index()
| 42.861842 | 577 | 0.571911 |
79494cbf235d939c0e74a62f7b6cf4416e213b72
| 4,399 |
py
|
Python
|
tool/values.py
|
ashgeek/MAIAN
|
ab387e171bf99969676e60a0e5c59de80af15010
|
[
"MIT"
] | 258 |
2018-03-14T08:50:26.000Z
|
2020-06-08T14:47:47.000Z
|
tool/values.py
|
lazzarello/MAIAN
|
ab387e171bf99969676e60a0e5c59de80af15010
|
[
"MIT"
] | 29 |
2018-03-14T16:25:03.000Z
|
2019-09-16T06:52:43.000Z
|
tool/values.py
|
lazzarello/MAIAN
|
ab387e171bf99969676e60a0e5c59de80af15010
|
[
"MIT"
] | 72 |
2018-03-14T18:17:31.000Z
|
2020-05-10T13:30:53.000Z
|
from web3 import Web3, KeepAliveRPCProvider, IPCProvider
import copy
from z3 import *
# Get value
def get_params(param, input):
if (param+str(input)) in MyGlobals.st:
return MyGlobals.st[param+str(input)]
else:
print('need to set the parameters: %s ' % (param+str(input) ) )
exit(4)
# Is set
def is_params(param,input):
return (param+str(input)) in MyGlobals.st
# Set parameter
def set_params(param, input, value):
global st
MyGlobals.st[param+str(input)] = value
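# Usage sketch (hypothetical parameter name and input index):
#
#     set_params('calldata_size', 0, '20'.zfill(64))
#     if is_params('calldata_size', 0):
#         size_hex = get_params('calldata_size', 0)
#
# i.e. values are keyed by the parameter name concatenated with the input
# index, so the same parameter can be stored separately for each symbolic input.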
# Create a dict of parameters
def initialize_params(read_from_blockchain, c_address):
# Set (dummy) values for some blockchain parameters used in the contracts
global st
MyGlobals.st = {}
MyGlobals.st['my_address'] = MyGlobals.adversary_account
MyGlobals.st['contract_address'] = c_address
if read_from_blockchain:
MyGlobals.st['contract_balance'] = str(MyGlobals.web3.eth.getBalance(c_address)+1).zfill(64)
else:
MyGlobals.st['contract_balance'] = '7' * 64
MyGlobals.st['gas'] = ('765432').zfill(64)
MyGlobals.st['gas_limit'] = ('%x' % 5000000).zfill(64)
MyGlobals.st['gas_price'] = ('123').zfill(64)
MyGlobals.st['time_stamp'] = ('%x' % 0x7687878).zfill(64)
MyGlobals.st['block_number'] = ('545454').zfill(64)
def print_params():
for s in MyGlobals.st:
print('%20s : %s' % (s, str(MyGlobals.st[s])))
def create_configuration( stack, mmemory, storage):
nc = {}
nc['stack'] = copy.deepcopy(stack)
nc['mmemory'] = copy.deepcopy(mmemory)
nc['storage'] = copy.deepcopy(storage)
return nc
def add_configuration( step, configurations, nc):
    if step in configurations:
        configurations[step].append(nc)
    else:
        configurations[step] = [nc]
def configuration_exist(step, configurations, nc):
if step not in configurations:
return False
found = False
for os in configurations[step]:
# Compare stack
if os['stack'] != nc['stack'] : continue
# Compare mmemory
if os['mmemory'] != nc['mmemory']: continue
# Compare storage
if( os['storage'] != nc['storage'] ):continue
found = True
break
return found
def seen_configuration( configurations, ops, position, stack, mmemory, storage):
# Check if configuration exist
op = ops[position]['o']
step = ops[position]['id']
nc = create_configuration( stack, mmemory, storage)
if configuration_exist(step, configurations, nc):
return True
else:
add_configuration( step, configurations, nc)
return False
def print_configuration( conf ):
for c in conf:
print_stack( c['stack'] )
print_storage(c['storage'])
class MyGlobals(object):
MAX_JUMP_DEPTH = 60 # path length in CFG
MAX_CALL_DEPTH = 0 # different function calls to the contract
MAX_VISITED_NODES = 2000 # sum of all paths in search of one contract
max_calldepth_in_normal_search = 3
ETHER_LOCK_GOOD_IF_CAN_CALL = True
st = {}
#
# Z3 solver
#
s = None
SOLVER_TIMEOUT = 10000 #timeout
search_condition_found = False
stop_search = False
visited_nodes = 0
last_eq_step = -1
last_eq_func = -1
symbolic_vars = []
no_function_calls = 0
function_calls = {}
symbolic_sha = False
symbolic_load = False
# Params related to blockchain
port_number = '8550'
confirming_transaction ='0x3094c123bd9ffc3f41dddefd3ea88e4296e45015b62e892f8bdf9d1b645ef2d2'
etherbase_account = '0x69190bde29255c02363477462f17e816a9533d3a'
adversary_account = '5a1cd1d07d9f59898c434ffc90a74ecd937feb12'
sendingether_account = '564625b3ae8d0602a8fc0fe22c884b091098417f'
send_initial_wei = 44
web3 = None
#
debug = False
read_from_blockchain = False
checktype = 0
exec_as_script = False
def clear_globals():
MyGlobals.s = Solver()
MyGlobals.s.set("timeout", MyGlobals.SOLVER_TIMEOUT)
MyGlobals.search_condition_found = False
MyGlobals.stop_search = False
MyGlobals.visited_nodes = 0
MyGlobals.no_function_calls = 0
MyGlobals.function_calls = {}
| 24.713483 | 100 | 0.635599 |
79494dc5785997045f693d3e31e1f79ac2e7eb15
| 4,520 |
py
|
Python
|
XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/xdjangobb/xdjangobb.py
|
DANCEcollaborative/forum-xblock
|
fe8a062b4e45966a5faa5282d85799be479ec28a
|
[
"MIT"
] | 7 |
2016-02-09T06:43:14.000Z
|
2021-03-12T06:07:50.000Z
|
XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/xdjangobb/xdjangobb.py
|
DANCEcollaborative/forum-xblock
|
fe8a062b4e45966a5faa5282d85799be479ec28a
|
[
"MIT"
] | null | null | null |
XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/xdjangobb/xdjangobb.py
|
DANCEcollaborative/forum-xblock
|
fe8a062b4e45966a5faa5282d85799be479ec28a
|
[
"MIT"
] | null | null | null |
"""TO-DO: Write a description of what this XBlock is."""
import pkg_resources
from xblock.core import XBlock
from xblock.fields import Scope, Integer
from xblock.fragment import Fragment
import requests
import re
class DjangoBBXBlock(XBlock):
"""
TO-DO: document what your XBlock does.
"""
# Fields are defined on the class. You can access them in your code as
# self.<fieldname>.
# TO-DO: delete count, and define your own fields.
count = Integer(
default=0, scope=Scope.user_state,
help="A simple counter, to show something happening",
)
service_url = 'http://127.0.0.1:8000/forum/'
html_service_endpoint_extension = 'testhtml/'
css_service_endpoint_extension = 'testcss/'
js_service_endpoint_extension = 'testjs/'
def resource_string(self, path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
# TO-DO: change this view to display your data your own way.
def student_view(self, context=None):
"""
The primary view of the DjangoBBXBlock, shown to students
when viewing courses.
"""
#html = self.resource_string("static/html/xdjangobb.html")
response = requests.get(self.service_url + self.html_service_endpoint_extension)
#html = str(response.text).strip(' \t\r\n').replace('href="/', 'href="http://localhost:12345/')
#html = str(response.text).strip(' \t\r\n').decode('utf8')
html = str(response.text).strip(' \t\r\n')
#pattern = r'<body>.*</body>'
#start = re.search(pattern, html).start()
#end = re.search(pattern, html).end()
#html = html[start + 6 : end - 7]
response = requests.get(self.service_url + self.css_service_endpoint_extension)
css = str(response.text).strip(' \t\r\n')
#if len(css) > 0:
# html += '<p> The css has length ' + str(len(css)) + 'and has the content ' + css + ' </p>'
#else:
# html += '<p> NO CSS! </p>'
#css = "#my-div{ border-style: solid; border-width: 5px; }"
response = requests.get(self.service_url + self.js_service_endpoint_extension)
js = str(response.text).strip(' \t\r\n')
#if len(js) > 0:
# html += '<p> The js has length ' + str(len(js)) + 'and has the content ' + js + ' </p>'
#else:
# html += '<p> NO JS! </p>'
#js = 'function djangobb(runtime, element){alert("This works!");}'
frag = Fragment(unicode(html).format(self=self))
frag.add_javascript(unicode(js))
frag.add_css(unicode(css))
frag.initialize_js('xdjangobb')
return frag
# TO-DO: change this handler to perform your own actions. You may need more
# than one handler, or you may not need any handlers at all.
@XBlock.json_handler
def increment_count(self, data, suffix=''):
"""
An example handler, which increments the data.
"""
# Just to show data coming in...
assert data['hello'] == 'world'
self.count += 1
return {"count": self.count}
# TO-DO: change this to create the scenarios you'd like to see in the
# workbench while developing your XBlock.
@XBlock.json_handler
def get_django_assets(self, data, suffix=''):
"""
This handler is used to fetch assets (HTML with embedded CSS/JS).
"""
original_url_extension = data.get('url')
original_url_extension = original_url_extension[original_url_extension.find('forum') + 6:]
response = requests.get(self.service_url + original_url_extension)
body_contents= re.findall('<body>(.*?)</body>', response.text, re.DOTALL)
js_contents = ''
#for js in re.findall('(<script(.*)>(.*?)</script>)', response.text, re.DOTALL):
#js_contents += js
return({'html': body_contents, 'js' : js_contents})
"""
error = ''
html = ''
try:
html = self.remote_resource_string(data.get('asset'))
except:
error = self.get_error_msg()
return{'asset': html, 'error': error}
"""
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("DjangoBBXBlock",
"""<vertical_demo>
<xdjangobb/>
</vertical_demo>
"""),
]
| 37.355372 | 103 | 0.595796 |
79494e214fe7ff1fa6a201b8508d65096d5a9d44
| 795 |
py
|
Python
|
glamkit_collections/contrib/work_creator/plugins/film/migrations/0004_auto_20161130_1109.py
|
ic-labs/glamkit-collections
|
a99c64f1d6af2d900f2687dd4382404d67d559bb
|
[
"MIT"
] | 52 |
2016-09-13T03:50:58.000Z
|
2022-02-23T16:25:08.000Z
|
glamkit_collections/contrib/work_creator/plugins/film/migrations/0004_auto_20161130_1109.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 304 |
2016-08-11T14:17:30.000Z
|
2020-07-22T13:35:18.000Z
|
glamkit_collections/contrib/work_creator/plugins/film/migrations/0004_auto_20161130_1109.py
|
ic-labs/django-icekit
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
[
"MIT"
] | 12 |
2016-09-21T18:46:35.000Z
|
2021-02-15T19:37:50.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gk_collections_film', '0003_auto_20161117_1801'),
]
operations = [
migrations.AlterField(
model_name='film',
name='media_type',
field=models.ForeignKey(blank=True, to='gk_collections_moving_image.MediaType', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
migrations.AlterField(
model_name='film',
name='rating',
field=models.ForeignKey(blank=True, to='gk_collections_moving_image.Rating', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| 30.576923 | 149 | 0.659119 |
79494f57f7aa0d30b8d93397dd6684b1c73f4638
| 28,681 |
py
|
Python
|
libartipy/pointcloud/pypcd.py
|
Artisense-ai/libartipy
|
7a6a7736637c106e7e9c6763ec8c1dea64db4b01
|
[
"MIT"
] | 5 |
2021-06-15T13:01:51.000Z
|
2021-12-04T04:26:08.000Z
|
libartipy/pointcloud/pypcd.py
|
Artisense-ai/libartipy
|
7a6a7736637c106e7e9c6763ec8c1dea64db4b01
|
[
"MIT"
] | null | null | null |
libartipy/pointcloud/pypcd.py
|
Artisense-ai/libartipy
|
7a6a7736637c106e7e9c6763ec8c1dea64db4b01
|
[
"MIT"
] | 3 |
2021-06-18T10:37:04.000Z
|
2021-11-15T05:55:29.000Z
|
"""
Read and write PCL .pcd files in python.
dimatura@cmu.edu, 2013-2018
- TODO better API for wacky operations.
- TODO add a cli for common operations.
- TODO deal properly with padding
- TODO deal properly with multicount fields
- TODO better support for rgb nonsense
"""
import re
import struct
import copy
# reads and writes a string buffer
try:
from io import StringIO as sio
except:
import cStringIO as sio
import numpy as np
import warnings
import lzf
# To make sure that we can use xrange transparently, independent of the fact whether it is Python 2. or Python 3.
try:
xrange # if Python 2.
except NameError: # if Python 3.
xrange = range
HAS_SENSOR_MSGS = True
try:
from sensor_msgs.msg import PointField
import numpy_pc2 # needs sensor_msgs
except ImportError:
HAS_SENSOR_MSGS = False
__all__ = ['PointCloud',
'point_cloud_to_path',
'point_cloud_to_buffer',
'point_cloud_to_fileobj',
'point_cloud_from_path',
'point_cloud_from_buffer',
'point_cloud_from_fileobj',
'make_xyz_point_cloud',
'make_xyz_rgb_point_cloud',
'make_xyz_label_point_cloud',
'save_txt',
'cat_point_clouds',
'add_fields',
'update_field',
'build_ascii_fmtstr',
'encode_rgb_for_pcl',
'decode_rgb_from_pcl',
'save_point_cloud',
'save_point_cloud_bin',
'save_point_cloud_bin_compressed',
'pcd_type_to_numpy_type',
'numpy_type_to_pcd_type',
]
if HAS_SENSOR_MSGS:
pc2_pcd_type_mappings = [(PointField.INT8, ('I', 1)),
(PointField.UINT8, ('U', 1)),
(PointField.INT16, ('I', 2)),
(PointField.UINT16, ('U', 2)),
(PointField.INT32, ('I', 4)),
(PointField.UINT32, ('U', 4)),
(PointField.FLOAT32, ('F', 4)),
(PointField.FLOAT64, ('F', 8))]
pc2_type_to_pcd_type = dict(pc2_pcd_type_mappings)
pcd_type_to_pc2_type = dict((q, p) for (p, q) in pc2_pcd_type_mappings)
__all__.extend(['pcd_type_to_pc2_type', 'pc2_type_to_pcd_type'])
numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),
(np.dtype('float64'), ('F', 8)),
(np.dtype('uint8'), ('U', 1)),
(np.dtype('uint16'), ('U', 2)),
(np.dtype('uint32'), ('U', 4)),
(np.dtype('uint64'), ('U', 8)),
(np.dtype('int16'), ('I', 2)),
(np.dtype('int32'), ('I', 4)),
(np.dtype('int64'), ('I', 8))]
numpy_type_to_pcd_type = dict(numpy_pcd_type_mappings)
pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)
def parse_header(lines):
""" Parse header of PCD files.
"""
metadata = {}
for ln in lines:
if ln.startswith('#') or len(ln) < 2:
continue
        match = re.match(r'(\w+)\s+([\w\s\.]+)', ln)
if not match:
warnings.warn("warning: can't understand line: %s" % ln)
continue
key, value = match.group(1).lower(), match.group(2)
if key == 'version':
metadata[key] = value
elif key in ('fields', 'type'):
metadata[key] = value.split()
elif key in ('size', 'count'):
metadata[key] = list(map(int, value.split()))
elif key in ('width', 'height', 'points'):
metadata[key] = int(value)
elif key == 'viewpoint':
metadata[key] = list(map(float, value.split()))
elif key == 'data':
metadata[key] = value.strip().lower()
# TODO apparently count is not required?
# add some reasonable defaults
if 'count' not in metadata:
metadata['count'] = [1]*len(metadata['fields'])
if 'viewpoint' not in metadata:
metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
if 'version' not in metadata:
metadata['version'] = '.7'
return metadata
def write_header(metadata, rename_padding=False):
""" Given metadata as dictionary, return a string header.
"""
template = """\
VERSION {version}
FIELDS {fields}
SIZE {size}
TYPE {type}
COUNT {count}
WIDTH {width}
HEIGHT {height}
VIEWPOINT {viewpoint}
POINTS {points}
DATA {data}
"""
str_metadata = metadata.copy()
if not rename_padding:
str_metadata['fields'] = ' '.join(metadata['fields'])
else:
new_fields = []
for f in metadata['fields']:
if f == '_':
new_fields.append('padding')
else:
new_fields.append(f)
str_metadata['fields'] = ' '.join(new_fields)
str_metadata['size'] = ' '.join(map(str, metadata['size']))
str_metadata['type'] = ' '.join(metadata['type'])
str_metadata['count'] = ' '.join(map(str, metadata['count']))
str_metadata['width'] = str(metadata['width'])
str_metadata['height'] = str(metadata['height'])
str_metadata['viewpoint'] = ' '.join(map(str, metadata['viewpoint']))
str_metadata['points'] = str(metadata['points'])
tmpl = template.format(**str_metadata)
return tmpl
def _metadata_is_consistent(metadata):
""" Sanity check for metadata. Just some basic checks.
"""
checks = []
required = ('version', 'fields', 'size', 'width', 'height', 'points',
'viewpoint', 'data')
for f in required:
if f not in metadata:
print('%s required' % f)
checks.append((lambda m: all([k in m for k in required]),
'missing field'))
checks.append((lambda m: len(m['type']) == len(m['count']) ==
len(m['fields']),
'length of type, count and fields must be equal'))
checks.append((lambda m: m['height'] > 0,
'height must be greater than 0'))
checks.append((lambda m: m['width'] > 0,
'width must be greater than 0'))
checks.append((lambda m: m['points'] > 0,
'points must be greater than 0'))
checks.append((lambda m: m['data'].lower() in ('ascii', 'binary',
'binary_compressed'),
'unknown data type:'
'should be ascii/binary/binary_compressed'))
ok = True
for check, msg in checks:
if not check(metadata):
print('error:', msg)
ok = False
return ok
# def pcd_type_to_numpy(pcd_type, pcd_sz):
# """ convert from a pcd type string and size to numpy dtype."""
# typedict = {'F' : { 4:np.float32, 8:np.float64 },
# 'I' : { 1:np.int8, 2:np.int16, 4:np.int32, 8:np.int64 },
# 'U' : { 1:np.uint8, 2:np.uint16, 4:np.uint32 , 8:np.uint64 }}
# return typedict[pcd_type][pcd_sz]
def _build_dtype(metadata):
""" Build numpy structured array dtype from pcl metadata.
Note that fields with count > 1 are 'flattened' by creating multiple
single-count fields.
*TODO* allow 'proper' multi-count fields.
"""
fieldnames = []
typenames = []
for f, c, t, s in zip(metadata['fields'],
metadata['count'],
metadata['type'],
metadata['size']):
np_type = pcd_type_to_numpy_type[(t, s)]
if c == 1:
fieldnames.append(f)
typenames.append(np_type)
else:
fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])
typenames.extend([np_type]*c)
#print("Fieldnames {} Typenames {}".format(fieldnames, typenames))
dtype = np.dtype(list(zip(fieldnames, typenames)))
return dtype
def build_ascii_fmtstr(pc):
""" Make a format string for printing to ascii.
Note %.8f is minimum for rgb.
"""
fmtstr = []
for t, cnt in zip(pc.type, pc.count):
if t == 'F':
# Note: ".8g" is essential to encode RGB values e-38, https://stackoverflow.com/a/25122534
fmtstr.extend(['%.8g']*cnt)
elif t == 'I':
fmtstr.extend(['%d']*cnt)
elif t == 'U':
fmtstr.extend(['%u']*cnt)
else:
raise ValueError("don't know about type %s" % t)
return fmtstr
def parse_ascii_pc_data(f, dtype, metadata):
""" Use numpy to parse ascii pointcloud data.
"""
return np.loadtxt(f, dtype=dtype, delimiter=' ')
def parse_binary_pc_data(f, dtype, metadata):
rowstep = metadata['points']*dtype.itemsize
# for some reason pcl adds empty space at the end of files
buf = f.read(rowstep)
return np.fromstring(buf, dtype=dtype)
def parse_binary_compressed_pc_data(f, dtype, metadata):
""" Parse lzf-compressed data.
Format is undocumented but seems to be:
- compressed size of data (uint32)
- uncompressed size of data (uint32)
- compressed data
- junk
"""
fmt = 'II'
compressed_size, uncompressed_size =\
struct.unpack(fmt, f.read(struct.calcsize(fmt)))
compressed_data = f.read(compressed_size)
# TODO what to use as second argument? if buf is None
# (compressed > uncompressed)
# should we read buf as raw binary?
buf = lzf.decompress(compressed_data, uncompressed_size)
if len(buf) != uncompressed_size:
raise IOError('Error decompressing data')
# the data is stored field-by-field
pc_data = np.zeros(metadata['width'], dtype=dtype)
ix = 0
for dti in range(len(dtype)):
dt = dtype[dti]
bytes = dt.itemsize * metadata['width']
column = np.fromstring(buf[ix:(ix+bytes)], dt)
pc_data[dtype.names[dti]] = column
ix += bytes
return pc_data
def point_cloud_from_fileobj(f):
""" Parse pointcloud coming from file object f
"""
header = []
while True:
ln = f.readline().strip()
if not isinstance(ln, str):
ln = ln.decode('utf-8')
header.append(ln)
if ln.startswith('DATA'):
metadata = parse_header(header)
dtype = _build_dtype(metadata)
break
if metadata['data'] == 'ascii':
pc_data = parse_ascii_pc_data(f, dtype, metadata)
elif metadata['data'] == 'binary':
pc_data = parse_binary_pc_data(f, dtype, metadata)
elif metadata['data'] == 'binary_compressed':
pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)
else:
        print('DATA field is not one of "ascii", "binary", "binary_compressed"')
return PointCloud(metadata, pc_data)
def point_cloud_from_path(fname):
""" load point cloud in binary format
"""
with open(fname, 'rb') as f:
pc = point_cloud_from_fileobj(f)
return pc
def point_cloud_from_buffer(buf):
fileobj = sio.StringIO(buf)
pc = point_cloud_from_fileobj(fileobj)
fileobj.close() # necessary?
return pc
def point_cloud_to_fileobj(pc, fileobj, data_compression=None):
""" Write pointcloud as .pcd to fileobj.
If data_compression is not None it overrides pc.data.
"""
metadata = pc.get_metadata()
if data_compression is not None:
data_compression = data_compression.lower()
assert(data_compression in ('ascii', 'binary', 'binary_compressed'))
metadata['data'] = data_compression
header = write_header(metadata).encode('utf-8')
fileobj.write(header)
if metadata['data'].lower() == 'ascii':
fmtstr = build_ascii_fmtstr(pc)
np.savetxt(fileobj, pc.pc_data, fmt=fmtstr)
elif metadata['data'].lower() == 'binary':
fileobj.write(pc.pc_data.tostring())
elif metadata['data'].lower() == 'binary_compressed':
# TODO
        # a '_' field is ignored by pcl and breaks compressed point clouds.
# changing '_' to '_padding' or other name fixes this.
# admittedly padding shouldn't be compressed in the first place.
# reorder to column-by-column
uncompressed_lst = []
for fieldname in pc.pc_data.dtype.names:
column = np.ascontiguousarray(pc.pc_data[fieldname]).tostring()
uncompressed_lst.append(column)
uncompressed = b''.join(uncompressed_lst)
uncompressed_size = len(uncompressed)
# print("uncompressed_size = %r"%(uncompressed_size))
buf = lzf.compress(uncompressed)
if buf is None:
# compression didn't shrink the file
# TODO what do to do in this case when reading?
buf = uncompressed
compressed_size = uncompressed_size
else:
compressed_size = len(buf)
fmt = 'II'
fileobj.write(struct.pack(fmt, compressed_size, uncompressed_size))
fileobj.write(buf)
else:
raise ValueError('unknown DATA type')
# we can't close because if it's stringio buf then we can't get value after
def point_cloud_to_path(pc, fname):
with open(fname, 'wb') as f:
point_cloud_to_fileobj(pc, f)
def point_cloud_to_buffer(pc, data_compression=None):
fileobj = sio.StringIO()
point_cloud_to_fileobj(pc, fileobj, data_compression)
return fileobj.getvalue()
def save_point_cloud(pc, fname):
""" Save pointcloud to fname in ascii format.
"""
with open(fname, 'wb') as f:
point_cloud_to_fileobj(pc, f, 'ascii')
def save_point_cloud_bin(pc, fname):
""" Save pointcloud to fname in binary format.
"""
with open(fname, 'wb') as f:
point_cloud_to_fileobj(pc, f, 'binary')
def save_point_cloud_bin_compressed(pc, fname):
""" Save pointcloud to fname in binary compressed format.
"""
with open(fname, 'wb') as f:
point_cloud_to_fileobj(pc, f, 'binary_compressed')
def save_xyz_label(pc, fname, use_default_lbl=False):
""" Save a simple (x y z label) pointcloud, ignoring all other features.
Label is initialized to 1000, for an obscure program I use.
"""
md = pc.get_metadata()
if not use_default_lbl and ('label' not in md['fields']):
raise Exception('label is not a field in this point cloud')
with open(fname, 'w') as f:
for i in xrange(pc.points):
x, y, z = ['%.4f' % d for d in (
pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i]
)]
lbl = '1000' if use_default_lbl else pc.pc_data['label'][i]
f.write(' '.join((x, y, z, lbl))+'\n')
def save_xyz_intensity_label(pc, fname, use_default_lbl=False):
""" Save XYZI point cloud.
"""
md = pc.get_metadata()
if not use_default_lbl and ('label' not in md['fields']):
raise Exception('label is not a field in this point cloud')
if 'intensity' not in md['fields']:
raise Exception('intensity is not a field in this point cloud')
with open(fname, 'w') as f:
for i in xrange(pc.points):
x, y, z = ['%.4f' % d for d in (
pc.pc_data['x'][i], pc.pc_data['y'][i], pc.pc_data['z'][i]
)]
intensity = '%.4f' % pc.pc_data['intensity'][i]
lbl = '1000' if use_default_lbl else pc.pc_data['label'][i]
f.write(' '.join((x, y, z, intensity, lbl))+'\n')
def save_txt(pc, fname, header=True):
""" Save to csv-style text file, separated by spaces.
TODO:
- support multi-count fields.
- other delimiters.
"""
with open(fname, 'w') as f:
if header:
header_lst = []
for field_name, cnt in zip(pc.fields, pc.count):
if cnt == 1:
header_lst.append(field_name)
else:
for c in xrange(cnt):
header_lst.append('%s_%04d' % (field_name, c))
f.write(' '.join(header_lst)+'\n')
fmtstr = build_ascii_fmtstr(pc)
np.savetxt(f, pc.pc_data, fmt=fmtstr)
def update_field(pc, field, pc_data):
""" Updates field in-place.
"""
pc.pc_data[field] = pc_data
return pc
def add_fields(pc, metadata, pc_data):
""" Builds copy of pointcloud with extra fields.
Multi-count fields are sketchy, yet again.
"""
if len(set(metadata['fields']).intersection(set(pc.fields))) > 0:
raise Exception("Fields with that name exist.")
if pc.points != len(pc_data):
raise Exception("Mismatch in number of points.")
new_metadata = pc.get_metadata()
new_metadata['fields'].extend(metadata['fields'])
new_metadata['count'].extend(metadata['count'])
new_metadata['size'].extend(metadata['size'])
new_metadata['type'].extend(metadata['type'])
# parse metadata to add
# TODO factor this
fieldnames, typenames = [], []
for f, c, t, s in zip(metadata['fields'],
metadata['count'],
metadata['type'],
metadata['size']):
np_type = pcd_type_to_numpy_type[(t, s)]
if c == 1:
fieldnames.append(f)
typenames.append(np_type)
else:
fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])
typenames.extend([np_type]*c)
dtype = list(zip(fieldnames, typenames))
# new dtype. could be inferred?
new_dtype = [(f, pc.pc_data.dtype[f])
for f in pc.pc_data.dtype.names] + dtype
new_data = np.empty(len(pc.pc_data), new_dtype)
for n in pc.pc_data.dtype.names:
new_data[n] = pc.pc_data[n]
for n, n_tmp in zip(fieldnames, pc_data.dtype.names):
new_data[n] = pc_data[n_tmp]
# TODO maybe just all the metadata in the dtype.
# TODO maybe use composite structured arrays for fields with count > 1
newpc = PointCloud(new_metadata, new_data)
return newpc
def cat_point_clouds(pc1, pc2):
""" Concatenate two point clouds into bigger point cloud.
Point clouds must have same metadata.
"""
if len(pc1.fields) != len(pc2.fields):
raise ValueError("Pointclouds must have same fields")
new_metadata = pc1.get_metadata()
new_data = np.concatenate((pc1.pc_data, pc2.pc_data))
# TODO this only makes sense for unstructured pc?
new_metadata['width'] = pc1.width+pc2.width
new_metadata['points'] = pc1.points+pc2.points
pc3 = PointCloud(new_metadata, new_data)
return pc3
def make_xyz_point_cloud(xyz, metadata=None):
""" Make a pointcloud object from xyz array.
xyz array is cast to float32.
"""
md = {'version': .7,
'fields': ['x', 'y', 'z'],
'size': [4, 4, 4],
'type': ['F', 'F', 'F'],
'count': [1, 1, 1],
'width': len(xyz),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(xyz),
'data': 'binary'}
if metadata is not None:
md.update(metadata)
xyz = xyz.astype(np.float32)
pc_data = xyz.view(np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32)]))
# pc_data = np.rec.fromarrays([xyz[:,0], xyz[:,1], xyz[:,2]], dtype=dt)
# data = np.rec.fromarrays([xyz.T], dtype=dt)
pc = PointCloud(md, pc_data)
return pc
def make_xyz_rgb_point_cloud(xyz_rgb, metadata=None):
""" Make a pointcloud object from xyz array.
xyz array is assumed to be float32.
rgb is assumed to be encoded as float32 according to pcl conventions.
"""
md = {'version': .7,
'fields': ['x', 'y', 'z', 'rgb'],
'count': [1, 1, 1, 1],
'width': len(xyz_rgb),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(xyz_rgb),
'type': ['F', 'F', 'F', 'F'],
'size': [4, 4, 4, 4],
'data': 'binary'}
if xyz_rgb.dtype != np.float32:
raise ValueError('array must be float32')
if metadata is not None:
md.update(metadata)
pc_data = xyz_rgb.view(np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('rgb', np.float32)])).squeeze()
# pc_data = np.rec.fromarrays([xyz[:,0], xyz[:,1], xyz[:,2]], dtype=dt)
# data = np.rec.fromarrays([xyz.T], dtype=dt)
pc = PointCloud(md, pc_data)
return pc
def encode_rgb_for_pcl(rgb):
""" Encode bit-packed RGB for use with PCL.
:param rgb: Nx3 uint8 array with RGB values.
:rtype: Nx1 float32 array with bit-packed RGB, for PCL.
"""
assert(rgb.dtype == np.uint8)
assert(rgb.ndim == 2)
assert(rgb.shape[1] == 3)
rgb = rgb.astype(np.uint32)
rgb = np.array((rgb[:, 0] << 16) | (rgb[:, 1] << 8) | (rgb[:, 2] << 0),
dtype=np.uint32)
rgb.dtype = np.float32
return rgb
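# For example, a single pure-red pixel would pack like this (the values follow
# from the bit layout above, not from a captured run):
#
#     encode_rgb_for_pcl(np.array([[255, 0, 0]], dtype=np.uint8))
#     # -> float32 array whose single element has the bit pattern 0x00FF0000,
#     #    i.e. roughly 2.3418e-38 -- hence '%.8g' in build_ascii_fmtstr.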
def decode_rgb_from_pcl(rgb):
""" Decode the bit-packed RGBs used by PCL.
:param rgb: An Nx1 array.
:rtype: Nx3 uint8 array with one column per color.
"""
rgb = rgb.copy()
rgb.dtype = np.uint32
r = np.asarray((rgb >> 16) & 255, dtype=np.uint8)
g = np.asarray((rgb >> 8) & 255, dtype=np.uint8)
b = np.asarray(rgb & 255, dtype=np.uint8)
rgb_arr = np.zeros((len(rgb), 3), dtype=np.uint8)
rgb_arr[:, 0] = r
rgb_arr[:, 1] = g
rgb_arr[:, 2] = b
return rgb_arr
def make_xyz_label_point_cloud(xyzl, label_type='f'):
""" Make XYZL point cloud from numpy array.
TODO i labels?
"""
md = {'version': .7,
'fields': ['x', 'y', 'z', 'label'],
'count': [1, 1, 1, 1],
'width': len(xyzl),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(xyzl),
'data': 'ASCII'}
if label_type.lower() == 'f':
md['size'] = [4, 4, 4, 4]
md['type'] = ['F', 'F', 'F', 'F']
elif label_type.lower() == 'u':
md['size'] = [4, 4, 4, 1]
md['type'] = ['F', 'F', 'F', 'U']
else:
raise ValueError('label type must be F or U')
# TODO use .view()
xyzl = xyzl.astype(np.float32)
dt = np.dtype([('x', np.float32), ('y', np.float32), ('z', np.float32),
('label', np.float32)])
pc_data = np.rec.fromarrays([xyzl[:, 0], xyzl[:, 1], xyzl[:, 2],
xyzl[:, 3]], dtype=dt)
pc = PointCloud(md, pc_data)
return pc
class PointCloud(object):
""" Wrapper for point cloud data.
The variable members of this class parallel the ones used by
    the PCD metadata (and similar to PCL and ROS PointCloud2 messages).
``pc_data`` holds the actual data as a structured numpy array.
The other relevant metadata variables are:
- ``version``: Version, usually .7
- ``fields``: Field names, e.g. ``['x', 'y' 'z']``.
    - ``size``: Field sizes in bytes, e.g. ``[4, 4, 4]``.
- ``count``: Counts per field e.g. ``[1, 1, 1]``. NB: Multi-count field
support is sketchy.
- ``width``: Number of points, for unstructured point clouds (assumed by
most operations).
- ``height``: 1 for unstructured point clouds (again, what we assume most
      of the time).
- ``viewpoint``: A pose for the viewpoint of the cloud, as
x y z qw qx qy qz, e.g. ``[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]``.
- ``points``: Number of points.
- ``type``: Data type of each field, e.g. ``[F, F, F]``.
- ``data``: Data storage format. One of ``ascii``, ``binary`` or ``binary_compressed``.
See `PCL docs <http://pointclouds.org/documentation/tutorials/pcd_file_format.php>`__
for more information.
"""
def __init__(self, metadata, pc_data):
self.metadata_keys = metadata.keys()
self.__dict__.update(metadata)
self.pc_data = pc_data
self.check_sanity()
def get_metadata(self):
""" returns copy of metadata """
metadata = {}
for k in self.metadata_keys:
metadata[k] = copy.copy(getattr(self, k))
return metadata
def check_sanity(self):
# pdb.set_trace()
md = self.get_metadata()
assert(_metadata_is_consistent(md))
assert(len(self.pc_data) == self.points)
assert(self.width*self.height == self.points)
assert(len(self.fields) == len(self.count))
assert(len(self.fields) == len(self.type))
def save(self, fname):
self.save_pcd(fname, 'ascii')
def save_pcd(self, fname, compression=None, **kwargs):
if 'data_compression' in kwargs:
warnings.warn('data_compression keyword is deprecated for'
' compression')
compression = kwargs['data_compression']
# format_py3 = 'wb' #w
format_py3 = 'wb'
with open(fname, format_py3) as f:
point_cloud_to_fileobj(self, f, compression)
def save_pcd_to_fileobj(self, fileobj, compression=None, **kwargs):
if 'data_compression' in kwargs:
warnings.warn('data_compression keyword is deprecated for'
' compression')
compression = kwargs['data_compression']
point_cloud_to_fileobj(self, fileobj, compression)
def save_pcd_to_buffer(self, compression=None, **kwargs):
if 'data_compression' in kwargs:
warnings.warn('data_compression keyword is deprecated for'
' compression')
compression = kwargs['data_compression']
return point_cloud_to_buffer(self, compression)
def save_txt(self, fname):
save_txt(self, fname)
def save_xyz_label(self, fname, **kwargs):
save_xyz_label(self, fname, **kwargs)
def save_xyz_intensity_label(self, fname, **kwargs):
save_xyz_intensity_label(self, fname, **kwargs)
def copy(self):
new_pc_data = np.copy(self.pc_data)
new_metadata = self.get_metadata()
return PointCloud(new_metadata, new_pc_data)
def to_msg(self):
if not HAS_SENSOR_MSGS:
raise Exception('ROS sensor_msgs not found')
# TODO is there some metadata we want to attach?
return numpy_pc2.array_to_pointcloud2(self.pc_data)
@staticmethod
def from_path(fname):
return point_cloud_from_path(fname)
@staticmethod
def from_fileobj(fileobj):
return point_cloud_from_fileobj(fileobj)
@staticmethod
def from_buffer(buf):
return point_cloud_from_buffer(buf)
@staticmethod
def from_array(arr):
""" create a PointCloud object from an array.
"""
pc_data = arr.copy()
md = {'version': .7,
'fields': [],
'size': [],
'count': [],
'width': 0,
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': 0,
'type': [],
'data': 'binary_compressed'}
md['fields'] = pc_data.dtype.names
for field in md['fields']:
type_, size_ =\
numpy_type_to_pcd_type[pc_data.dtype.fields[field][0]]
md['type'].append(type_)
md['size'].append(size_)
# TODO handle multicount
md['count'].append(1)
md['width'] = len(pc_data)
md['points'] = len(pc_data)
pc = PointCloud(md, pc_data)
return pc
@staticmethod
def from_msg(msg, squeeze=True):
""" from pointcloud2 msg
squeeze: fix when clouds get 1 as first dim
"""
if not HAS_SENSOR_MSGS:
raise NotImplementedError('ROS sensor_msgs not found')
md = {'version': .7,
'fields': [],
'size': [],
'count': [],
'width': msg.width,
'height': msg.height,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': 0,
'type': [],
'data': 'binary_compressed'}
for field in msg.fields:
md['fields'].append(field.name)
t, s = pc2_type_to_pcd_type[field.datatype]
md['type'].append(t)
md['size'].append(s)
# TODO handle multicount correctly
if field.count > 1:
warnings.warn('fields with count > 1 are not well tested')
md['count'].append(field.count)
pc_array = numpy_pc2.pointcloud2_to_array(msg)
pc_data = pc_array.reshape(-1)
md['height'], md['width'] = pc_array.shape
md['points'] = len(pc_data)
pc = PointCloud(md, pc_data)
return pc
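# Minimal usage sketch (illustrative only, not part of the original module): wrap a
# small structured array in a PointCloud; from_array derives the metadata fields
# documented in the class docstring above.
def _example_from_array():
    dt = np.dtype([('x', np.float32), ('y', np.float32), ('z', np.float32)])
    arr = np.array([(0.0, 0.0, 0.0), (1.0, 2.0, 3.0)], dtype=dt)
    pc = PointCloud.from_array(arr)
    assert pc.points == 2 and list(pc.fields) == ['x', 'y', 'z']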
| 34.764848 | 113 | 0.570726 |
79495099d22b886b8d0bea2510d554b297f1a131
| 5,572 |
py
|
Python
|
srht/blueprints/oauth.py
|
prplecake/legacy.sr.ht
|
191ba17ab59ffc9a3818712ac976e37a734f7cdc
|
[
"MIT"
] | null | null | null |
srht/blueprints/oauth.py
|
prplecake/legacy.sr.ht
|
191ba17ab59ffc9a3818712ac976e37a734f7cdc
|
[
"MIT"
] | 8 |
2021-05-15T20:33:08.000Z
|
2021-06-02T04:39:23.000Z
|
srht/blueprints/oauth.py
|
prplecake/legacy.sr.ht
|
191ba17ab59ffc9a3818712ac976e37a734f7cdc
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template, abort, request, redirect, url_for, send_file, Response
from flask_login import current_user, login_user, logout_user
from srht.database import db
from srht.objects import *
from srht.common import *
from srht.config import _cfg
from datetime import datetime, timedelta, timezone
import urllib
import redis
import os
import hashlib
oauth = Blueprint('oauth', __name__, template_folder='../../templates')
@oauth.route("/oauth/clients")
@loginrequired
def clients():
return render_template("oauth-clients.html")
@oauth.route("/oauth/clients", methods=["POST"])
@loginrequired
def clients_POST():
name = request.form.get("name")
info_url = request.form.get("info_url")
redirect_uri = request.form.get("redirect_uri")
if not name or not info_url or not redirect_uri:
return render_template("oauth-clients.html", errors="All fields are required.")
if not info_url.startswith("http://") and not info_url.startswith("https://"):
return render_template("oauth-clients.html", errors="URL fields must be a URL.")
if not redirect_uri.startswith("http://") and not redirect_uri.startswith("https://"):
return render_template("oauth-clients.html", errors="URL fields must be a URL.")
if len(current_user.clients) > 10:
return render_template("oauth-clients.html", errors="You can only have 10 clients, chill out dude.")
client = OAuthClient(current_user, name, info_url, redirect_uri)
db.add(client)
db.commit()
return redirect("/oauth/clients")
@oauth.route("/oauth/clients/<secret>/regenerate")
@loginrequired
def regenerate(secret):
client = OAuthClient.query.filter(OAuthClient.client_secret == secret).first()
if not client:
abort(404)
salt = os.urandom(40)
client.client_secret = hashlib.sha256(salt).hexdigest()[:40]
db.commit()
return redirect("/oauth/clients")
@oauth.route("/oauth/clients/<secret>/revoke")
@loginrequired
def revoke_all(secret):
client = OAuthClient.query.filter(OAuthClient.client_secret == secret).first()
if not client:
abort(404)
# TODO: Revoke tokens
return redirect("/oauth/clients")
@oauth.route("/oauth/clients/<secret>/delete")
@loginrequired
def delete_client(secret):
client = OAuthClient.query.filter(OAuthClient.client_secret == secret).first()
if not client:
abort(404)
db.delete(client)
db.commit()
return redirect("/oauth/clients")
@oauth.route("/oauth/authorize")
@loginrequired
def authorize():
client_id = request.args.get("client_id")
if not client_id:
return render_template("oauth-authorize.html", errors="Missing client_id in URL")
client = OAuthClient.query.filter(OAuthClient.client_id == client_id).first()
if not client:
abort(404)
return render_template("oauth-authorize.html", client=client)
@oauth.route("/oauth/authorize", methods=["POST"])
@loginrequired
def authorize_POST():
client_id = request.form.get("client_id")
if not client_id:
return render_template("oauth-authorize.html", errors="Missing client_id")
client = OAuthClient.query.filter(OAuthClient.client_id == client_id).first()
if not client:
abort(404)
salt = os.urandom(40)
code = hashlib.sha256(salt).hexdigest()[:10]
r = redis.Redis()
r.setex("oauth.exchange.client." + code, client_id, 600) # expires in 10 minutes
r.setex("oauth.exchange.user." + code, current_user.id, 600)
params = {
"code": code
}
parts = list(urllib.parse.urlparse(client.redirect_uri))
parsed = urllib.parse.parse_qs(parts[4])
parsed.update(params)
parts[4] = urllib.parse.urlencode(parsed)
return redirect(urllib.parse.urlunparse(parts))
@oauth.route("/oauth/exchange", methods=["POST"])
@json_output
def exchange():
client_id = request.form.get("client_id")
client_secret = request.form.get("client_secret")
code = request.form.get("code")
if not client_id:
return { "error": "Missing client_id" }, 400
client = OAuthClient.query.filter(OAuthClient.client_id == client_id).first()
if not client:
return { "error": "Unknown client" }, 404
if client.client_secret != client_secret:
return { "error": "Incorrect client secret" }, 401
r = redis.Redis()
_client_id = r.get("oauth.exchange.client." + code)
user_id = r.get("oauth.exchange.user." + code)
    if not _client_id or not user_id:  # redis returns None for unknown or expired codes
return { "error": "Unknown or expired exchange code" }, 404
_client_id = _client_id.decode("utf-8")
user_id = int(user_id.decode("utf-8"))
user = User.query.filter(User.id == user_id).first()
if not user or _client_id != client.client_id:
return { "error": "Unknown or expired exchange code" }, 404
token = OAuthToken.query.filter(OAuthToken.client == client, OAuthToken.user == user).first()
if not token:
token = OAuthToken(user, client)
db.add(token)
db.commit()
r.delete("oauth.exchange.client." + code)
r.delete("oauth.exchange.user." + code)
return { "token": token.token }
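# Illustrative client-side sketch (not part of sr.ht itself): once the browser has
# returned to the app's redirect_uri with ?code=..., the app can trade that code for
# a token against the /oauth/exchange endpoint above. `base_url` is a hypothetical
# deployment root; only the standard library is used.
def _example_exchange_code(base_url, client_id, client_secret, code):
    import json
    import urllib.parse
    import urllib.request
    data = urllib.parse.urlencode({
        "client_id": client_id,
        "client_secret": client_secret,
        "code": code,
    }).encode()
    with urllib.request.urlopen(base_url + "/oauth/exchange", data=data) as resp:
        return json.loads(resp.read().decode())["token"]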
@oauth.route("/oauth/tokens")
@loginrequired
def tokens():
return render_template("oauth-tokens.html")
@oauth.route("/oauth/tokens/<token>/revoke")
@loginrequired
def revoke_token(token):
token = OAuthToken.query.filter(OAuthToken.token == token).first()
if not token:
abort(404)
if token.user != current_user:
abort(404)
db.delete(token)
db.commit()
return redirect("/oauth/tokens")
| 35.265823 | 108 | 0.690237 |
79495100e222a9cec881385415621e9557ed6f8f
| 1,875 |
py
|
Python
|
gui/data_ingestion/migrations/0006_auto_20151217_1049.py
|
alpha-zou/TAMP
|
91f0e7b08e2d6a03b541b07dd4768bf5222044dd
|
[
"MIT"
] | 1 |
2020-03-20T06:52:07.000Z
|
2020-03-20T06:52:07.000Z
|
gui/data_ingestion/migrations/0006_auto_20151217_1049.py
|
alpha-zou/TAMP
|
91f0e7b08e2d6a03b541b07dd4768bf5222044dd
|
[
"MIT"
] | 1 |
2021-11-12T15:20:56.000Z
|
2021-11-12T15:20:56.000Z
|
gui/data_ingestion/migrations/0006_auto_20151217_1049.py
|
alpha-zou/TAMP
|
91f0e7b08e2d6a03b541b07dd4768bf5222044dd
|
[
"MIT"
] | 3 |
2019-03-10T19:56:17.000Z
|
2020-03-20T07:00:10.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('data_ingestion', '0005_auto_20151217_0751'),
]
operations = [
migrations.AlterField(
model_name='collectiontable',
name='application',
field=models.CharField(default='CL', max_length=3, verbose_name=b'Application field', choices=[(b'CL', b'Cloud'), (b'PR', b'Precipitation'), (b'OZ', b'Ozone'), (b'AE', b'Aerosol'), (b'NO', b'NO2'), (b'CH', b'CH4'), (b'OG', b'other gases'), (b'D1', b'Dummy application field 1'), (b'D2', b'Dummy application field 2'), (b'D3', b'Dummy application field 3'), (b'D4', b'Dummy application field 4')]),
preserve_default=False,
),
migrations.AlterField(
model_name='collectiontable',
name='group',
field=models.CharField(default='G1', max_length=3, verbose_name=b'Group', choices=[(b'G1', b'Group 1'), (b'G2', b'Group 2'), (b'G3', b'Group 3')]),
preserve_default=False,
),
migrations.AlterField(
model_name='collectiontable',
name='location',
field=models.FileField(default='coll', upload_to=b'colections/', verbose_name=b'Location on Server'),
preserve_default=False,
),
migrations.AlterField(
model_name='collectiontable',
name='uploaded_by',
field=models.ForeignKey(verbose_name=b'Uploaded by', blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AlterField(
model_name='collectiontable',
name='use_case_field',
field=models.ManyToManyField(to='data_ingestion.UseCaseField', verbose_name=b'Use case'),
),
]
| 42.613636 | 409 | 0.6112 |
7949526a759e5c4ae2829522a9635915c2df8346
| 20 |
py
|
Python
|
tests/unit/test_modulegraph/testpkg-regr7/script.py
|
hawkhai/pyinstaller
|
016a24479b34de161792c72dde455a81ad4c78ae
|
[
"Apache-2.0"
] | 9,267 |
2015-01-01T04:08:45.000Z
|
2022-03-31T11:42:38.000Z
|
tests/unit/test_modulegraph/testpkg-regr7/script.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 5,150 |
2015-01-01T12:09:56.000Z
|
2022-03-31T18:06:12.000Z
|
tests/unit/test_modulegraph/testpkg-regr7/script.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 2,101 |
2015-01-03T10:25:27.000Z
|
2022-03-30T11:04:42.000Z
|
import pkg.mod as m
| 10 | 19 | 0.75 |
794952949420585d2ee169eb3494d5a1f7c0d3b4
| 136 |
py
|
Python
|
djparakeet/apps.py
|
misli/django-parakeet
|
0ae9af7a638f92fd51845bc10a3b72b8d34194a0
|
[
"BSD-2-Clause"
] | 12 |
2016-11-23T20:26:56.000Z
|
2021-05-27T02:23:35.000Z
|
djparakeet/apps.py
|
misli/django-parakeet
|
0ae9af7a638f92fd51845bc10a3b72b8d34194a0
|
[
"BSD-2-Clause"
] | 4 |
2019-09-23T11:45:45.000Z
|
2022-02-11T03:39:11.000Z
|
djparakeet/apps.py
|
misli/django-parakeet
|
0ae9af7a638f92fd51845bc10a3b72b8d34194a0
|
[
"BSD-2-Clause"
] | 2 |
2017-07-07T12:31:40.000Z
|
2019-09-23T11:22:52.000Z
|
from __future__ import unicode_literals
from django.apps import AppConfig
class DjparakeetConfig(AppConfig):
name = 'djparakeet'
| 17 | 39 | 0.801471 |
794952c361662f3a7530689773eef06eafcabcb0
| 6,976 |
py
|
Python
|
Lib/objc/_CoreDAV.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 701 |
2018-10-22T11:54:09.000Z
|
2022-03-31T14:39:30.000Z
|
Lib/objc/_CoreDAV.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 229 |
2018-10-24T09:15:31.000Z
|
2021-12-24T16:51:37.000Z
|
Lib/objc/_CoreDAV.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 131 |
2018-11-25T18:33:03.000Z
|
2022-03-24T03:18:07.000Z
|
"""
Classes from the 'CoreDAV' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
CoreDAVXMLElementGenerator = _Class("CoreDAVXMLElementGenerator")
CoreDAVXMLElementAttribute = _Class("CoreDAVXMLElementAttribute")
CoreDAVParseRule = _Class("CoreDAVParseRule")
CoreDAVItemParserMapping = _Class("CoreDAVItemParserMapping")
CoreDAVXMLData_Impl = _Class("CoreDAVXMLData_Impl")
CoreDAVExpandProperty = _Class("CoreDAVExpandProperty")
CoreDAVOctetStreamParser = _Class("CoreDAVOctetStreamParser")
CoreDAVOctetStreamToFileParser = _Class("CoreDAVOctetStreamToFileParser")
CoreDAVNullParser = _Class("CoreDAVNullParser")
CoreDAVXMLData = _Class("CoreDAVXMLData")
CoreDAVRequestLogger = _Class("CoreDAVRequestLogger")
CoreDAVSRVResourceRecord = _Class("CoreDAVSRVResourceRecord")
CoreDAVLogging = _Class("CoreDAVLogging")
CoreDAVLeafDataPayload = _Class("CoreDAVLeafDataPayload")
CoreDAVItem = _Class("CoreDAVItem")
CoreDAVMatchResultsItem = _Class("CoreDAVMatchResultsItem")
CoreDAVItemWithHrefChildItem = _Class("CoreDAVItemWithHrefChildItem")
CoreDAVItemWithHrefChildItemTolerateBarePayload = _Class(
"CoreDAVItemWithHrefChildItemTolerateBarePayload"
)
CoreDAVBulkRequestsSupportedItem = _Class("CoreDAVBulkRequestsSupportedItem")
CoreDAVBulkRequestsSetItem = _Class("CoreDAVBulkRequestsSetItem")
CoreDAVBulkRequestsItem = _Class("CoreDAVBulkRequestsItem")
CoreDAVPropertySearchItem = _Class("CoreDAVPropertySearchItem")
CoreDAVItemWithHrefChildren = _Class("CoreDAVItemWithHrefChildren")
CoreDAVResourceTypeItem = _Class("CoreDAVResourceTypeItem")
CoreDAVCalendarServerEmailAddressSetItem = _Class(
"CoreDAVCalendarServerEmailAddressSetItem"
)
CoreDAVCalendarServerTransportItem = _Class("CoreDAVCalendarServerTransportItem")
CoreDAVCalendarServerPushTransportsItem = _Class(
"CoreDAVCalendarServerPushTransportsItem"
)
CoreDAVSupportedReportSetItem = _Class("CoreDAVSupportedReportSetItem")
CoreDAVSupportedReportItem = _Class("CoreDAVSupportedReportItem")
CoreDAVSetItem = _Class("CoreDAVSetItem")
CoreDAVRemoveItem = _Class("CoreDAVRemoveItem")
CoreDAVPropStatItem = _Class("CoreDAVPropStatItem")
CoreDAVPropFindItem = _Class("CoreDAVPropFindItem")
CoreDAVPropertyUpdateItem = _Class("CoreDAVPropertyUpdateItem")
CoreDAVMultiStatusItem = _Class("CoreDAVMultiStatusItem")
CoreDAVSyncReportItem = _Class("CoreDAVSyncReportItem")
CoreDAVMkcolResponseItem = _Class("CoreDAVMkcolResponseItem")
CoreDAVLeafItem = _Class("CoreDAVLeafItem")
CoreDAVHrefItem = _Class("CoreDAVHrefItem")
CoreDAVCurrentUserPrincipalItem = _Class("CoreDAVCurrentUserPrincipalItem")
CoreDAVPrincipalSearchPropertySetItem = _Class("CoreDAVPrincipalSearchPropertySetItem")
CoreDAVPrincipalSearchPropertyItem = _Class("CoreDAVPrincipalSearchPropertyItem")
CoreDAVCurrentUserPrivilegeSetItem = _Class("CoreDAVCurrentUserPrivilegeSetItem")
CoreDAVItemWithNoChildren = _Class("CoreDAVItemWithNoChildren")
CoreDAVDenyItem = _Class("CoreDAVDenyItem")
CoreDAVGrantItem = _Class("CoreDAVGrantItem")
CoreDAVPrincipalItem = _Class("CoreDAVPrincipalItem")
CoreDAVInvertItem = _Class("CoreDAVInvertItem")
CoreDAVACEItem = _Class("CoreDAVACEItem")
CoreDAVACLItem = _Class("CoreDAVACLItem")
CoreDAVResponseItem = _Class("CoreDAVResponseItem")
CoreDAVErrorItem = _Class("CoreDAVErrorItem")
CoreDAVDiscoveryAccountInfo = _Class("CoreDAVDiscoveryAccountInfo")
CoreDAVTask = _Class("CoreDAVTask")
CoreDAVBulkChangeTask = _Class("CoreDAVBulkChangeTask")
CardDAVBulkChangeTask = _Class("CardDAVBulkChangeTask")
CoreDAVMultiPutTask = _Class("CoreDAVMultiPutTask")
CardDAVMultiPutTask = _Class("CardDAVMultiPutTask")
CoreDAVHeadTask = _Class("CoreDAVHeadTask")
CoreDAVGetTask = _Class("CoreDAVGetTask")
CardDAVFolderGetTask = _Class("CardDAVFolderGetTask")
CoreDAVGetToFileTask = _Class("CoreDAVGetToFileTask")
CoreDAVACLTask = _Class("CoreDAVACLTask")
CoreDAVPrincipalSearchPropertySetTask = _Class("CoreDAVPrincipalSearchPropertySetTask")
CoreDAVCopyOrMoveTask = _Class("CoreDAVCopyOrMoveTask")
CoreDAVMoveTask = _Class("CoreDAVMoveTask")
CoreDAVCopyTask = _Class("CoreDAVCopyTask")
CoreDAVSRVLookupTask = _Class("CoreDAVSRVLookupTask")
CoreDAVPropertyFindBaseTask = _Class("CoreDAVPropertyFindBaseTask")
CoreDAVExpandPropertiesTask = _Class("CoreDAVExpandPropertiesTask")
CoreDAVPrincipalPropertySearchTask = _Class("CoreDAVPrincipalPropertySearchTask")
CoreDAVSyncReportTask = _Class("CoreDAVSyncReportTask")
CoreDAVPropFindTask = _Class("CoreDAVPropFindTask")
CoreDAVOptionsTask = _Class("CoreDAVOptionsTask")
CoreDAVActionBackedTask = _Class("CoreDAVActionBackedTask")
CoreDAVPropPatchTask = _Class("CoreDAVPropPatchTask")
CoreDAVMkcolTask = _Class("CoreDAVMkcolTask")
CoreDAVPostOrPutTask = _Class("CoreDAVPostOrPutTask")
CoreDAVPostTask = _Class("CoreDAVPostTask")
CoreDAVPutTask = _Class("CoreDAVPutTask")
CoreDAVPutStreamTask = _Class("CoreDAVPutStreamTask")
CoreDAVDeleteTask = _Class("CoreDAVDeleteTask")
CoreDAVContainerQueryTask = _Class("CoreDAVContainerQueryTask")
CardDAVFolderQueryTask = _Class("CardDAVFolderQueryTask")
CoreDAVContainerMultiGetTask = _Class("CoreDAVContainerMultiGetTask")
CoreDAVFolderContentsMultiGetTask = _Class("CoreDAVFolderContentsMultiGetTask")
CardDAVFolderMultiGetTask = _Class("CardDAVFolderMultiGetTask")
CoreDAVContainer = _Class("CoreDAVContainer")
CardDAVAddressBookContainer = _Class("CardDAVAddressBookContainer")
CoreDAVAction = _Class("CoreDAVAction")
CoreDAVOrderedAction = _Class("CoreDAVOrderedAction")
CoreDAVTaskGroup = _Class("CoreDAVTaskGroup")
CoreDAVMultiMoveWithFallbackTaskGroup = _Class("CoreDAVMultiMoveWithFallbackTaskGroup")
CoreDAVMultiGetWithFallbackTaskGroup = _Class("CoreDAVMultiGetWithFallbackTaskGroup")
CoreDAVMoveWithFallbackTaskGroup = _Class("CoreDAVMoveWithFallbackTaskGroup")
CardDAVUpdateMeCardTaskGroup = _Class("CardDAVUpdateMeCardTaskGroup")
CoreDAVBulkUploadTaskGroup = _Class("CoreDAVBulkUploadTaskGroup")
CardDAVBulkUploadTaskGroup = _Class("CardDAVBulkUploadTaskGroup")
CoreDAVValidatePrincipalsTaskGroup = _Class("CoreDAVValidatePrincipalsTaskGroup")
CoreDAVRecursiveContainerSyncTaskGroup = _Class(
"CoreDAVRecursiveContainerSyncTaskGroup"
)
CoreDAVContainerInfoSyncTaskGroup = _Class("CoreDAVContainerInfoSyncTaskGroup")
CoreDAVUpdateACLTaskGroup = _Class("CoreDAVUpdateACLTaskGroup")
CoreDAVDiscoveryTaskGroup = _Class("CoreDAVDiscoveryTaskGroup")
CoreDAVContainerSyncTaskGroup = _Class("CoreDAVContainerSyncTaskGroup")
CardDAVFolderSyncTaskGroup = _Class("CardDAVFolderSyncTaskGroup")
CoreDAVContainerInfoTaskGroup = _Class("CoreDAVContainerInfoTaskGroup")
CardDAVFolderInfoTaskGroup = _Class("CardDAVFolderInfoTaskGroup")
CoreDAVGetAccountPropertiesTaskGroup = _Class("CoreDAVGetAccountPropertiesTaskGroup")
CardDAVGetAccountPropertiesTaskGroup = _Class("CardDAVGetAccountPropertiesTaskGroup")
CoreDAVXMLParser = _Class("CoreDAVXMLParser")
| 50.550725 | 87 | 0.856795 |
7949530bcecc894df11ffb30154714a4e277e1c9
| 23,314 |
py
|
Python
|
wisdem/drivetrainse/layout.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | 81 |
2015-01-19T18:17:31.000Z
|
2022-03-17T07:14:43.000Z
|
wisdem/drivetrainse/layout.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | 159 |
2015-02-05T01:54:52.000Z
|
2022-03-30T22:44:39.000Z
|
wisdem/drivetrainse/layout.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | 70 |
2015-01-02T15:22:39.000Z
|
2022-02-11T00:33:07.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import openmdao.api as om
import wisdem.commonse.utilities as util
from scipy.special import ellipeinc
from wisdem.commonse.cross_sections import IBeam
def rod_prop(s, Di, ti, rho):
L = s.max() - s.min()
def equal_pts(xi):
if len(xi) < len(s) and len(xi) == 2:
x = np.interp((s - s.min()) / L, [0, 1], xi)
elif len(xi) == len(s):
x = xi
else:
raise ValueError("Unknown grid of input", str(xi))
return x
D = equal_pts(Di)
t = equal_pts(ti)
y = 0.25 * rho * np.pi * (D ** 2 - (D - 2 * t) ** 2)
m = np.trapz(y, s)
cm = np.trapz(y * s, s) / m
Dm = D.mean()
tm = t.mean()
I = np.array(
[
0.5 * 0.25 * (Dm ** 2 + (Dm - 2 * tm) ** 2),
(1.0 / 12.0) * (3 * 0.25 * (Dm ** 2 + (Dm - 2 * tm) ** 2) + L ** 2),
(1.0 / 12.0) * (3 * 0.25 * (Dm ** 2 + (Dm - 2 * tm) ** 2) + L ** 2),
]
)
return m, cm, m * I
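# Quick worked example (illustrative numbers only, not part of WISDEM): for a uniform
# hollow rod the trapezoidal integration above reduces to mass = rho * A * L with the
# centre of mass at mid-span.
def _example_rod_prop():
    s = np.linspace(0.0, 2.0, 5)                      # 2 m rod, 5 stations
    m, cm, _ = rod_prop(s, np.r_[0.6, 0.6], np.r_[0.05, 0.05], 7850.0)
    A = 0.25 * np.pi * (0.6 ** 2 - 0.5 ** 2)          # annular cross-section area
    assert np.isclose(m, 7850.0 * A * 2.0) and np.isclose(cm, 1.0)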
class Layout(om.ExplicitComponent):
"""
Calculate lengths, heights, and diameters of key drivetrain components in a
direct drive system (valid for upwind or downwind).
Parameters
----------
upwind : boolean
Flag whether the design is upwind or downwind
L_12 : float, [m]
Length from bearing #1 to bearing #2
L_h1 : float, [m]
Length from hub / start of lss to bearing #1
L_generator : float, [m]
Generator stack width
overhang : float, [m]
Overhang of rotor from tower along x-axis in yaw-aligned c.s.
drive_height : float, [m]
Hub height above tower top
tilt : float, [deg]
Angle of drivetrain lss tilt
lss_diameter : numpy array[2], [m]
LSS outer diameter from hub to bearing 2
lss_wall_thickness : numpy array[2], [m]
LSS wall thickness
hub_diameter : float, [m]
Diameter of hub
D_top : float, [m]
Tower top outer diameter
lss_rho : float, [kg/m**3]
material density
bedplate_rho : float, [kg/m**3]
material density
Returns
-------
L_lss : float, [m]
        Length of lss
L_drive : float, [m]
        Length of drivetrain from bedplate to hub flange
s_lss : numpy array[5], [m]
LSS discretized s-coordinates
lss_mass : float, [kg]
LSS mass
lss_cm : float, [m]
LSS center of mass along lss axis from bedplate
lss_I : numpy array[3], [kg*m**2]
LSS moment of inertia around cm in axial (hub-aligned) c.s.
L_bedplate : float, [m]
Length of bedplate
H_bedplate : float, [m]
height of bedplate
bedplate_mass : float, [kg]
Bedplate mass
bedplate_cm : numpy array[3], [m]
Bedplate center of mass
bedplate_I : numpy array[6], [kg*m**2]
Bedplate mass moment of inertia about base
s_mb1 : float, [m]
Bearing 1 s-coordinate along drivetrain, measured from bedplate
s_mb2 : float, [m]
Bearing 2 s-coordinate along drivetrain, measured from bedplate
s_gearbox : float, [m]
Overall gearbox cm
s_generator : float, [m]
Overall generator cm
constr_length : float, [m]
Margin for drivetrain length and desired overhang distance (should be > 0)
constr_height : float, [m]
Margin for drivetrain height and desired hub height (should be > 0)
"""
def setup(self):
self.add_discrete_input("upwind", True)
self.add_input("L_12", 0.0, units="m")
self.add_input("L_h1", 0.0, units="m")
self.add_input("L_generator", 0.0, units="m")
self.add_input("overhang", 0.0, units="m")
self.add_input("drive_height", 0.0, units="m")
self.add_input("tilt", 0.0, units="deg")
self.add_input("lss_diameter", np.zeros(2), units="m")
self.add_input("lss_wall_thickness", np.zeros(2), units="m")
self.add_input("D_top", 0.0, units="m")
self.add_input("hub_diameter", val=0.0, units="m")
self.add_input("lss_rho", val=0.0, units="kg/m**3")
self.add_input("bedplate_rho", val=0.0, units="kg/m**3")
self.add_output("L_lss", 0.0, units="m")
self.add_output("L_drive", 0.0, units="m")
self.add_output("s_lss", val=np.zeros(5), units="m")
self.add_output("lss_mass", val=0.0, units="kg")
self.add_output("lss_cm", val=0.0, units="m")
self.add_output("lss_I", val=np.zeros(3), units="kg*m**2")
self.add_output("L_bedplate", 0.0, units="m")
self.add_output("H_bedplate", 0.0, units="m")
self.add_output("bedplate_mass", val=0.0, units="kg")
self.add_output("bedplate_cm", val=np.zeros(3), units="m")
self.add_output("bedplate_I", val=np.zeros(6), units="kg*m**2")
self.add_output("s_mb1", val=0.0, units="m")
self.add_output("s_mb2", val=0.0, units="m")
self.add_output("s_gearbox", val=0.0, units="m")
self.add_output("s_generator", val=0.0, units="m")
self.add_output("hss_mass", val=0.0, units="kg")
self.add_output("hss_cm", val=0.0, units="m")
self.add_output("hss_I", val=np.zeros(3), units="kg*m**2")
self.add_output("constr_length", 0.0, units="m")
self.add_output("constr_height", 0.0, units="m")
class DirectLayout(Layout):
"""
Calculate lengths, heights, and diameters of key drivetrain components in a
direct drive system (valid for upwind or downwind).
Parameters
----------
access_diameter : float, [m]
Minimum diameter required for maintenance access
nose_diameter : numpy array[2], [m]
Nose outer diameter from bearing 1 to bedplate
nose_wall_thickness : numpy array[2], [m]
Nose wall thickness
bedplate_wall_thickness : numpy array[4], [m]
Bedplate wall thickness
Returns
-------
L_nose : float, [m]
Length of nose
D_bearing1 : float, [m]
Diameter of bearing #1 (closer to hub)
D_bearing2 : float, [m]
Diameter of bearing #2 (closer to tower)
s_nose : numpy array[5], [m]
Nose discretized hub-aligned s-coordinates
nose_mass : float, [kg]
Nose mass
nose_cm : float, [m]
Nose center of mass along nose axis from bedplate
nose_I : numpy array[3], [kg*m**2]
Nose moment of inertia around cm in axial (hub-aligned) c.s.
x_bedplate : numpy array[12], [m]
Bedplate centerline x-coordinates
z_bedplate : numpy array[12], [m]
Bedplate centerline z-coordinates
x_bedplate_inner : numpy array[12], [m]
Bedplate lower curve x-coordinates
z_bedplate_inner : numpy array[12], [m]
Bedplate lower curve z-coordinates
x_bedplate_outer : numpy array[12], [m]
Bedplate outer curve x-coordinates
z_bedplate_outer : numpy array[12], [m]
Bedplate outer curve z-coordinates
D_bedplate : numpy array[12], [m]
Bedplate diameters
t_bedplate : numpy array[12], [m]
Bedplate wall thickness (mirrors input)
s_stator : float, [m]
Generator stator attachment to nose s-coordinate
s_rotor : float, [m]
Generator rotor attachment to lss s-coordinate
constr_access : numpy array[2], [m]
Margin for allowing maintenance access (should be > 0)
constr_ecc : float, [m]
Margin for bedplate ellipse eccentricity (should be > 0)
"""
def setup(self):
super().setup()
self.add_input("access_diameter", 0.0, units="m")
self.add_input("nose_diameter", np.zeros(2), units="m")
self.add_input("nose_wall_thickness", np.zeros(2), units="m")
self.add_input("bedplate_wall_thickness", np.zeros(4), units="m")
self.add_output("L_nose", 0.0, units="m")
self.add_output("D_bearing1", 0.0, units="m")
self.add_output("D_bearing2", 0.0, units="m")
self.add_output("s_nose", val=np.zeros(5), units="m")
self.add_output("nose_mass", val=0.0, units="kg")
self.add_output("nose_cm", val=0.0, units="m")
self.add_output("nose_I", val=np.zeros(3), units="kg*m**2")
self.add_output("x_bedplate", val=np.zeros(12), units="m")
self.add_output("z_bedplate", val=np.zeros(12), units="m")
self.add_output("x_bedplate_inner", val=np.zeros(12), units="m")
self.add_output("z_bedplate_inner", val=np.zeros(12), units="m")
self.add_output("x_bedplate_outer", val=np.zeros(12), units="m")
self.add_output("z_bedplate_outer", val=np.zeros(12), units="m")
self.add_output("D_bedplate", val=np.zeros(12), units="m")
self.add_output("t_bedplate", val=np.zeros(12), units="m")
self.add_output("s_stator", val=0.0, units="m")
self.add_output("s_rotor", val=0.0, units="m")
self.add_output("constr_access", np.zeros((2, 2)), units="m")
self.add_output("constr_ecc", 0.0, units="m")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
L_12 = float(inputs["L_12"])
L_h1 = float(inputs["L_h1"])
L_generator = float(inputs["L_generator"])
L_overhang = float(inputs["overhang"])
H_drive = float(inputs["drive_height"])
tilt = float(np.deg2rad(inputs["tilt"]))
D_access = float(inputs["access_diameter"])
D_nose = inputs["nose_diameter"]
D_lss = inputs["lss_diameter"]
D_top = float(inputs["D_top"])
D_hub = float(inputs["hub_diameter"])
t_nose = inputs["nose_wall_thickness"]
t_lss = inputs["lss_wall_thickness"]
t_bed = inputs["bedplate_wall_thickness"]
upwind = discrete_inputs["upwind"]
lss_rho = float(inputs["lss_rho"])
bedplate_rho = float(inputs["bedplate_rho"])
# ------- Discretization ----------------
L_grs = 0.5 * L_h1
L_gsn = L_generator - L_grs - L_12
L_2n = 2.0 * L_gsn
# Length of lss and nose
L_lss = L_12 + L_h1
L_nose = L_12 + L_2n
outputs["L_lss"] = L_lss
outputs["L_nose"] = L_nose
# Total length from bedplate to hub flange
ds = 0.5 * np.ones(2)
s_drive = np.cumsum(np.r_[0.0, L_2n * ds, L_12 * ds, L_h1 * ds])
L_drive = s_drive[-1]
outputs["L_drive"] = L_drive
# From Overhang input (dist from center of tower measured in yaw-aligned
# c.s.-parallel to ground), compute bedplate length and height
L_bedplate = L_overhang - (L_drive + 0.5 * D_hub) * np.cos(tilt)
constr_Ldrive = L_bedplate - 0.5 * D_top # Should be > 0
if constr_Ldrive < 0:
L_bedplate = 0.5 * D_top
H_bedplate = H_drive - (L_drive + 0.5 * D_hub) * np.sin(tilt) # Keep eccentricity under control
outputs["L_bedplate"] = L_bedplate
outputs["H_bedplate"] = H_bedplate
# Discretize the drivetrain from bedplate to hub
s_mb1 = s_drive[4]
s_mb2 = s_drive[2]
s_rotor = s_drive[-2]
s_stator = s_drive[1]
s_nose = s_drive[:5]
s_lss = s_drive[2:]
# Store outputs
# outputs['s_drive'] = np.sort(s_drive)
outputs["s_rotor"] = s_rotor
outputs["s_stator"] = s_stator
outputs["s_nose"] = s_nose
outputs["s_lss"] = s_lss
outputs["s_generator"] = 0.5 * (s_rotor + s_stator)
outputs["s_mb1"] = s_mb1
outputs["s_mb2"] = s_mb2
# ------------------------------------
# ------------ Bedplate geometry and coordinates -------------
# Define reference/centroidal axis
# Origin currently set like standard ellipse eqns, but will shift below to being at tower top
# The end point of 90 deg isn't exactly right for non-zero tilt, but leaving that for later
n_points = 12
if upwind:
rad = np.linspace(0.0, 0.5 * np.pi, n_points)
else:
rad = np.linspace(np.pi, 0.5 * np.pi, n_points)
# Make sure we have the right number of bedplate thickness points
t_bed = np.interp(rad, np.linspace(rad[0], rad[-1], len(t_bed)), t_bed)
# Centerline
x_c = L_bedplate * np.cos(rad)
z_c = H_bedplate * np.sin(rad)
# Points on the outermost ellipse
x_outer = (L_bedplate + 0.5 * D_top) * np.cos(rad)
z_outer = (H_bedplate + 0.5 * D_nose[0]) * np.sin(rad)
# Points on the innermost ellipse
x_inner = (L_bedplate - 0.5 * D_top) * np.cos(rad)
z_inner = (H_bedplate - 0.5 * D_nose[0]) * np.sin(rad)
# Cross-sectional properties
D_bed = np.sqrt((z_outer - z_inner) ** 2 + (x_outer - x_inner) ** 2)
r_bed_o = 0.5 * D_bed
r_bed_i = r_bed_o - t_bed
A_bed = np.pi * (r_bed_o ** 2 - r_bed_i ** 2)
# This finds the central angle (rad2) given the parametric angle (rad)
rad2 = np.arctan(L_bedplate / H_bedplate * np.tan(rad))
# arc length from eccentricity of the centroidal ellipse using incomplete elliptic integral of the second kind
if L_bedplate >= H_bedplate:
ecc = np.sqrt(1 - (H_bedplate / L_bedplate) ** 2)
arc = L_bedplate * np.diff(ellipeinc(rad2, ecc))
else:
ecc = np.sqrt(1 - (L_bedplate / H_bedplate) ** 2)
arc = H_bedplate * np.diff(ellipeinc(rad2, ecc))
# Mass and MoI properties
x_c_sec = util.nodal2sectional(x_c)[0]
z_c_sec = util.nodal2sectional(z_c)[0]
        # R_c_sec = np.sqrt( x_c_sec**2 + z_c_sec**2 ) # unnecessary
mass = util.nodal2sectional(A_bed)[0] * arc * bedplate_rho
mass_tot = mass.sum()
cm = np.array([np.sum(mass * x_c_sec), 0.0, np.sum(mass * z_c_sec)]) / mass_tot
# For I, could do integral over sectional I, rotate axes by rad2, and then parallel axis theorem
# we simplify by assuming lumped point mass. TODO: Find a good way to check this? Torus shell?
I_bed = util.assembleI(np.zeros(6))
for k in range(len(mass)):
r_bed_o_k = 0.5 * (r_bed_o[k] + r_bed_o[k + 1])
r_bed_i_k = 0.5 * (r_bed_i[k] + r_bed_i[k + 1])
I_sec = mass[k] * np.array(
[
0.5 * (r_bed_o_k ** 2 + r_bed_i_k ** 2),
(1.0 / 12.0) * (3 * (r_bed_o_k ** 2 + r_bed_i_k ** 2) + arc[k] ** 2),
(1.0 / 12.0) * (3 * (r_bed_o_k ** 2 + r_bed_i_k ** 2) + arc[k] ** 2),
]
)
I_sec_rot = util.rotateI(I_sec, 0.5 * np.pi - rad2[k], axis="y")
R_k = np.array([x_c_sec[k] - x_c[0], 0.0, z_c_sec[k]])
I_bed += util.assembleI(I_sec_rot) + mass[k] * (np.dot(R_k, R_k) * np.eye(3) - np.outer(R_k, R_k))
# Now shift origin to be at tower top
cm[0] -= x_c[0]
x_inner -= x_c[0]
x_outer -= x_c[0]
x_c -= x_c[0]
outputs["bedplate_mass"] = mass_tot
outputs["bedplate_cm"] = cm
outputs["bedplate_I"] = util.unassembleI(I_bed)
# Geometry outputs
outputs["x_bedplate"] = x_c
outputs["z_bedplate"] = z_c
outputs["x_bedplate_inner"] = x_inner
outputs["z_bedplate_inner"] = z_inner
outputs["x_bedplate_outer"] = x_outer
outputs["z_bedplate_outer"] = z_outer
outputs["D_bedplate"] = D_bed
outputs["t_bedplate"] = t_bed
# ------------------------------------
# ------- Constraints ----------------
outputs["constr_access"] = np.c_[D_lss - 2 * t_lss - D_nose - 0.25 * D_access, D_nose - 2 * t_nose - D_access]
outputs["constr_length"] = constr_Ldrive # Should be > 0
outputs["constr_height"] = H_bedplate # Should be > 0
outputs["constr_ecc"] = L_bedplate - H_bedplate # Should be > 0
# ------------------------------------
# ------- Nose, lss, and bearing properties ----------------
# Now is a good time to set bearing diameters
outputs["D_bearing1"] = D_lss[-1] - t_lss[-1] - D_nose[0]
outputs["D_bearing2"] = D_lss[-1] - t_lss[-1] - D_nose[-1]
# Compute center of mass based on area
m_nose, cm_nose, I_nose = rod_prop(s_nose, D_nose, t_nose, bedplate_rho)
outputs["nose_mass"] = m_nose
outputs["nose_cm"] = cm_nose
outputs["nose_I"] = I_nose
m_lss, cm_lss, I_lss = rod_prop(s_lss, D_lss, t_lss, lss_rho)
outputs["lss_mass"] = m_lss
outputs["lss_cm"] = cm_lss
outputs["lss_I"] = I_lss
class GearedLayout(Layout):
"""
Calculate lengths, heights, and diameters of key drivetrain components in a
geared drive system (valid for upwind or downwind).
|_Lgen|_Lhss|Lgear|dl|_L12_|_Lh1_|
|_____Llss_____|
|--|--|--|--|--|--|--|--|--|--|--|
0 1 2 3 4 5 6 7 8 9 10 11 (indices)
mb2 mb1
Parameters
----------
hss_diameter : numpy array[2], [m]
HSS outer diameter from hub to bearing 2
hss_wall_thickness : numpy array[2], [m]
HSS wall thickness
bedplate_flange_width : float, [m]
Bedplate is two parallel I beams, this is the flange width
bedplate_flange_thickness : float, [m]
Bedplate is two parallel I beams, this is the flange thickness
bedplate_web_thickness : float, [m]
Bedplate is two parallel I beams, this is the web thickness
bedplate_web_height : float, [m]
Bedplate is two parallel I beams, this is the web height
hss_rho : float, [kg/m**3]
material density
Returns
-------
s_drive : numpy array[12], [m]
Discretized, hub-aligned s-coordinates of the drivetrain starting at
generator and ending at hub flange
s_hss : numpy array[5], [m]
HSS discretized s-coordinates
hss_mass : float, [kg]
HSS mass
hss_cm : float, [m]
HSS center of mass along hss axis from bedplate
hss_I : numpy array[3], [kg*m**2]
HSS moment of inertia around cm in axial (hub-aligned) c.s.
s_gearbox : float, [m]
Gearbox (centroid) position in s-coordinates
s_generator : float, [m]
Generator (centroid) position in s-coordinates
"""
def setup(self):
super().setup()
self.add_input("L_hss", 0.0, units="m")
self.add_input("L_gearbox", 0.0, units="m")
self.add_input("hss_diameter", np.zeros(2), units="m")
self.add_input("hss_wall_thickness", np.zeros(2), units="m")
self.add_input("hss_rho", val=0.0, units="kg/m**3")
self.add_input("bedplate_flange_width", val=0.0, units="m")
self.add_input("bedplate_flange_thickness", val=0.0, units="m")
self.add_input("bedplate_web_thickness", val=0.0, units="m")
self.add_output("s_drive", val=np.zeros(12), units="m")
self.add_output("s_hss", val=np.zeros(3), units="m")
self.add_output("bedplate_web_height", val=0.0, units="m")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
upwind = discrete_inputs["upwind"]
Cup = -1.0 if upwind else 1.0
L_12 = float(inputs["L_12"])
L_h1 = float(inputs["L_h1"])
L_hss = float(inputs["L_hss"])
L_gearbox = float(inputs["L_gearbox"])
L_generator = float(inputs["L_generator"])
L_overhang = float(inputs["overhang"])
H_drive = float(inputs["drive_height"])
tilt = float(np.deg2rad(inputs["tilt"]))
D_lss = inputs["lss_diameter"]
t_lss = inputs["lss_wall_thickness"]
D_hss = inputs["hss_diameter"]
t_hss = inputs["hss_wall_thickness"]
D_top = float(inputs["D_top"])
D_hub = float(inputs["hub_diameter"])
bed_w_flange = float(inputs["bedplate_flange_width"])
bed_t_flange = float(inputs["bedplate_flange_thickness"])
# bed_h_web = float(inputs['bedplate_web_height'])
bed_t_web = float(inputs["bedplate_web_thickness"])
lss_rho = float(inputs["lss_rho"])
hss_rho = float(inputs["hss_rho"])
bedplate_rho = float(inputs["bedplate_rho"])
# ------- Discretization ----------------
# Length of lss and drivetrain length
delta = 0.1 # separation between MB2 and gearbox attachment
L_lss = L_12 + L_h1 + delta
L_drive = L_lss + L_gearbox + L_hss + L_generator
ds = 0.5 * np.ones(2)
s_drive = np.cumsum(np.r_[0.0, L_generator * ds, L_hss * ds, L_gearbox * ds, delta, L_12 * ds, L_h1 * ds])
L_drive = s_drive[-1] - s_drive[0]
outputs["L_drive"] = L_drive
outputs["L_lss"] = L_lss
# Put tower at 0 position
s_tower = s_drive[-1] + 0.5 * D_hub - L_overhang / np.cos(tilt)
s_drive -= s_tower
outputs["s_drive"] = s_drive
# Discretize the drivetrain from generator to hub
s_generator = s_drive[1]
s_mb1 = s_drive[9]
s_mb2 = s_drive[7]
s_gearbox = s_drive[5]
s_lss = s_drive[6:]
s_lss = np.r_[s_lss[:-2], s_lss[-1]] # Need to stick to 5 points
s_hss = s_drive[2:5]
# Store outputs
outputs["s_generator"] = s_generator
outputs["s_gearbox"] = s_gearbox
outputs["s_mb1"] = s_mb1
outputs["s_mb2"] = s_mb2
# ------------------------------------
# ------- hss, lss, and bearing properties ----------------
# Compute center of mass based on area
m_hss, cm_hss, I_hss = rod_prop(s_hss, D_hss, t_hss, hss_rho)
outputs["hss_mass"] = m_hss
outputs["hss_cm"] = cm_hss
outputs["hss_I"] = I_hss
outputs["s_hss"] = s_hss
m_lss, cm_lss, I_lss = rod_prop(s_lss, D_lss, t_lss, lss_rho)
outputs["lss_mass"] = m_lss
outputs["lss_cm"] = cm_lss
outputs["lss_I"] = I_lss
outputs["s_lss"] = s_lss
# ------- Bedplate I-beam properties ----------------
L_bedplate = L_drive * np.cos(tilt)
H_bedplate = H_drive - (L_drive + 0.5 * D_hub) * np.sin(tilt) # Subtract thickness of platform plate
outputs["L_bedplate"] = L_bedplate
outputs["H_bedplate"] = H_bedplate
bed_h_web = H_bedplate - 2 * bed_t_flange - 0.05 # Leave some extra room for plate?
yoff = 0.25 * D_top
myI = IBeam(bed_w_flange, bed_t_flange, bed_h_web, bed_t_web)
m_bedplate = myI.Area * L_bedplate * bedplate_rho
cg_bedplate = np.r_[Cup * (L_overhang - 0.5 * L_bedplate), 0.0, myI.CG] # from tower top
I_bedplate = (
bedplate_rho * L_bedplate * np.r_[myI.Jxx, myI.Iyy, myI.Izz]
+ m_bedplate * L_bedplate ** 2 / 12.0 * np.r_[0.0, 1.0, 1.0]
+ m_bedplate * yoff ** 2 * np.r_[1.0, 0.0, 1.0]
)
outputs["bedplate_web_height"] = bed_h_web
outputs["bedplate_mass"] = 2 * m_bedplate
outputs["bedplate_cm"] = cg_bedplate
outputs["bedplate_I"] = 2 * np.r_[I_bedplate, np.zeros(3)]
# ------- Constraints ----------------
outputs["constr_length"] = (L_drive + 0.5 * D_hub) * np.cos(tilt) - L_overhang - 0.5 * D_top # Should be > 0
outputs["constr_height"] = H_bedplate # Should be > 0
# ------------------------------------
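# Small worked example (illustrative numbers, not WISDEM defaults): how the twelve
# s_drive stations sketched in the GearedLayout docstring are laid out before the
# shift that puts the tower at s = 0.
def _example_s_drive_stations():
    L_generator, L_hss, L_gearbox, L_12, L_h1, delta = 2.0, 1.5, 1.0, 1.2, 1.0, 0.1
    ds = 0.5 * np.ones(2)
    s = np.cumsum(np.r_[0.0, L_generator * ds, L_hss * ds, L_gearbox * ds,
                        delta, L_12 * ds, L_h1 * ds])
    # index 5 is the gearbox centroid, 7 is bearing 2, 9 is bearing 1, 11 the hub flange
    assert np.allclose(s[[5, 7, 9, 11]], [4.0, 4.6, 5.8, 6.8])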
| 40.058419 | 118 | 0.581282 |
794954d32137a650cc7b5bf9ac8e657eeafcd06d
| 8,211 |
py
|
Python
|
sdk/storage/azure-storage-file-share/tests/test_handle.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2,728 |
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/storage/azure-storage-file-share/tests/test_handle.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773 |
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/storage/azure-storage-file-share/tests/test_handle.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916 |
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import pytest
from azure.storage.fileshare import (
ShareServiceClient,
ShareDirectoryClient,
ShareFileClient,
ShareClient
)
from devtools_testutils.storage import StorageTestCase
from settings.testcase import FileSharePreparer
# ------------------------------------------------------------------------------
TEST_SHARE_NAME = 'test'
TEST_SHARE_PREFIX = 'share'
# ------------------------------------------------------------------------------
class StorageHandleTest(StorageTestCase):
def _setup(self, storage_account_name, storage_account_key):
file_url = self.account_url(storage_account_name, "file")
credentials = storage_account_key
self.fsc = ShareServiceClient(account_url=file_url, credential=credentials)
self.test_shares = []
# --Helpers-----------------------------------------------------------------
def _get_share_reference(self, prefix=TEST_SHARE_PREFIX):
share_name = self.get_resource_name(prefix)
share = self.fsc.get_share_client(share_name)
self.test_shares.append(share)
return share
def _create_share(self, prefix=TEST_SHARE_PREFIX):
share_client = self._get_share_reference(prefix)
share = share_client.create_share()
return share_client
def _validate_handles(self, handles):
# Assert
self.assertIsNotNone(handles)
self.assertGreaterEqual(len(handles), 1)
self.assertIsNotNone(handles[0])
# verify basic fields
# path may or may not be present
# last_connect_time_string has been missing in the test
self.assertIsNotNone(handles[0].id)
self.assertIsNotNone(handles[0].file_id)
self.assertIsNotNone(handles[0].parent_id)
self.assertIsNotNone(handles[0].session_id)
self.assertIsNotNone(handles[0].client_ip)
self.assertIsNotNone(handles[0].open_time)
@FileSharePreparer()
def test_list_handles_on_share(self, storage_account_name, storage_account_key):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if self.is_live:
pytest.skip("Cannot run in live without manual setup")
self._setup(storage_account_name, storage_account_key)
share = self.fsc.get_share_client(TEST_SHARE_NAME)
root = share.get_directory_client()
# Act
handles = list(root.list_handles(recursive=True))
# Assert
self._validate_handles(handles)
#
@FileSharePreparer()
def test_list_handles_on_share_snapshot(self, storage_account_name, storage_account_key):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if self.is_live:
pytest.skip("Cannot run in live without manual setup")
self._setup(storage_account_name, storage_account_key)
share = self.fsc.get_share_client(TEST_SHARE_NAME, snapshot="2019-05-08T23:27:24.0000000Z")
root = share.get_directory_client()
# Act
handles = list(root.list_handles(recursive=True))
# Assert
self._validate_handles(handles)
@FileSharePreparer()
def test_list_handles_with_marker(self, storage_account_name, storage_account_key):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if self.is_live:
pytest.skip("Cannot run in live without manual setup")
self._setup(storage_account_name, storage_account_key)
share = self.fsc.get_share_client(TEST_SHARE_NAME)
root = share.get_directory_client()
# Act
handle_generator = root.list_handles(recursive=True, results_per_page=1).by_page()
handles = list(next(handle_generator))
# Assert
self.assertIsNotNone(handle_generator.continuation_token)
self._validate_handles(handles)
# Note down a handle that we saw
old_handle = handles[0]
# Continue listing
remaining_handles = list(next(
root.list_handles(recursive=True).by_page(
continuation_token=handle_generator.continuation_token)
))
self._validate_handles(handles)
# Make sure the old handle did not appear
# In other words, the marker worked
old_handle_not_present = all([old_handle.id != handle.id for handle in remaining_handles])
self.assertTrue(old_handle_not_present)
@FileSharePreparer()
def test_list_handles_on_directory(self, storage_account_name, storage_account_key):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if self.is_live:
pytest.skip("Cannot run in live without manual setup")
self._setup(storage_account_name, storage_account_key)
share = self.fsc.get_share_client(TEST_SHARE_NAME)
dir = share.get_directory_client('wut')
# Act
handles = list(dir.list_handles(recursive=True))
# Assert
self._validate_handles(handles)
# Act
handles = list(dir.list_handles(recursive=False))
# Assert recursive option is functioning when disabled
self.assertTrue(len(handles) == 0)
@FileSharePreparer()
def test_list_handles_on_file(self, storage_account_name, storage_account_key):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if self.is_live:
pytest.skip("Cannot run in live without manual setup")
self._setup(storage_account_name, storage_account_key)
share = self.fsc.get_share_client(TEST_SHARE_NAME)
client = share.get_file_client('wut/bla.txt')
# Act
handles = list(client.list_handles())
# Assert
self._validate_handles(handles)
@FileSharePreparer()
def test_close_single_handle(self, storage_account_name, storage_account_key):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if self.is_live:
pytest.skip("Cannot run in live without manual setup")
self._setup(storage_account_name, storage_account_key)
share = self.fsc.get_share_client(TEST_SHARE_NAME)
root = share.get_directory_client()
handles = list(root.list_handles(recursive=True))
self._validate_handles(handles)
# Act
with self.assertRaises(ValueError):
root.close_handle('*')
handles_info = root.close_handle(handles[0])
# Assert 1 handle has been closed
self.assertEqual(1, handles_info['closed_handles_count'])
self.assertEqual(handles_info['failed_handles_count'], 0)
@FileSharePreparer()
def test_close_all_handle(self, storage_account_name, storage_account_key):
# don't run live, since the test set up was highly manual
# only run when recording, or playing back in CI
if self.is_live:
pytest.skip("Cannot run in live without manual setup")
self._setup(storage_account_name, storage_account_key)
share = self.fsc.get_share_client(TEST_SHARE_NAME)
root = share.get_directory_client()
handles = list(root.list_handles(recursive=True))
self._validate_handles(handles)
# Act
handles_info = root.close_all_handles(recursive=True)
# Assert at least 1 handle has been closed
self.assertTrue(handles_info['closed_handles_count'] > 1)
self.assertEqual(handles_info['failed_handles_count'], 0)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 37.83871 | 99 | 0.650225 |
79495516d3d13abcd312f18b3e07647b058c417a
| 837 |
py
|
Python
|
recipes/Python/81984_Class_Adoption/recipe-81984.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023 |
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/81984_Class_Adoption/recipe-81984.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32 |
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/81984_Class_Adoption/recipe-81984.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780 |
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
import new  # provides classobj(), used below (Python 2 standard library)
def adopt_class(klass, obj, *args, **kwds):
'reclass obj to inherit klass; call __init__ with *args, **kwds'
classname = '%s_%s' % (klass.__name__, obj.__class__.__name__)
obj.__class__ = new.classobj(classname, (klass, obj.__class__), {})
klass.__init__(obj, *args, **kwds)
def demo():
class Sandwich:
def __init__(self, ingredients):
self.ingredients = ingredients
def __repr__(self):
return reduce((lambda a,b: a+' and '+b), self.ingredients)
class WithSpam:
def __init__(self, spam_count):
self.spam_count = spam_count
def __repr__(self):
return Sandwich.__repr__(self) + self.spam_count * ' and spam'
pbs = Sandwich(['peanut butter', 'jelly'])
adopt_class(WithSpam, pbs, 2)
print pbs
| 31 | 74 | 0.60454 |
79495605c0ef4436efbeb18efe25a05aada617b6
| 2,338 |
py
|
Python
|
world/mission.py
|
remremrem/EV-Tribute
|
c7dd412eedad4b8eba0cf2d4c95d539d4b80c852
|
[
"MIT"
] | 1 |
2015-06-23T03:48:03.000Z
|
2015-06-23T03:48:03.000Z
|
world/mission.py
|
remremrem/EV-Tribute
|
c7dd412eedad4b8eba0cf2d4c95d539d4b80c852
|
[
"MIT"
] | null | null | null |
world/mission.py
|
remremrem/EV-Tribute
|
c7dd412eedad4b8eba0cf2d4c95d539d4b80c852
|
[
"MIT"
] | null | null | null |
import missioncommander
class Mission:
def __init__(self, data):
self.idnum = data.get("idnum")
self.title = data.get("title")
self.mtype = data.get("mtype")
self.start = data.get("start")
self.finish = data.get("finish")
self.agency = data.get("agency")
self.filename = data.get("filename")
self.source = data.get("source")
self.description = data.get("description")
self.start = data.get("start")
self.finish = data.get("finish")
self.time = data.get("time")
self.cargo = data.get("cargo")
self.camount = data.get("camount")
self.passengers = data.get("passengers")
self.pamount = data.get("pamount")
self.target = data.get("target")
self.tname = data.get("tname")
self.tsystem = data.get("tsystem")
self.shiptype = data.get("shiptype")
self.escorts = data.get("escorts")
self.objective = data.get("objective")
self.payout = data.get("payout")
self.string = data.get("string")
self.stringfile = data.get("stringfile")
self.dialogues = data.get("dialogues")
class Delivery(Mission):
def __init__(self, data):
self.idnum = data.get("idnum")
self.title = data.get("title")
self.mtype = data.get("mtype")
self.start = data.get("start")
self.finish = data.get("finish")
self.agency = data.get("agency")
self.filename = data.get("filename")
self.source = data.get("source")
self.description = data.get("description")
self.start = data.get("start")
self.finish = data.get("finish")
self.time = data.get("time")
self.cargo = data.get("cargo")
self.camount = data.get("camount")
self.passengers = data.get("passengers")
self.pamount = data.get("pamount")
self.target = data.get("target")
self.tname = data.get("tname")
self.tsystem = data.get("tsystem")
self.shiptype = data.get("shiptype")
self.escorts = data.get("escorts")
self.objective = data.get("objective")
self.payout = data.get("payout")
self.string = data.get("string")
self.stringfile = data.get("stringfile")
self.dialogues = data.get("dialogues")
| 38.966667 | 50 | 0.581694 |
794956be76b9b4404613dc8d5014ef727a2548bf
| 4,105 |
py
|
Python
|
test/test_odict.py
|
droope/netlib
|
a3107474f9f336f28dc195f1406a4e035aa51c84
|
[
"MIT"
] | null | null | null |
test/test_odict.py
|
droope/netlib
|
a3107474f9f336f28dc195f1406a4e035aa51c84
|
[
"MIT"
] | null | null | null |
test/test_odict.py
|
droope/netlib
|
a3107474f9f336f28dc195f1406a4e035aa51c84
|
[
"MIT"
] | null | null | null |
from netlib import odict
import tutils
class TestODict:
def setUp(self):
self.od = odict.ODict()
def test_str_err(self):
h = odict.ODict()
tutils.raises(ValueError, h.__setitem__, "key", "foo")
def test_dictToHeader1(self):
self.od.add("one", "uno")
self.od.add("two", "due")
self.od.add("two", "tre")
expected = [
"one: uno\r\n",
"two: due\r\n",
"two: tre\r\n",
"\r\n"
]
out = repr(self.od)
for i in expected:
assert out.find(i) >= 0
def test_getset_state(self):
self.od.add("foo", 1)
self.od.add("foo", 2)
self.od.add("bar", 3)
state = self.od._get_state()
nd = odict.ODict._from_state(state)
assert nd == self.od
nd._load_state(state)
def test_dictToHeader2(self):
self.od["one"] = ["uno"]
expected1 = "one: uno\r\n"
expected2 = "\r\n"
out = repr(self.od)
assert out.find(expected1) >= 0
assert out.find(expected2) >= 0
def test_match_re(self):
h = odict.ODict()
h.add("one", "uno")
h.add("two", "due")
h.add("two", "tre")
assert h.match_re("uno")
assert h.match_re("two: due")
assert not h.match_re("nonono")
def test_in_any(self):
self.od["one"] = ["atwoa", "athreea"]
assert self.od.in_any("one", "two")
assert self.od.in_any("one", "three")
assert not self.od.in_any("one", "four")
assert not self.od.in_any("nonexistent", "foo")
assert not self.od.in_any("one", "TWO")
assert self.od.in_any("one", "TWO", True)
def test_iter(self):
assert not [i for i in self.od]
self.od.add("foo", 1)
assert [i for i in self.od]
def test_keys(self):
assert not self.od.keys()
self.od.add("foo", 1)
assert self.od.keys() == ["foo"]
self.od.add("foo", 2)
assert self.od.keys() == ["foo"]
self.od.add("bar", 2)
assert len(self.od.keys()) == 2
def test_copy(self):
self.od.add("foo", 1)
self.od.add("foo", 2)
self.od.add("bar", 3)
assert self.od == self.od.copy()
def test_del(self):
self.od.add("foo", 1)
self.od.add("Foo", 2)
self.od.add("bar", 3)
del self.od["foo"]
assert len(self.od.lst) == 2
def test_replace(self):
self.od.add("one", "two")
self.od.add("two", "one")
assert self.od.replace("one", "vun") == 2
assert self.od.lst == [
["vun", "two"],
["two", "vun"],
]
def test_get(self):
self.od.add("one", "two")
assert self.od.get("one") == ["two"]
assert self.od.get("two") == None
def test_get_first(self):
self.od.add("one", "two")
self.od.add("one", "three")
assert self.od.get_first("one") == "two"
assert self.od.get_first("two") == None
class TestODictCaseless:
def setUp(self):
self.od = odict.ODictCaseless()
def test_override(self):
o = odict.ODictCaseless()
o.add('T', 'application/x-www-form-urlencoded; charset=UTF-8')
o["T"] = ["foo"]
assert o["T"] == ["foo"]
def test_case_preservation(self):
self.od["Foo"] = ["1"]
assert "foo" in self.od
assert self.od.items()[0][0] == "Foo"
assert self.od.get("foo") == ["1"]
assert self.od.get("foo", [""]) == ["1"]
assert self.od.get("Foo", [""]) == ["1"]
assert self.od.get("xx", "yy") == "yy"
def test_del(self):
self.od.add("foo", 1)
self.od.add("Foo", 2)
self.od.add("bar", 3)
del self.od["foo"]
assert len(self.od) == 1
def test_keys(self):
assert not self.od.keys()
self.od.add("foo", 1)
assert self.od.keys() == ["foo"]
self.od.add("Foo", 2)
assert self.od.keys() == ["foo"]
self.od.add("bar", 2)
assert len(self.od.keys()) == 2
| 28.310345 | 70 | 0.503045 |
794956d0c37bace0066b09d1821415fa813dc738
| 5,568 |
py
|
Python
|
wrappers/tensorflow/example5 - denoise.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 6,457 |
2016-01-21T03:56:07.000Z
|
2022-03-31T11:57:15.000Z
|
wrappers/tensorflow/example5 - denoise.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 8,393 |
2016-01-21T09:47:28.000Z
|
2022-03-31T22:21:42.000Z
|
wrappers/tensorflow/example5 - denoise.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 4,874 |
2016-01-21T09:20:08.000Z
|
2022-03-31T15:18:00.000Z
|
import pyrealsense2 as rs
import numpy as np
import cv2
from tensorflow import keras
import time, sys
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.infrared, 1, 848, 480, rs.format.y8, 30) # 1 for left frame
# Start streaming
pipeline.start(config)
channels = 2
cropped_w, cropped_h = 480, 480
test_model_name = ""
if (len(sys.argv) > 1):
test_model_name = str(sys.argv[1])
t1 = time.perf_counter()
model = keras.models.load_model(test_model_name)
t2 = time.perf_counter()
print('model loading : ', t2 - t1, 'seconds')
def predict(noisy_image, ir_image):
t1 = time.perf_counter()
ir_image = np.array(ir_image).astype("uint16")
cropped_ir , cropped_noisy = [], []
width, height = 848, 480
w, h = cropped_w, cropped_h
for col_i in range(0, width, w):
for row_i in range(0, height, h):
cropped_ir.append(ir_image[row_i:row_i+h, col_i:col_i+w])
cropped_noisy.append(noisy_image[row_i:row_i+h, col_i:col_i+w])
# fill with zero to get size 480x480 for both images
fill = np.zeros((h, w - cropped_ir[-1].shape[1]), dtype="uint16")
cropped_ir[-1] = np.hstack((cropped_ir[-1], fill))
cropped_noisy[-1] = np.hstack((cropped_noisy[-1], fill))
t2 = time.perf_counter()
print('image cropping : ', t2 - t1, 'seconds')
cropped_image_offsets = [(0,0), (0,480)]
whole_image = np.zeros((height, width, channels), dtype="float32")
for i in range(len(cropped_ir)):
t1 = time.perf_counter()
noisy_images_plt = cropped_noisy[i].reshape(1, cropped_w, cropped_h, 1)
ir_images_plt = cropped_ir[i].reshape(1, cropped_w, cropped_h, 1)
im_and_ir = np.stack((noisy_images_plt, ir_images_plt), axis=3)
im_and_ir = im_and_ir.reshape(1, cropped_w, cropped_h, channels)
img = np.array(im_and_ir)
# Parse numbers as floats
img = img.astype('float32')
        # Normalize data: scale the 16-bit depth/IR values into the [0, 1] range
img = img / 65535
sample = img
row, col = cropped_image_offsets[i]
t2 = time.perf_counter()
print('image channeling : ', t2 - t1, 'seconds')
t1 = time.perf_counter()
denoised_image = model.predict(sample)
t2 = time.perf_counter()
print('prediction only : ', t2 - t1, 'seconds')
row_end = row + cropped_h
col_end = col + cropped_w
denoised_row = cropped_h
denoised_col = cropped_w
if row + cropped_h >= height:
row_end = height - 1
denoised_row = abs(row - row_end)
if col + cropped_w >= width:
col_end = width - 1
denoised_col = abs(col - col_end)
# combine tested images
whole_image[row:row_end, col:col_end] = denoised_image[:, 0:denoised_row, 0:denoised_col, :]
return whole_image[:, :, 0]
#=============================================================================================================
def convert_image(i):
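    # Normalize the depth values to 8 bits, histogram-equalize, apply the JET
    # colormap and annotate the result with its min/max range in meters.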
m = np.min(i)
M = np.max(i)
    # Shift so the minimum maps to zero, then scale by the value range into [0, 1]
    i = (i - m).astype(float)
    i = np.divide(i, np.array([M - m], dtype=float)).astype(float)
i8 = (i * 255.0).astype(np.uint8)
if i8.ndim == 3:
i8 = cv2.cvtColor(i8, cv2.COLOR_BGRA2GRAY)
i8 = cv2.equalizeHist(i8)
colorized = cv2.applyColorMap(i8, cv2.COLORMAP_JET)
colorized[i8 == int(m)] = 0
font = cv2.FONT_HERSHEY_SIMPLEX
m = float("{:.2f}".format(m))
M = float("{:.2f}".format(M))
colorized = cv2.putText(colorized, str(m) + " .. " + str(M) + "[m]", (20, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
return colorized
try:
c = rs.colorizer()
while True:
print("==============================================================")
t0 = time.perf_counter()
# Wait for a coherent pair of frames: depth and ir
t1 = time.perf_counter()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
ir_frame = frames.get_infrared_frame()
t2 = time.perf_counter()
print('getting depth + ir frames : ', t2 - t1, 'seconds')
if not depth_frame or not ir_frame:
continue
# Convert images to numpy arrays
t1 = time.perf_counter()
depth_image = np.asanyarray(depth_frame.get_data())
ir_image = np.asanyarray(ir_frame.get_data())
t2 = time.perf_counter()
print('convert frames to numpy arrays : ', t2 - t1, 'seconds')
t1 = time.perf_counter()
predicted_image = predict(depth_image, ir_image)
t2 = time.perf_counter()
print('processing + prediction : ', t2 - t1, 'seconds')
# Stack both images horizontally
# depth_image = convert_image(depth_image)
t1 = time.perf_counter()
depth_image = np.asanyarray(c.process(depth_frame).get_data())
predicted_image = convert_image(predicted_image)
red = depth_image[:, :, 2].copy()
blue = depth_image[:, :, 0].copy()
depth_image[:, :, 0] = red
depth_image[:, :, 2] = blue
images = np.hstack((depth_image, predicted_image))
# Show images
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
cv2.waitKey(1)
t2 = time.perf_counter()
print('show image : ', t2 - t1, 'seconds')
print('TOTAL TIME : ', t2 - t0, 'seconds')
finally:
# Stop streaming
pipeline.stop()
| 36.155844 | 124 | 0.598958 |
7949577d9aee12cfd5cc307b16f5fc6d0c6206e3 | 412 | py | Python | tests/pyre.pkg/config/configurator.py | rtburns-jpl/pyre | ffc4fc1b2936e355f709d084eb4055954960b3a2 | ["BSD-3-Clause"] | null | null | null | tests/pyre.pkg/config/configurator.py | rtburns-jpl/pyre | ffc4fc1b2936e355f709d084eb4055954960b3a2 | ["BSD-3-Clause"] | 1 | 2021-06-10T23:42:13.000Z | 2021-06-10T23:42:13.000Z | tests/pyre.pkg/config/configurator.py | jlmaurer/pyre | 6af38a83621d7d6228d147b4bb94f97fbb10f6e2 | ["BSD-3-Clause"] | 2 | 2020-08-31T18:07:52.000Z | 2021-12-10T08:54:39.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2020 all rights reserved
#
"""
Sanity check: verify that the configurator factory is accessible
"""
def test():
# access the package
import pyre
# and return the configurator built by the executive
return pyre.executive.configurator
# main
if __name__ == "__main__":
test()
# end of file
| 14.714286 | 64 | 0.667476 |
79495994e36f58dbc41b2b957f3cee7a91d51e4d | 5,097 | py | Python | python/oneflow/test/modules/test_sub.py | LiPengze97/oneflow | 1c1d2d3faa1c02d20e009046a290cf1095ee12e0 | ["Apache-2.0"] | null | null | null | python/oneflow/test/modules/test_sub.py | LiPengze97/oneflow | 1c1d2d3faa1c02d20e009046a290cf1095ee12e0 | ["Apache-2.0"] | null | null | null | python/oneflow/test/modules/test_sub.py | LiPengze97/oneflow | 1c1d2d3faa1c02d20e009046a290cf1095ee12e0 | ["Apache-2.0"] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_sub_impl(test_case, shape, device):
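    # Compare flow.sub against np.subtract for tensor-tensor, scalar-tensor and
    # broadcast operands, and check the gradients of a summed output.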
x = flow.tensor(
np.random.randn(*shape),
dtype=flow.float32,
device=flow.device(device),
requires_grad=True,
)
y = flow.tensor(
np.random.randn(*shape),
dtype=flow.float32,
device=flow.device(device),
requires_grad=True,
)
of_out = flow.sub(x, y)
np_out = np.subtract(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
of_out = of_out.sum()
of_out.backward()
np_grad_x = np.ones(shape)
np_grad_y = -np.ones(shape)
test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad_x, 1e-05, 1e-05))
test_case.assertTrue(np.allclose(y.grad.numpy(), np_grad_y, 1e-05, 1e-05))
x = 5
y = flow.tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.sub(x, y)
np_out = np.subtract(x, y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
x = flow.tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
y = 5
of_out = flow.sub(x, y)
np_out = np.subtract(x.numpy(), y)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
x = flow.tensor(
np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)
)
y = flow.tensor(
np.random.randn(1, 1), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.sub(x, y)
np_out = np.subtract(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
x = flow.tensor(np.array([5.0]), dtype=flow.float32)
y = flow.tensor(np.random.randn(1, 1), dtype=flow.float32)
of_out = flow.sub(x, y)
np_out = np.subtract(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
x = flow.tensor(np.random.randn(1, 1), dtype=flow.float32, requires_grad=True)
y = flow.tensor(np.array([5.0]), dtype=flow.float32, requires_grad=True)
of_out = flow.sub(x, y)
np_out = np.subtract(x.numpy(), y.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
of_out = of_out.sum()
of_out.backward()
np_grad_x = np.ones((1, 1))
np_grad_y = -np.ones(1)
test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad_x, 1e-05, 1e-05))
test_case.assertTrue(np.allclose(y.grad.numpy(), np_grad_y, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestSubModule(flow.unittest.TestCase):
def test_sub(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
_test_sub_impl(test_case, *arg)
def test_sub_against_pytorch(test_case):
arg_dict = OrderedDict()
arg_dict["test_type"] = [test_flow_against_pytorch, test_tensor_against_pytorch]
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["op"] = ["sub"]
for arg in GenArgList(arg_dict):
arg[0](
test_case,
arg[2],
extra_annotations={"other": flow.Tensor},
extra_generators={
"input": random_tensor(ndim=2, dim0=2, dim1=3),
"other": random_tensor(ndim=2, dim0=2, dim1=3),
},
device=arg[1],
)
arg[0](
test_case,
arg[2],
extra_annotations={"other": float},
extra_generators={
"input": random_tensor(ndim=2, dim0=2, dim1=3),
"other": random(0, 5),
},
device=arg[1],
)
@autotest(auto_backward=False, check_graph=False)
def test_sub_with_0shape_data(test_case):
device = random_device()
x = random_pytorch_tensor(2, 0, 3).to(device)
y = random_pytorch_tensor(2, 1, 3).to(device)
out1 = x - y
out2 = x - 2
out3 = 2 - x
out4 = torch.sub(x, y)
return out1, out2, out3, out4
if __name__ == "__main__":
unittest.main()
| 35.395833 | 88 | 0.620954 |
794959a0687f981afccc92ffcd73474a32a38538 | 3,786 | py | Python | tap_list_providers/parsers/thenook.py | it-avenger/hsv-beer | 039611bbcf260ad24c9a829fca0af96b4c7da014 | ["Apache-2.0"] | null | null | null | tap_list_providers/parsers/thenook.py | it-avenger/hsv-beer | 039611bbcf260ad24c9a829fca0af96b4c7da014 | ["Apache-2.0"] | 6 | 2020-08-03T09:50:01.000Z | 2021-06-10T18:17:28.000Z | tap_list_providers/parsers/thenook.py | cartacode/hsv_dot_beer | 039611bbcf260ad24c9a829fca0af96b4c7da014 | ["Apache-2.0"] | null | null | null |
"""HTML scraper for The Nook"""
from decimal import Decimal
import logging
import os
from bs4 import BeautifulSoup
import requests
import configurations
from django.core.exceptions import ImproperlyConfigured, AppRegistryNotReady
# boilerplate code necessary for launching outside manage.py
try:
from ..base import BaseTapListProvider
except (ImproperlyConfigured, AppRegistryNotReady):
os.environ['DJANGO_SETTINGS_MODULE'] = 'hsv_dot_beer.config'
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
configurations.setup()
from ..base import BaseTapListProvider
from beers.models import Manufacturer
from taps.models import Tap
LOG = logging.getLogger(__name__)
class NookParser(BaseTapListProvider):
"""Parser for The Nook's static HTML page
Note: because The Nook uses rows inside of columns instead of the more
common reverse, we essentially have to look up the components we need
separately and then zip() them together.
"""
provider_name = 'nook_html'
NAME_COLUMN_ID = 'id9'
STYLE_COLUMN_ID = 'id10'
BREWERY_COLUMN_ID = 'id12'
ABV_COLUMN_ID = 'id17'
def __init__(self):
self.parser = None
super().__init__()
def fetch_html(self, url):
self.parser = BeautifulSoup(requests.get(url).content, 'html.parser')
def dump_html(self):
print(self.parser.prettify())
def get_names(self):
return list(
i.contents[0].replace('*', '').strip() for i in
self.parser.find(id=self.NAME_COLUMN_ID).find_all('p')
)
def get_abvs(self):
return list(
Decimal(i.contents[0].strip())
# weirdly, the ABVs are in spans
for i in self.parser.find(id=self.ABV_COLUMN_ID).find_all('span')
)
def get_manufacturers(self):
return list(
i.contents[0] for i in
self.parser.find(id=self.BREWERY_COLUMN_ID).find_all('p')
)
def get_styles(self):
return list(
i.contents[0].strip()
for i in self.parser.find(id=self.STYLE_COLUMN_ID).find_all('p')
)
def venue_details(self):
return enumerate(zip(
self.get_names(), self.get_manufacturers(), self.get_abvs(),
self.get_styles(),
))
def handle_venue(self, venue):
url = venue.api_configuration.url
self.fetch_html(url)
taps = {tap.tap_number: tap for tap in venue.taps.all()}
manufacturers = {mfg.name: mfg for mfg in Manufacturer.objects.filter(
name__in=self.get_manufacturers()
)}
for index, (name, mfg, abv, style) in self.venue_details():
tap_number = index + 1
# 1. get the tap
try:
tap = taps[tap_number]
except KeyError:
tap = Tap(venue=venue, tap_number=tap_number)
# 2. get the mfg
try:
manufacturer = manufacturers[mfg]
except KeyError:
manufacturer = self.get_manufacturer(name=mfg)
manufacturers[manufacturer.name] = manufacturer
# 3. get the beer
beer = self.get_beer(
name, manufacturer, abv=abv, style=style,
)
if tap.beer_id != beer.id:
tap.beer = beer
# only save if beer changed so as not to disturb updated time
LOG.debug('Saving %s on tap %s', beer, tap.tap_number)
tap.save()
else:
LOG.debug(
'Not saving changes to beer %s on tap %s', beer, tap.tap_number,
)
| 32.637931 | 85 | 0.588484 |
79495a9d94443bf9897dd4195fa123d37b177244 | 762 | py | Python | generators/skip.py | CodyKochmann/generators | a637bf9cb5e48251aa800753ba0aa79b3ca18dcf | ["MIT"] | 6 | 2017-12-21T04:32:35.000Z | 2022-02-15T07:06:45.000Z | generators/skip.py | CodyKochmann/generators | a637bf9cb5e48251aa800753ba0aa79b3ca18dcf | ["MIT"] | 21 | 2017-09-08T13:02:18.000Z | 2020-03-28T19:10:01.000Z | generators/skip.py | CodyKochmann/generators | a637bf9cb5e48251aa800753ba0aa79b3ca18dcf | ["MIT"] | 2 | 2018-09-30T16:16:10.000Z | 2019-05-06T02:16:11.000Z |
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date: 2018-02-17 10:40:09
# @Last Modified by: Cody Kochmann
# @Last Modified time: 2018-02-17 11:59:23
from itertools import islice
def skip(pipe, how_many=1):
''' this is a helper function that allows you to skip x number of items
        in a pipe. It's basically the same as calling next() on a generator
multiple times to move down the generator's stream.
The return value is the pipe that has now skipped x number of steps
'''
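    # Example (hypothetical pipe): skip(iter("abcdef"), 2) consumes "a" and "b",
    # so the next value drawn from the returned iterator is "c".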
for _ in islice(pipe, how_many):
pass
return pipe
if __name__ == '__main__':
g = iter(range(10))
print(next(g))
print(next(g))
print(next(g))
skip(g,3)
print(next(g))
print(next(g))
print(next(g))
| 25.4 | 75 | 0.632546 |
79495b02cafecb3fedc0a62f7079efa01338e191 | 438 | py | Python | examples/auto_connect.py | stickpin/pyatv | cb45bf5d303593a4e0be05215f21140e2fbbb03d | ["MIT"] | null | null | null | examples/auto_connect.py | stickpin/pyatv | cb45bf5d303593a4e0be05215f21140e2fbbb03d | ["MIT"] | null | null | null | examples/auto_connect.py | stickpin/pyatv | cb45bf5d303593a4e0be05215f21140e2fbbb03d | ["MIT"] | null | null | null |
"""Simple example that connects to a device with autodiscover."""
from pyatv import helpers
# Method that is dispatched by the asyncio event loop
async def print_what_is_playing(atv):
"""Print what is playing for the discovered device."""
playing = await atv.metadata.playing()
print('Currently playing:')
print(playing)
# logout is automatically performed by auto_connect
helpers.auto_connect(print_what_is_playing)
| 27.375 | 65 | 0.762557 |
79495bce3c41b8e34fdaa67622cac6da4443a55f | 211 | py | Python | Recursion/CodingNinjas/15_replace_pi.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | ["Apache-2.0"] | null | null | null | Recursion/CodingNinjas/15_replace_pi.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | ["Apache-2.0"] | null | null | null | Recursion/CodingNinjas/15_replace_pi.py | sounak95/100_days_of_code | 50fbf088ce6ab2137aa216a30e3b3f828b278a22 | ["Apache-2.0"] | null | null | null |
def replacePi(s):
if len(s)==1 or len(s)==0:
return s
if s.startswith("pi"):
return "3.14" + replacePi(s[2:])
else:
return s[0]+ replacePi(s[1:])
print(replacePi("pippi"))
| 17.583333 | 40 | 0.526066 |
79495cbef9be30d277aaf21f7705033679fce11a | 663 | py | Python | 0623_session/project/blog/migrations/0001_initial.py | olzlgur/Like_lion | ac55cd5a0dd81863cb9481b1c7635d629d409660 | ["MIT"] | null | null | null | 0623_session/project/blog/migrations/0001_initial.py | olzlgur/Like_lion | ac55cd5a0dd81863cb9481b1c7635d629d409660 | ["MIT"] | null | null | null | 0623_session/project/blog/migrations/0001_initial.py | olzlgur/Like_lion | ac55cd5a0dd81863cb9481b1c7635d629d409660 | ["MIT"] | null | null | null |
# Generated by Django 3.2.4 on 2021-06-23 08:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('writer', models.CharField(max_length=100)),
('pub_date', models.DateTimeField(auto_now=True)),
('body', models.TextField()),
],
),
]
| 26.52 | 117 | 0.562594 |
79495e072504b3a9a4fdbdc75efb447f2d53d9e9 | 1,794 | py | Python | setup.py | yottahawk/edalize | f656a6897740359605a5270566359bd2739fe32e | ["BSD-2-Clause"] | 2 | 2020-01-14T03:14:55.000Z | 2022-02-18T16:55:36.000Z | setup.py | yottahawk/edalize | f656a6897740359605a5270566359bd2739fe32e | ["BSD-2-Clause"] | null | null | null | setup.py | yottahawk/edalize | f656a6897740359605a5270566359bd2739fe32e | ["BSD-2-Clause"] | null | null | null |
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "edalize",
version = "0.1.6",
packages=['edalize'],
package_data = {'edalize' : [
'templates/icestorm/icestorm-makefile.j2',
'templates/spyglass/Makefile.j2',
'templates/spyglass/spyglass-project.prj.j2',
'templates/spyglass/spyglass-run-goal.tcl.j2',
'templates/vcs/Makefile.j2',
'templates/vivado/vivado-makefile.j2',
'templates/vivado/vivado-program.tcl.j2',
'templates/vivado/vivado-project.tcl.j2',
'templates/vivado/vivado-run.tcl.j2',
'templates/vivado/vivado-synth.tcl.j2',
'templates/quartus/quartus-project.tcl.j2',
'templates/quartus/quartus-std-makefile.j2',
'templates/quartus/quartus-pro-makefile.j2',
'templates/trellis/trellis-makefile.j2',
'templates/ascentlint/Makefile.j2',
'templates/ascentlint/run-ascentlint.tcl.j2',
]},
author = "Olof Kindgren",
author_email = "olof.kindgren@gmail.com",
description = ("Edalize is a library for interfacing EDA tools, primarily for FPGA development"),
license = "BSD-2-Clause",
keywords = ["VHDL", "verilog", "EDA", "hdl", "rtl", "synthesis", "FPGA", "simulation", "Xilinx", "Altera"],
url = "https://github.com/olofk/edalize",
long_description=read('README.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
"Topic :: Utilities",
],
install_requires=[
'pytest>=3.3.0',
'Jinja2>=2.8',
],
tests_require=[
'pyyaml',
],
)
| 35.88 | 111 | 0.625975 |
79496058161f29136c35f9bab2fc8957d30eae38 | 15,445 | py | Python | test/test_cli.py | petermarko/dxf | 561433b69be585138c06d7b34e669c868495887f | ["MIT"] | null | null | null | test/test_cli.py | petermarko/dxf | 561433b69be585138c06d7b34e669c868495887f | ["MIT"] | null | null | null | test/test_cli.py | petermarko/dxf | 561433b69be585138c06d7b34e669c868495887f | ["MIT"] | null | null | null |
import os
import sys
import errno
import time
import hashlib
import requests.exceptions
import pytest
import tqdm
import dxf.main
# pylint: disable=no-member
def test_empty(dxf_main, capsys):
assert dxf.main.doit(['list-repos'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def _not_found(dxf_main, name):
assert dxf.main.doit(['blob-size', pytest.repo, name], dxf_main) == errno.ENOENT
def test_not_found(dxf_main):
_not_found(dxf_main, pytest.blob1_hash)
_not_found(dxf_main, pytest.blob2_hash)
_not_found(dxf_main, '@fooey')
def test_push_blob(dxf_main, capsys):
assert dxf.main.doit(['push-blob', pytest.repo, pytest.blob1_file], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
assert dxf.main.doit(['push-blob', pytest.repo, pytest.blob2_file], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob2_hash + os.linesep
assert err == ""
assert dxf.main.doit(['get-alias', pytest.repo, 'fooey'], dxf_main) == errno.ENOENT
out, err = capsys.readouterr()
assert out == ""
assert err.index('Not Found') >= 0
assert dxf.main.doit(['push-blob', pytest.repo, pytest.blob1_file, '@fooey'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
assert dxf.main.doit(['get-alias', pytest.repo, 'fooey'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
assert dxf.main.doit(['list-repos'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.repo + os.linesep
assert err == ""
def _pull_blob(dxf_main, name, dgst, capfd):
assert dxf.main.doit(['pull-blob', pytest.repo, name], dxf_main) == 0
# pylint: disable=protected-access
capfd._capture.out.tmpfile.encoding = None
out, err = capfd.readouterr()
sha256 = hashlib.sha256()
sha256.update(out)
assert 'sha256:' + sha256.hexdigest() == dgst
assert err == ""
def test_pull_blob(dxf_main, capfd):
environ = {'DXF_BLOB_INFO': '1'}
environ.update(dxf_main)
assert dxf.main.doit(['pull-blob', pytest.repo, pytest.blob1_hash, pytest.blob2_hash], environ) == 0
# pylint: disable=protected-access
capfd._capture.out.tmpfile.encoding = None
out, err = capfd.readouterr()
out_sha256 = hashlib.sha256()
out_sha256.update(out)
expected_sha256 = hashlib.sha256()
expected_sha256.update(pytest.blob1_hash.encode('utf-8'))
expected_sha256.update(b' ')
expected_sha256.update(str(pytest.blob1_size).encode('utf-8'))
expected_sha256.update(os.linesep.encode('utf-8'))
with open(pytest.blob1_file, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
expected_sha256.update(chunk)
expected_sha256.update(pytest.blob2_hash.encode('utf-8'))
expected_sha256.update(b' ')
expected_sha256.update(str(pytest.blob2_size).encode('utf-8'))
expected_sha256.update(os.linesep.encode('utf-8'))
with open(pytest.blob2_file, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
expected_sha256.update(chunk)
assert out_sha256.digest() == expected_sha256.digest()
assert err == ""
_pull_blob(dxf_main, pytest.blob1_hash, pytest.blob1_hash, capfd)
_pull_blob(dxf_main, pytest.blob2_hash, pytest.blob2_hash, capfd)
_pull_blob(dxf_main, '@fooey', pytest.blob1_hash, capfd)
def test_progress(dxf_main, capfd):
environ = {'DXF_PROGRESS': '1'}
environ.update(dxf_main)
assert dxf.main.doit(['pull-blob', pytest.repo, pytest.blob1_hash], environ) == 0
_, err = capfd.readouterr()
assert pytest.blob1_hash[0:8] in err
assert " 0%" in err
assert " 100%" in err
assert " " + str(pytest.blob1_size) + "/" + str(pytest.blob1_size) in err
assert dxf.main.doit(['push-blob', pytest.repo, pytest.blob3_file], environ) == 0
_, err = capfd.readouterr()
assert pytest.blob3_hash[0:8] in err
assert " 0%" in err
assert " 100%" in err
assert " " + str(pytest.blob3_size) + "/" + str(pytest.blob3_size) in err
def test_see_progress(dxf_main, monkeypatch):
environ = {'DXF_PROGRESS': '1'}
environ.update(dxf_main)
# pylint: disable=too-few-public-methods
class FakeStdout(object):
# pylint: disable=no-self-use
def write(self, _):
time.sleep(0.05)
monkeypatch.setattr(sys, 'stdout', FakeStdout())
assert dxf.main.doit(['pull-blob', pytest.repo, pytest.blob1_hash], environ) == 0
orig_tqdm = tqdm.tqdm
def new_tqdm(*args, **kwargs):
tqdm_obj = orig_tqdm(*args, **kwargs)
class TQDM(object):
# pylint: disable=no-self-use
def update(self, n):
tqdm_obj.update(n)
time.sleep(0.025)
def close(self):
tqdm_obj.close()
@property
def n(self):
return tqdm_obj.n
@property
def total(self):
return tqdm_obj.total
return TQDM()
monkeypatch.setattr(tqdm, 'tqdm', new_tqdm)
assert dxf.main.doit(['push-blob', pytest.repo, pytest.blob4_file], environ) == 0
def test_set_alias(dxf_main, capsys):
assert dxf.main.doit(['set-alias', pytest.repo, 'hello', pytest.blob1_hash], dxf_main) == 0
_, err = capsys.readouterr()
assert err == ""
if dxf_main['REGVER'] != 2.2:
assert dxf.main.doit(['del-alias', pytest.repo, 'hello'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
# Deleting tag actually deletes by DCD:
# https://github.com/docker/distribution/issues/1566
# So fooey gets deleted too
assert dxf.main.doit(['list-aliases', pytest.repo], dxf_main) == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
assert dxf.main.doit(['set-alias', pytest.repo, 'hello', pytest.blob1_hash], dxf_main) == 0
assert dxf.main.doit(['set-alias', pytest.repo, 'fooey', pytest.blob1_hash], dxf_main) == 0
_, err = capsys.readouterr()
assert err == ""
assert dxf.main.doit(['set-alias', pytest.repo, 'there', pytest.blob1_hash, pytest.blob2_hash], dxf_main) == 0
_, err = capsys.readouterr()
assert err == ""
assert dxf.main.doit(['set-alias', pytest.repo, 'world', pytest.blob2_file], dxf_main) == 0
_, err = capsys.readouterr()
assert err == ""
def test_get_alias(dxf_main, capsys):
assert dxf.main.doit(['get-alias', pytest.repo, 'hello'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
assert dxf.main.doit(['get-alias', pytest.repo, 'there'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep + \
pytest.blob2_hash + os.linesep
assert err == ""
assert dxf.main.doit(['get-alias', pytest.repo, 'world'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob2_hash + os.linesep
assert err == ""
def test_get_digest(dxf_main, capsys):
if dxf_main['REGVER'] == 2.2:
with pytest.raises(dxf.exceptions.DXFDigestNotAvailableForSchema1):
dxf.main.doit(['get-digest', pytest.repo, 'hello'], dxf_main)
return
assert dxf.main.doit(['get-digest', pytest.repo, 'hello'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
assert dxf.main.doit(['get-digest', pytest.repo, 'there'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
assert dxf.main.doit(['get-digest', pytest.repo, 'world'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob2_hash + os.linesep
assert err == ""
pytest.copy_registry_image(dxf_main['REGVER'])
assert dxf.main.doit(['get-digest',
'test/registry',
str(dxf_main['REGVER'])],
dxf_main) == 0
out, err = capsys.readouterr()
assert out == dxf_main['REG_DIGEST'] + os.linesep
assert err == ""
def test_blob_size(dxf_main, capsys):
assert dxf.main.doit(['blob-size', pytest.repo, pytest.blob1_hash, pytest.blob2_hash, '@hello', '@there', '@world'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == str(pytest.blob1_size) + os.linesep + \
str(pytest.blob2_size) + os.linesep + \
str(pytest.blob1_size) + os.linesep + \
str(pytest.blob1_size + pytest.blob2_size) + os.linesep + \
str(pytest.blob2_size) + os.linesep
assert err == ""
def test_list_aliases(dxf_main, capsys):
assert dxf.main.doit(['list-aliases', pytest.repo], dxf_main) == 0
out, err = capsys.readouterr()
assert sorted(out.split(os.linesep)) == ['', 'fooey', 'hello', 'there', 'world']
assert err == ""
def test_manifest(dxf_main, capfd, monkeypatch):
assert dxf.main.doit(['set-alias', pytest.repo, 'mani_test', pytest.blob1_hash], dxf_main) == 0
manifest, err = capfd.readouterr()
assert manifest
assert err == ""
# pylint: disable=too-few-public-methods
class FakeStdin(object):
# pylint: disable=no-self-use
def read(self):
return manifest
monkeypatch.setattr(sys, 'stdin', FakeStdin())
assert dxf.main.doit(['get-alias', pytest.repo], dxf_main) == 0
out, err = capfd.readouterr()
assert out == pytest.blob1_hash + os.linesep
assert err == ""
assert dxf.main.doit(['blob-size', pytest.repo], dxf_main) == 0
out, err = capfd.readouterr()
assert out == str(pytest.blob1_size) + os.linesep
assert err == ""
assert dxf.main.doit(['pull-blob', pytest.repo], dxf_main) == 0
# pylint: disable=protected-access
capfd._capture.out.tmpfile.encoding = None
out, err = capfd.readouterr()
sha256 = hashlib.sha256()
sha256.update(out)
assert 'sha256:' + sha256.hexdigest() == pytest.blob1_hash
assert err == ""
assert dxf.main.doit(['del-blob', pytest.repo], dxf_main) == 0
assert dxf.main.doit(['pull-blob', pytest.repo], dxf_main) == errno.ENOENT
#@pytest.mark.onlytest
def test_auth(dxf_main, capsys):
if (not dxf_main['TEST_DO_AUTH']) or (not dxf_main['TEST_DO_TOKEN']):
assert dxf.main.doit(['auth', pytest.repo], dxf_main) == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
else:
assert dxf.main.doit(['auth', pytest.repo, '*'], dxf_main) == 0
token, err = capsys.readouterr()
assert token
assert err == ""
environ = {}
environ.update(dxf_main)
environ.pop('DXF_USERNAME', None)
environ.pop('DXF_PASSWORD', None)
environ.pop('DXF_AUTHORIZATION', None)
assert dxf.main.doit(['list-repos'], environ) == 0
out, err = capsys.readouterr()
expected = [pytest.repo]
if dxf_main['REGVER'] != 2.2:
expected += ['test/registry']
assert sorted(out.rstrip().split(os.linesep)) == sorted(expected)
assert err == ""
assert dxf.main.doit(['list-aliases', pytest.repo], environ) == errno.EACCES
out, err = capsys.readouterr()
assert out == ""
environ['DXF_TOKEN'] = token.strip()
assert dxf.main.doit(['list-aliases', pytest.repo], environ) == 0
out, err = capsys.readouterr()
assert sorted(out.split(os.linesep)) == ['', 'fooey', 'hello', 'mani_test', 'there', 'world']
assert err == ""
def test_del_blob(dxf_main, capfd):
_pull_blob(dxf_main, pytest.blob2_hash, pytest.blob2_hash, capfd)
assert dxf.main.doit(['del-blob', pytest.repo, pytest.blob2_hash], dxf_main) == 0
_not_found(dxf_main, pytest.blob2_hash)
assert dxf.main.doit(['del-blob', pytest.repo, pytest.blob2_hash], dxf_main) == errno.ENOENT
def test_del_alias(dxf_main, capsys):
assert dxf.main.doit(['get-alias', pytest.repo, 'world'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob2_hash + os.linesep
assert err == ""
if dxf_main['REGVER'] == 2.2:
with pytest.raises(requests.exceptions.HTTPError) as ex:
dxf.main.doit(['del-alias', pytest.repo, 'world'], dxf_main)
assert ex.value.response.status_code == requests.codes.method_not_allowed
assert dxf.main.doit(['get-alias', pytest.repo, 'world'], dxf_main) == 0
else:
assert dxf.main.doit(['del-alias', pytest.repo, 'world'], dxf_main) == 0
out, err = capsys.readouterr()
assert out == pytest.blob2_hash + os.linesep
# Note: test gc but it isn't needed to make a 404
pytest.gc()
assert dxf.main.doit(['get-alias', pytest.repo, 'world'], dxf_main) == errno.ENOENT
assert dxf.main.doit(['del-alias', pytest.repo, 'world'], dxf_main) == errno.ENOENT
def _num_args(dxf_main, op, minimum, maximum, capsys):
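    # Helper: check that the CLI rejects `op` when given fewer than `minimum`
    # or more than `maximum` positional arguments after the repo name.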
if minimum is not None:
with pytest.raises(SystemExit):
dxf.main.doit([op, pytest.repo] + ['a'] * (minimum - 1), dxf_main)
out, err = capsys.readouterr()
assert out == ""
assert "too few arguments" in err
if maximum is not None:
with pytest.raises(SystemExit):
dxf.main.doit([op, pytest.repo] + ['a'] * (maximum + 1), dxf_main)
out, err = capsys.readouterr()
assert out == ""
assert "too many arguments" in err
def test_bad_args(dxf_main, capsys):
_num_args(dxf_main, 'push-blob', 1, 2, capsys)
_num_args(dxf_main, 'set-alias', 2, None, capsys)
_num_args(dxf_main, 'list-aliases', None, 0, capsys)
with pytest.raises(SystemExit):
dxf.main.doit(['push-blob', pytest.repo, pytest.blob1_file, 'fooey'], dxf_main)
out, err = capsys.readouterr()
assert out == ""
assert "invalid alias" in err
def test_auth_host(dxf_main):
if dxf_main['TEST_DO_TOKEN']:
environ = {
'DXF_AUTH_HOST': 'localhost:5002'
}
environ.update(dxf_main)
with pytest.raises(requests.exceptions.ConnectionError):
dxf.main.doit(['list-repos'], environ)
def test_tlsverify(dxf_main):
if dxf_main['DXF_INSECURE'] == '0':
v = os.environ['REQUESTS_CA_BUNDLE']
del os.environ['REQUESTS_CA_BUNDLE']
try:
if dxf_main['DXF_SKIPTLSVERIFY'] == '0':
with pytest.raises(requests.exceptions.SSLError):
dxf.main.doit(['list-repos'], dxf_main)
else:
assert dxf.main.doit(['list-repos'], dxf_main) == 0
finally:
os.environ['REQUESTS_CA_BUNDLE'] = v
def test_tlsverify_str(dxf_main):
if dxf_main['DXF_INSECURE'] == '0':
v = os.environ['REQUESTS_CA_BUNDLE']
del os.environ['REQUESTS_CA_BUNDLE']
skip = dxf_main['DXF_SKIPTLSVERIFY']
dxf_main['DXF_SKIPTLSVERIFY'] = '0'
dxf_main['DXF_TLSVERIFY'] = v
try:
assert dxf.main.doit(['list-repos'], dxf_main) == 0
finally:
os.environ['REQUESTS_CA_BUNDLE'] = v
dxf_main['DXF_SKIPTLSVERIFY'] = skip
del dxf_main['DXF_TLSVERIFY']
| 41.743243 | 135 | 0.626093 |
794960fd904eac97bfa24f0ddeaaa222aea51a74 | 2,248 | py | Python | waad/utils/asset.py | ANSSI-FR/WAAD | 276820be3e1aa45c52351b481105ab95a069b3e0 | ["BSD-2-Clause"] | 13 | 2021-04-08T15:59:57.000Z | 2022-03-28T14:04:23.000Z | waad/utils/asset.py | ANSSI-FR/WAAD | 276820be3e1aa45c52351b481105ab95a069b3e0 | ["BSD-2-Clause"] | null | null | null | waad/utils/asset.py | ANSSI-FR/WAAD | 276820be3e1aa45c52351b481105ab95a069b3e0 | ["BSD-2-Clause"] | 1 | 2022-03-08T19:50:36.000Z | 2022-03-08T19:50:36.000Z |
"""This module implements the `Asset` objects and related."""
from abc import ABC, abstractmethod
from typing import Any, Optional
class Asset(ABC):
"""This class defines a `Asset` and is abstract."""
@abstractmethod
def to_tuple(self):
pass
@abstractmethod
def __eq__(self, obj: Any):
pass
@abstractmethod
def __hash__(self):
pass
@abstractmethod
def __repr__(self):
pass
class Machine(Asset):
"""This class defines a `Machine` object as a tuple (name, domain), child of `Asset`."""
def __init__(self, name: Optional[str] = None, domain: Optional[str] = None):
self.name = name
self.domain = domain
def to_tuple(self):
return (self.name, self.domain)
def __eq__(self, obj: Any):
return isinstance(obj, Machine) and obj.name == self.name and obj.domain == self.domain
def __hash__(self):
return hash((self.name, self.domain))
def __repr__(self):
return f"{self.name} - {self.domain}"
class Account(Asset):
"""This class defines an `Account` object as a tuple (name, domain, sid), child of `Asset`."""
def __init__(self, name: Optional[str] = None, domain: Optional[str] = None, sid: Optional[str] = None):
self.name = name
self.domain = domain
self.sid = sid
def to_tuple(self):
return (self.name, self.domain, self.sid)
def __eq__(self, obj: Any):
return isinstance(obj, Account) and obj.name == self.name and obj.domain == self.domain and obj.sid == self.sid
def __hash__(self):
return hash((self.name, self.domain, self.sid))
def __repr__(self):
if self.sid is not None:
return f"{self.name} - {self.domain} - {self.sid}"
return f"{self.name} - {self.domain}"
class IP(Asset):
"""This class defines an `IP` object as a child of `Asset`."""
def __init__(self, address: str):
self.address = address
def to_tuple(self):
return (self.address,)
def __eq__(self, obj: Any):
return isinstance(obj, IP) and obj.address == self.address
def __hash__(self):
return hash(self.address)
def __repr__(self):
return self.address
| 25.83908 | 119 | 0.619217 |
794961d384fb2fa789c053d776e1b122ed780215 | 12,176 | py | Python | Python/example/higgs.py | jrade/JrBoost | 427300477b5afc4e36896a6e028095d1de9e38d3 | ["MIT"] | 1 | 2021-04-14T16:13:14.000Z | 2021-04-14T16:13:14.000Z | Python/example/higgs.py | jrade/JrBoost | 427300477b5afc4e36896a6e028095d1de9e38d3 | ["MIT"] | null | null | null | Python/example/higgs.py | jrade/JrBoost | 427300477b5afc4e36896a6e028095d1de9e38d3 | ["MIT"] | null | null | null |
# Copyright 2021 Johan Rade <johan.rade@gmail.com>.
# Distributed under the MIT license.
# (See accompanying file License.txt or copy at https://opensource.org/licenses/MIT)
import datetime, gc, math, os, random, time
import numpy as np
import pandas as pd
import jrboost
PROFILE = jrboost.PROFILE
#-----------------------------------------------------------------------------------------------------------------------
param = {
'threadCount': os.cpu_count() // 2,
'profile': True,
#'trainFraction': 0.1,
}
trainParam = {
'minimizeAlgorithm': jrboost.minimizePopulation,
'foldCount': 5,
#'lossFun': jrboost.negAucWeighted,
'lossFun': jrboost.LogLossWeighted(0.001),
#'lossFun': lambda a, b, c: -optimalCutoff(a, b, c)[1],
'boostParamGrid': {
#'minRelSampleWeight': [0.001], #??????????????????????????+
#'iterationCount': [1000],
'iterationCount': [300],
'eta': [0.005, 0.007, 0.01, 0.02, 0.03, 0.05, 0.07, 0.1],
'usedSampleRatio': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'usedVariableRatio': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'maxTreeDepth': [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
'minNodeSize': [100, 150, 200, 300, 500, 700, 1000],
#'pruneFactor': [0.0, 0.1, 0.2, 0.5],
},
'minimizeParam': {
'populationCount': 100,
'survivorCount': 50,
'cycleCount': 2,
'bestCount': 10,
}
}
#-----------------------------------------------------------------------------------------------------------------------
def main():
logFilePath = f'../Log OptTree {datetime.datetime.now().strftime("%y%m%d-%H%M%S")}.txt'
with open(logFilePath, 'w', 1) as logFile:
def log(msg = '', end = '\n'): print(msg, end = end); logFile.write(msg + end)
log(f'Parameters: {param}\n')
log(f'Training parameters:{trainParam}\n')
threadCount = param['threadCount']
profile = param['profile']
trainFraction = param.get('trainFraction', None)
jrboost.setThreadCount(threadCount)
(trainInDataFrame, trainOutDataSeries, trainWeightSeries,
testInDataFrame, testOutDataSeries, testWeightSeries,
validationInDataFrame, validationOutDataSeries, validationWeightSeries
) = loadData(trainFrac = trainFraction)
trainInData = trainInDataFrame.to_numpy(dtype = np.float32)
trainOutData = trainOutDataSeries.to_numpy(dtype = np.uint64)
trainWeights = trainWeightSeries.to_numpy(dtype = np.float64)
testInData = testInDataFrame.to_numpy(dtype = np.float32)
testOutData = testOutDataSeries.to_numpy(dtype = np.uint64)
testWeights = testWeightSeries.to_numpy(dtype = np.float64)
validationInData = validationInDataFrame.to_numpy(dtype = np.float32)
validationOutData = validationOutDataSeries.to_numpy(dtype = np.uint64)
validationWeights = validationWeightSeries.to_numpy(dtype = np.float64)
log(pd.DataFrame(
index = ['Train', 'Test', 'Validation'],
columns = ['Total', 'Neg.', 'Pos.', 'Pos. Ratio', 'Weight'],
data = [
[len(trainOutData), (1 - trainOutData).sum(), trainOutData.sum(), trainOutData.sum() / len(trainOutData), trainWeightSeries.sum()],
[len(testOutData), (1 - testOutData).sum(), testOutData.sum(), testOutData.sum() / len(testOutData), testWeightSeries.sum()],
[len(validationOutData), (1 - validationOutData).sum(), validationOutData.sum(), validationOutData.sum() / len(validationOutData), validationWeightSeries.sum()],
]
).to_string() + '\n')
# train predictor ..........................................................
t = -time.time()
if profile: PROFILE.START()
predictor, cutoff, msg = train(trainInData, trainOutData, trainWeights)
log(msg + '\n')
t += time.time()
if profile: log(PROFILE.STOP() + '\n')
log(formatTime(t) + '\n')
log(f'cutoff = {cutoff}\n')
# score ...................................................................
testPredData = predictor.predict(testInData)
score = amsScore(testOutData, testPredData, testWeights, cutoff)
log(f'test AMS = {score}')
validationPredData = predictor.predict(validationInData)
score = amsScore(validationOutData, validationPredData, validationWeights, cutoff)
log(f'validation AMS = {score}')
# create and save .........................................................
testPredDataSeries = pd.Series(index = testOutDataSeries.index, data = testPredData)
validationPredDataSeries = pd.Series(index = validationOutDataSeries.index, data = validationPredData)
submissionPredDataSeries = pd.concat((testPredDataSeries, validationPredDataSeries)).sort_index()
submissionPredData = submissionPredDataSeries.to_numpy()
submissionPredRank = rank(submissionPredData) + 1
submissionPredClass = np.where(submissionPredData >= cutoff, 's', 'b')
submissionDataFrame = pd.DataFrame(
index = submissionPredDataSeries.index,
data = { 'RankOrder': submissionPredRank, 'Class': submissionPredClass, }
)
submissionDataFrame.to_csv('../../Higgs Submission.csv', sep = ',')
#-----------------------------------------------------------------------------------------------------------------------
def train(inData, outData, weights):
optimizeFun = trainParam['minimizeAlgorithm']
# determine the best hyperparameters
boostParamGrid = trainParam['boostParamGrid']
minimizeParam = trainParam['minimizeParam']
bestOptList = optimizeFun(
lambda optionList: evaluateBoostParam(
optionList, inData, outData, weights),
boostParamGrid,
minimizeParam
)
msg = formatOptions(bestOptList[0])
# determine optimal cutoff
predOutData = np.zeros((len(outData),))
foldCount = trainParam['foldCount']
folds = jrboost.stratifiedRandomFolds(outData, foldCount)
for trainSamples, testSamples in folds:
trainInData = inData[trainSamples, :]
trainOutData = outData[trainSamples]
trainWeights = weights[trainSamples]
trainer = jrboost.BoostTrainer(trainInData, trainOutData, trainWeights)
predictor = jrboost.EnsemblePredictor(jrboost.parallelTrain(trainer, bestOptList))
testInData = inData[testSamples, :]
predOutData[testSamples] = predictor.predict(testInData)
estCutoff, _ = optimalCutoff(outData, predOutData, weights)
# build predictor
trainer = jrboost.BoostTrainer(inData, outData, weights)
predictor = jrboost.EnsemblePredictor(jrboost.parallelTrain(trainer, bestOptList))
return predictor, estCutoff, msg
def evaluateBoostParam(boostParamList, inData, outData, weights):
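    # Cross-validate every candidate boosting parameter set over the stratified
    # folds and accumulate its weighted loss; lower totals indicate better
    # hyperparameters.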
foldCount = trainParam['foldCount']
lossFun = trainParam['lossFun']
boostParamCount = len(boostParamList)
loss = np.zeros((boostParamCount,))
folds = jrboost.stratifiedRandomFolds(outData, foldCount)
for trainSamples, testSamples in folds:
print('.', end = '', flush = True)
trainInData = inData[trainSamples, :]
trainOutData = outData[trainSamples]
trainWeights = weights[trainSamples]
trainer = jrboost.BoostTrainer(trainInData, trainOutData, trainWeights)
testInData = inData[testSamples, :]
testOutData = outData[testSamples]
testWeights = weights[testSamples]
loss += jrboost.parallelTrainAndEvalWeighted(trainer, boostParamList, testInData, testOutData, testWeights, lossFun)
print()
return loss
#-----------------------------------------------------------------------------------------------
def loadData(trainFrac = None):
dataFilePath = 'C:/Data/Higgs/atlas-higgs-challenge-2014-v2.csv'
dataFrame = pd.read_csv(dataFilePath, sep = ',', index_col = 0)
trainSamples = dataFrame.index[dataFrame['KaggleSet'] == 't']
if trainFrac is not None and trainFrac != 1.0:
trainSampleCount = len(trainSamples)
trainSamples = pd.Index(random.sample(
trainSamples.tolist(),
round(trainFrac * trainSampleCount)
))
testSamples = dataFrame.index[dataFrame['KaggleSet'] == 'b']
validationSamples = dataFrame.index[dataFrame['KaggleSet'] == 'v']
outDataSeries = pd.Series(index = dataFrame.index, data = 0)
outDataSeries[dataFrame['Label'] == 's'] = 1
weightSeries = dataFrame['KaggleWeight']
inDataFrame = dataFrame.drop(['Label', 'Weight', 'KaggleSet', 'KaggleWeight'], axis = 1)
trainInDataFrame = inDataFrame.loc[trainSamples, :]
trainOutDataSeries = outDataSeries[trainSamples]
trainWeightSeries = weightSeries[trainSamples]
testInDataFrame = inDataFrame.loc[testSamples, :]
testOutDataSeries = outDataSeries[testSamples]
testWeightSeries = weightSeries[testSamples]
validationInDataFrame = inDataFrame.loc[validationSamples, :]
validationOutDataSeries = outDataSeries[validationSamples]
validationWeightSeries = weightSeries[validationSamples]
return (
trainInDataFrame, trainOutDataSeries, trainWeightSeries,
testInDataFrame, testOutDataSeries, testWeightSeries,
validationInDataFrame, validationOutDataSeries, validationWeightSeries
)
#-----------------------------------------------------------------------------------------------------------------------
def formatOptions(opt):
eta = opt['eta']
usr = opt.get('usedSampleRatio', 1.0)
uvr = opt.get('usedVariableRatio', 1.0)
mns = opt.get('minNodeSize', 1)
md = opt.get('maxTreeDepth', 1)
pf = opt.get('pruneFactor', 0.0)
return f'eta = {eta} usr = {usr} uvr = {uvr} mns = {mns} md = {md} pf = {pf}'
def formatScore(score, precision = 4):
return '(' + ', '.join((f'{x:.{precision}f}' for x in score)) + ')'
def formatTime(t):
h = int(t / 3600)
t -= 3600 * h;
m = int(t / 60)
t -= 60 * m
s = int(t)
return f'{h}:{m:02}:{s:02}'
#-----------------------------------------------------------------------------------------------------------------------
def rank(data):
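    # Return the 0-based rank of each element, i.e. the position it would occupy
    # after sorting the array in ascending order.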
temp = data.argsort()
ranks1 = np.empty_like(temp)
ranks1[temp] = np.arange(len(temp))
return ranks1
#-----------------------------------------------------------------------------------------------------------------------
def _amsScoreImpl(s, b):
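    # Approximate Median Significance: AMS = sqrt(2*((s + b + b_r)*ln(1 + s/(b + b_r)) - s)),
    # with regularisation term b_r = 10, as used in the Higgs Boson ML Challenge.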
b_r = 10.0
return math.sqrt( 2.0 * (
(s + b + b_r)
* math.log (1.0 + s / (b + b_r))
- s
))
def amsScore(outData, predData, weights, cutoff):
truePos = np.sum(outData * (predData >= cutoff) * weights)
trueNeg = np.sum((1 - outData) * (predData >= cutoff) * weights)
return _amsScoreImpl(truePos, trueNeg)
def optimalCutoff(outData, predData, weights):
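    # Sweep candidate cutoffs in descending prediction order, accumulating the
    # weighted true/false positives, and return the (cutoff, AMS) pair that
    # maximises the AMS score.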
truePos = 0.0
falsePos = 0.0
a = sorted(list(zip(outData, predData, weights)), key = lambda x: -x[1])
bestScore = _amsScoreImpl(truePos, falsePos)
bestI = -1
for i, (outValue, _, weight) in enumerate(a):
if outValue:
truePos += weight
else:
falsePos += weight
score = _amsScoreImpl(truePos, falsePos)
if score <= bestScore: continue
if i != len(a) - 1 and a[i][1] == a[i + 1][1]: continue
bestScore = score
bestI = i
if bestI == -1:
bestCutoff = 1.0
elif bestI == len(a) - 1:
bestCutoff = 0.0
else:
bestCutoff = (a[bestI][1] + a[bestI + 1][1]) / 2.0
return bestCutoff, bestScore
#---------------------------------------------------------------------------------------------
while True:
main()
print()
| 37.235474 | 178 | 0.566606 |
7949625c3dfa46c51b856a51efc1b744225c4c0e | 1,667 | py | Python | bfgame/effects/healing.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | 3 | 2017-10-28T11:28:38.000Z | 2018-09-12T09:47:00.000Z | bfgame/effects/healing.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | null | null | null | bfgame/effects/healing.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | null | null | null |
from core.effects.base import Effect
from bflib import effects
from bflib.dice import D6
from core.contexts import Action
from core.messaging import StringBuilder, Actor, Verb
class Healing(Effect):
name = "Healing"
base_effect = effects.Healing
"""
Every round, recover health
"""
def __init__(self, duration, power=None, dice=D6(1)):
super().__init__(duration, power, dice)
def on_start(self, game_object):
"""
Show a message,
:param game_object:
:return:
"""
context = Action(game_object, None)
message = StringBuilder(Actor, Verb("start", Actor), "healing!")
game_object.game.echo.see(
actor=game_object,
message=message,
context=context,
)
def update(self, game_object):
"""
At Each Update it deals damage to the game_object and shows a message
:param game_object:
:return:
"""
super().update(game_object)
health = self.dice.roll_total()
context = Action(game_object, None)
message = StringBuilder(Actor, Verb("recover", Actor), "%s health!" % health)
game_object.game.echo.see(
actor=game_object,
message=message,
context=context,
)
game_object.health.restore_health(health)
def on_finish(self, game_object):
context = Action(game_object, None)
message = StringBuilder(Actor, Verb("is", Actor), "no longer healing.")
game_object.game.echo.see(
actor=game_object,
message=message,
context=context,
)
| 27.783333 | 85 | 0.59928 |
7949631986e9af50b1db97a2f305fb69c493f976 | 11,022 | py | Python | salt/modules/mine.py | iMilnb/saltstack | 31033aac6bde314f544db8b7781470325c205549 | ["Apache-2.0"] | 1 | 2020-06-16T05:47:58.000Z | 2020-06-16T05:47:58.000Z | salt/modules/mine.py | iMilnb/saltstack | 31033aac6bde314f544db8b7781470325c205549 | ["Apache-2.0"] | null | null | null | salt/modules/mine.py | iMilnb/saltstack | 31033aac6bde314f544db8b7781470325c205549 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
'''
The function cache system allows for data to be stored on the master so it can be easily read by other minions
'''
# Import python libs
import copy
import logging
# Import salt libs
import salt.crypt
import salt.payload
import salt.utils.network
__proxyenabled__ = ['*']
log = logging.getLogger(__name__)
def _auth():
'''
Return the auth object
'''
if 'auth' not in __context__:
__context__['auth'] = salt.crypt.SAuth(__opts__)
return __context__['auth']
def update(clear=False):
'''
Execute the configured functions and send the data back up to the master
The functions to be executed are merged from the master config, pillar and
minion config under the option "function_cache":
.. code-block:: yaml
mine_functions:
network.ip_addrs:
- eth0
disk.usage: []
The function cache will be populated with information from executing these
functions
CLI Example:
.. code-block:: bash
salt '*' mine.update
'''
m_data = __salt__['config.option']('mine_functions', {})
data = {}
for func in m_data:
if func not in __salt__:
log.error('Function {0} in mine_functions not available'
.format(func))
continue
try:
if m_data[func] and isinstance(m_data[func], dict):
data[func] = __salt__[func](**m_data[func])
elif m_data[func] and isinstance(m_data[func], list):
data[func] = __salt__[func](*m_data[func])
else:
data[func] = __salt__[func]()
except Exception:
log.error('Function {0} in mine_functions failed to execute'
.format(func))
continue
if __opts__['file_client'] == 'local':
if not clear:
old = __salt__['data.getval']('mine_cache')
if isinstance(old, dict):
old.update(data)
data = old
return __salt__['data.update']('mine_cache', data)
auth = _auth()
load = {
'cmd': '_mine',
'data': data,
'id': __opts__['id'],
'clear': clear,
'tok': auth.gen_token('salt'),
}
# Changed for transport plugin
# sreq = salt.payload.SREQ(__opts__['master_uri'])
# ret = sreq.send('aes', auth.crypticle.dumps(load))
# return auth.crypticle.loads(ret)
sreq = salt.transport.Channel.factory(__opts__)
ret = sreq.send(load)
return ret
def send(func, *args, **kwargs):
'''
Send a specific function to the mine.
CLI Example:
.. code-block:: bash
salt '*' mine.send network.interfaces eth0
'''
    if func not in __salt__:
return False
data = {}
arg_data = salt.utils.arg_lookup(__salt__[func])
func_data = copy.deepcopy(kwargs)
for ind, _ in enumerate(arg_data.get('args', [])):
try:
func_data[arg_data['args'][ind]] = args[ind]
except IndexError:
# Safe error, arg may be in kwargs
pass
f_call = salt.utils.format_call(__salt__[func], func_data)
try:
if 'kwargs' in f_call:
data[func] = __salt__[func](*f_call['args'], **f_call['kwargs'])
else:
data[func] = __salt__[func](*f_call['args'])
except Exception as exc:
log.error('Function {0} in mine.send failed to execute: {1}'
.format(func, exc))
return False
if __opts__['file_client'] == 'local':
old = __salt__['data.getval']('mine_cache')
if isinstance(old, dict):
old.update(data)
data = old
return __salt__['data.update']('mine_cache', data)
auth = _auth()
load = {
'cmd': '_mine',
'data': data,
'id': __opts__['id'],
'tok': auth.gen_token('salt'),
}
# Changed for transport plugin
# sreq = salt.payload.SREQ(__opts__['master_uri'])
# ret = sreq.send('aes', auth.crypticle.dumps(load))
# return auth.crypticle.loads(ret)
sreq = salt.transport.Channel.factory(__opts__)
ret = sreq.send(load)
return ret
def get(tgt, fun, expr_form='glob'):
'''
Get data from the mine based on the target, function and expr_form
Targets can be matched based on any standard matching system that can be
matched on the master via these keywords::
glob
pcre
grain
grain_pcre
compound
CLI Example:
.. code-block:: bash
salt '*' mine.get '*' network.interfaces
salt '*' mine.get 'os:Fedora' network.interfaces grain
salt '*' mine.get 'os:Fedora and S@192.168.5.0/24' network.ipaddrs compound
'''
    if expr_form.lower() == 'pillar':
log.error('Pillar matching not supported on mine.get')
return ''
if __opts__['file_client'] == 'local':
ret = {}
is_target = {'glob': __salt__['match.glob'],
'pcre': __salt__['match.pcre'],
'list': __salt__['match.list'],
'grain': __salt__['match.grain'],
'grain_pcre': __salt__['match.grain_pcre'],
'compound': __salt__['match.compound'],
'ipcidr': __salt__['match.ipcidr'],
}[expr_form](tgt)
if is_target:
data = __salt__['data.getval']('mine_cache')
if isinstance(data, dict) and fun in data:
ret[__opts__['id']] = data[fun]
return ret
auth = _auth()
load = {
'cmd': '_mine_get',
'id': __opts__['id'],
'tgt': tgt,
'fun': fun,
'expr_form': expr_form,
'tok': auth.gen_token('salt'),
}
# Changed for transport plugin
# sreq = salt.payload.SREQ(__opts__['master_uri'])
# ret = sreq.send('aes', auth.crypticle.dumps(load))
# return auth.crypticle.loads(ret)
sreq = salt.transport.Channel.factory(__opts__)
ret = sreq.send(load)
return ret
def delete(fun):
'''
Remove specific function contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.delete 'network.interfaces'
'''
if __opts__['file_client'] == 'local':
data = __salt__['data.getval']('mine_cache')
if isinstance(data, dict) and fun in data:
del data[fun]
return __salt__['data.update']('mine_cache', data)
auth = _auth()
load = {
'cmd': '_mine_delete',
'id': __opts__['id'],
'fun': fun,
'tok': auth.gen_token('salt'),
}
# Changed for transport plugin
# sreq = salt.payload.SREQ(__opts__['master_uri'])
# ret = sreq.send('aes', auth.crypticle.dumps(load))
# return auth.crypticle.loads(ret)
sreq = salt.transport.Channel.factory(__opts__)
ret = sreq.send(load)
return ret
def flush():
'''
Remove all mine contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.flush
'''
if __opts__['file_client'] == 'local':
return __salt__['data.update']('mine_cache', {})
auth = _auth()
load = {
'cmd': '_mine_flush',
'id': __opts__['id'],
'tok': auth.gen_token('salt'),
}
# Changed for transport plugin
# sreq = salt.payload.SREQ(__opts__['master_uri'])
# ret = sreq.send('aes', auth.crypticle.dumps(load))
# return auth.crypticle.loads(ret)
sreq = salt.transport.Channel.factory(__opts__)
ret = sreq.send(load)
return ret
def get_docker(interfaces=None, cidrs=None):
'''
Get all mine data for 'docker.get_containers' and run an aggregation
routine. The "interfaces" parameter allows for specifying which network
interfaces to select ip addresses from. The "cidrs" parameter allows for
specifying a list of cidrs which the ip address must match.
CLI Example:
.. code-block:: bash
salt '*' mine.get_docker
salt '*' mine.get_docker interfaces='eth0'
salt '*' mine.get_docker interfaces='["eth0", "eth1"]'
salt '*' mine.get_docker cidrs='107.170.147.0/24'
salt '*' mine.get_docker cidrs='["107.170.147.0/24", "172.17.42.0/24"]'
salt '*' mine.get_docker interfaces='["eth0", "eth1"]' cidrs='["107.170.147.0/24", "172.17.42.0/24"]'
'''
# Enforce that interface and cidr are lists
if interfaces:
interface_ = []
interface_.extend(interfaces if isinstance(interfaces, list) else [interfaces])
interfaces = interface_
if cidrs:
cidr_ = []
cidr_.extend(cidrs if isinstance(cidrs, list) else [cidrs])
cidrs = cidr_
# Get docker info
cmd = 'docker.get_containers'
docker_hosts = get('*', cmd)
proxy_lists = {}
# Process docker info
for host, containers in docker_hosts.items():
host_ips = []
# Prepare host_ips list
if not interfaces:
for iface, info in containers['host']['interfaces'].items():
if 'inet' in info:
for ip_ in info['inet']:
host_ips.append(ip_['address'])
else:
for interface in interfaces:
if interface in containers['host']['interfaces']:
for item in containers['host']['interfaces'][interface]['inet']:
host_ips.append(item['address'])
host_ips = list(set(host_ips))
# Filter out ips from host_ips with cidrs
if cidrs:
good_ips = []
for cidr in cidrs:
for ip_ in host_ips:
if salt.utils.network.in_subnet(cidr, [ip_]):
good_ips.append(ip_)
host_ips = list(set(good_ips))
# Process each container
if containers['out']:
for container in containers['out']:
if container['Image'] not in proxy_lists:
proxy_lists[container['Image']] = {}
for dock_port in container['Ports']:
# If port is 0.0.0.0, then we must get the docker host IP
if dock_port['IP'] == '0.0.0.0':
for ip_ in host_ips:
proxy_lists[container['Image']].setdefault('ipv4', []).append(
'{0}:{1}'.format(ip_, dock_port['PublicPort']))
proxy_lists[container['Image']]['ipv4'] = list(set(proxy_lists[container['Image']]['ipv4']))
elif dock_port['IP']:
proxy_lists[container['Image']].setdefault('ipv4', []).append(
'{0}:{1}'.format(dock_port['IP'], dock_port['PublicPort']))
proxy_lists[container['Image']]['ipv4'] = list(set(proxy_lists[container['Image']]['ipv4']))
return proxy_lists
| 32.609467 | 120 | 0.561786 |
794964a23d9acd2708f173175001356b8216ca7f | 282 | py | Python | epikjjh/baekjoon/10769.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | ["MIT"] | 3 | 2019-05-19T13:44:39.000Z | 2019-07-03T11:15:20.000Z | epikjjh/baekjoon/10769.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | ["MIT"] | 7 | 2019-05-06T02:37:26.000Z | 2019-06-29T07:28:02.000Z | epikjjh/baekjoon/10769.py | 15ers/Solve_Naively | 23ee4a3aedbedb65b9040594b8c9c6d9cff77090 | ["MIT"] | 1 | 2019-07-28T06:24:54.000Z | 2019-07-28T06:24:54.000Z |
import sys
input = sys.stdin.readline
target = input().rstrip()
happy = ":-)"
sad = ":-("
happy_num = target.count(happy)
sad_num = target.count(sad)
print("none" if not happy_num and not sad_num else "unsure" if happy_num==sad_num else "happy" if happy_num>sad_num else "sad")
| 21.692308 | 127 | 0.705674 |
794964d973d9c05a818fbb3d3b21495fe95f4dab
| 6,104 |
py
|
Python
|
qa/rpc-tests/proxy_test.py
|
rushiraj111/timicoin-test
|
246fa8b1e249912afd97c77dfad36fe92328c644
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/proxy_test.py
|
rushiraj111/timicoin-test
|
246fa8b1e249912afd97c77dfad36fe92328c644
|
[
"MIT"
] | 1 |
2018-09-03T20:27:15.000Z
|
2018-09-03T20:31:21.000Z
|
qa/rpc-tests/proxy_test.py
|
rushiraj111/timicoin-test
|
246fa8b1e249912afd97c77dfad36fe92328c644
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("timicoinvj7kcklujarx.onion:43472", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "timicoinvj7kcklujarx.onion")
assert_equal(cmd.port, 43472)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.808219 | 145 | 0.653342 |
79496505a88259829a4e1eb38d7a88dd9a9be8d0
| 13,437 |
py
|
Python
|
landshark/dataprocess.py
|
basaks/landshark
|
87ec1fada74addd58f37bdaf3b1adbc10b1544b2
|
[
"Apache-2.0"
] | null | null | null |
landshark/dataprocess.py
|
basaks/landshark
|
87ec1fada74addd58f37bdaf3b1adbc10b1544b2
|
[
"Apache-2.0"
] | null | null | null |
landshark/dataprocess.py
|
basaks/landshark
|
87ec1fada74addd58f37bdaf3b1adbc10b1544b2
|
[
"Apache-2.0"
] | null | null | null |
"""Process training and query data."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from itertools import count, groupby
from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple
import numpy as np
import tables
from landshark import patch, tfwrite
from landshark.basetypes import ArraySource, FixedSlice, IdReader, Worker
from landshark.featurewrite import read_feature_metadata
from landshark.hread import H5Features
from landshark.image import (
ImageSpec,
image_to_world,
indices_strip,
random_indices,
world_to_image,
)
from landshark.iteration import batch_slices
from landshark.kfold import KFolds
from landshark.metadata import FeatureSet
from landshark.multiproc import task_list
from landshark.patch import PatchMaskRowRW, PatchRowRW
from landshark.serialise import DataArrays, serialise
from landshark.tfread import XData
from landshark.util import points_per_batch
log = logging.getLogger(__name__)
class ProcessTrainingArgs(NamedTuple):
name: str
feature_path: str
target_src: ArraySource
image_spec: ImageSpec
halfwidth: int
testfold: int
folds: KFolds
directory: str
batchsize: int
nworkers: int
class ProcessQueryArgs(NamedTuple):
name: str
feature_path: str
image_spec: ImageSpec
strip_idx: int
total_strips: int
strip_spec: ImageSpec
halfwidth: int
directory: str
batchsize: int
nworkers: int
tag: str
def _direct_read(
array: tables.CArray,
patch_reads: List[PatchRowRW],
mask_reads: List[PatchMaskRowRW],
npatches: int,
patchwidth: int,
) -> np.ma.MaskedArray:
"""Build patches from a data source given the read/write operations."""
assert npatches > 0
assert patchwidth > 0
nfeatures = array.atom.shape[0]
dtype = array.atom.dtype.base
patch_data = np.zeros((npatches, patchwidth, patchwidth, nfeatures), dtype=dtype)
patch_mask = np.zeros_like(patch_data, dtype=bool)
for r in patch_reads:
patch_data[r.idx, r.yp, r.xp] = array[r.y, r.x]
for m in mask_reads:
patch_mask[m.idx, m.yp, m.xp] = True
if array.missing is not None:
patch_mask |= patch_data == array.missing
marray = np.ma.MaskedArray(data=patch_data, mask=patch_mask)
return marray
def _cached_read(
row_dict: Dict[int, np.ndarray],
array: tables.CArray,
patch_reads: List[PatchRowRW],
mask_reads: List[PatchMaskRowRW],
npatches: int,
patchwidth: int,
) -> np.ma.MaskedArray:
"""Build patches from a data source given the read/write operations."""
assert npatches > 0
assert patchwidth > 0
nfeatures = array.atom.shape[0]
dtype = array.atom.dtype.base
patch_data = np.zeros((npatches, patchwidth, patchwidth, nfeatures), dtype=dtype)
patch_mask = np.zeros_like(patch_data, dtype=bool)
for r in patch_reads:
patch_data[r.idx, r.yp, r.xp] = row_dict[r.y][r.x]
for m in mask_reads:
patch_mask[m.idx, m.yp, m.xp] = True
if array.missing is not None:
patch_mask |= patch_data == array.missing
marray = np.ma.MaskedArray(data=patch_data, mask=patch_mask)
return marray
def _as_range(iterable: Iterator[int]) -> FixedSlice:
lst = list(iterable)
if len(lst) > 1:
return FixedSlice(start=lst[0], stop=(lst[-1] + 1))
else:
return FixedSlice(start=lst[0], stop=(lst[0] + 1))
def _slices_from_patches(patch_reads: List[PatchRowRW]) -> List[FixedSlice]:
rowlist = sorted({k.y for k in patch_reads})
c_init = count()
def _get(n: int, c: Iterator[int] = c_init) -> int:
res = n - next(c)
return res
slices = [_as_range(g) for _, g in groupby(rowlist, key=_get)]
return slices
def _get_rows(slices: List[FixedSlice], array: tables.CArray) -> Dict[int, np.ndarray]:
# TODO make faster
data_slices = [array[s.start : s.stop] for s in slices]
data = {}
for s, d in zip(slices, data_slices):
for i, d_io in zip(range(s[0], s[1]), d):
data[i] = d_io
return data
def _process_training(
coords: np.ndarray,
targets: np.ndarray,
feature_source: H5Features,
image_spec: ImageSpec,
halfwidth: int,
) -> DataArrays:
coords_x, coords_y = coords.T
indices_x = world_to_image(coords_x, image_spec.x_coordinates)
indices_y = world_to_image(coords_y, image_spec.y_coordinates)
patch_reads, mask_reads = patch.patches(
indices_x, indices_y, halfwidth, image_spec.width, image_spec.height
)
npatches = indices_x.shape[0]
patchwidth = 2 * halfwidth + 1
con_marray, cat_marray = None, None
if feature_source.continuous:
con_marray = _direct_read(
feature_source.continuous, patch_reads, mask_reads, npatches, patchwidth
)
if feature_source.categorical:
cat_marray = _direct_read(
feature_source.categorical, patch_reads, mask_reads, npatches, patchwidth
)
indices = np.vstack((indices_x, indices_y)).T
output = DataArrays(con_marray, cat_marray, targets, coords, indices)
return output
def _process_query(
indices: np.ndarray,
feature_source: H5Features,
image_spec: ImageSpec,
halfwidth: int,
) -> DataArrays:
indices_x, indices_y = indices.T
coords_x = image_to_world(indices_x, image_spec.x_coordinates)
coords_y = image_to_world(indices_y, image_spec.y_coordinates)
patch_reads, mask_reads = patch.patches(
indices_x, indices_y, halfwidth, image_spec.width, image_spec.height
)
patch_data_slices = _slices_from_patches(patch_reads)
npatches = indices_x.shape[0]
patchwidth = 2 * halfwidth + 1
con_marray, cat_marray = None, None
if feature_source.continuous:
con_data_cache = _get_rows(patch_data_slices, feature_source.continuous)
con_marray = _cached_read(
con_data_cache,
feature_source.continuous,
patch_reads,
mask_reads,
npatches,
patchwidth,
)
if feature_source.categorical:
cat_data_cache = _get_rows(patch_data_slices, feature_source.categorical)
cat_marray = _cached_read(
cat_data_cache,
feature_source.categorical,
patch_reads,
mask_reads,
npatches,
patchwidth,
)
coords = np.vstack((coords_x, coords_y)).T
output = DataArrays(con_marray, cat_marray, None, coords, indices)
return output
class _TrainingDataProcessor(Worker):
def __init__(
self, feature_path: str, image_spec: ImageSpec, halfwidth: int
) -> None:
self.feature_path = feature_path
self.feature_source: Optional[H5Features] = None
self.image_spec = image_spec
self.halfwidth = halfwidth
def __call__(self, values: Tuple[np.ndarray, np.ndarray]) -> DataArrays:
if not self.feature_source:
self.feature_source = H5Features(self.feature_path)
targets, coords = values
arrays = _process_training(
coords, targets, self.feature_source, self.image_spec, self.halfwidth
)
return arrays
class _QueryDataProcessor(Worker):
def __init__(
self, feature_path: str, image_spec: ImageSpec, halfwidth: int
) -> None:
self.feature_path = feature_path
self.feature_source: Optional[H5Features] = None
self.image_spec = image_spec
self.halfwidth = halfwidth
def __call__(self, indices: np.ndarray) -> DataArrays:
if not self.feature_source:
self.feature_source = H5Features(self.feature_path)
arrays = _process_query(
indices, self.feature_source, self.image_spec, self.halfwidth
)
return arrays
class Serialised(Worker):
"""Serialise worker output."""
def __init__(self, w: Worker) -> None:
self.worker = w
def __call__(self, x: Any) -> List[bytes]:
"""Wrap worker function and serialise output."""
arrays = self.worker(x)
strings = serialise(arrays)
return strings
def write_trainingdata(args: ProcessTrainingArgs) -> None:
"""Write training data to tfrecord."""
log.info("Testing data is fold {} of {}".format(args.testfold, args.folds.K))
log.info(
"Writing training data to tfrecord in {}-point batches".format(args.batchsize)
)
n_rows = len(args.target_src)
worker = _TrainingDataProcessor(args.feature_path, args.image_spec, args.halfwidth)
sworker = Serialised(worker)
tasks = list(batch_slices(args.batchsize, n_rows))
out_it = task_list(tasks, args.target_src, sworker, args.nworkers)
fold_it = args.folds.iterator(args.batchsize)
tfwrite.training(out_it, n_rows, args.directory, args.testfold, fold_it)
def write_querydata(args: ProcessQueryArgs) -> None:
"""Write query data to tfrecord."""
log.info("Query data is strip {} of {}".format(args.strip_idx, args.total_strips))
log.info(
"Writing query data to tfrecord in {}-point batches".format(args.batchsize)
)
reader_src = IdReader()
it, n_total = indices_strip(
args.image_spec, args.strip_idx, args.total_strips, args.batchsize
)
worker = _QueryDataProcessor(args.feature_path, args.image_spec, args.halfwidth)
sworker = Serialised(worker)
tasks = list(it)
out_it = task_list(tasks, reader_src, sworker, args.nworkers)
tfwrite.query(out_it, n_total, args.directory, args.tag)
#
# Functions for reading HDF5 query data directly
#
def _islice_batched(it: Iterator[np.ndarray], n: int) -> Iterator[np.ndarray]:
"""Slice an iterator which comes in batches."""
while n > 0:
arr: np.ndarray = next(it)
k = arr.shape[0]
yield arr[:n, :]
n -= k
def dataarrays_to_xdata(arrays: DataArrays, features: FeatureSet) -> XData:
"""Convert DataArrays to XData (i.e. add column names)."""
x_con = None
if arrays.con_marray is not None:
assert features.continuous
con_labels = features.continuous.columns.keys()
x_con = dict(zip(con_labels, np.rollaxis(arrays.con_marray, 3)))
x_cat = None
if arrays.cat_marray is not None:
assert features.categorical
cat_labels = features.categorical.columns.keys()
x_cat = dict(zip(cat_labels, np.rollaxis(arrays.cat_marray, 3)))
xdata = XData(x_con, x_cat, arrays.image_indices, arrays.world_coords)
return xdata
class HDF5FeatureReader:
"""Read feature data from HDF5 file."""
def __init__(
self,
hdf5_file: str,
halfwidth: int = 0,
nworkers: int = 1,
batch_mb: float = 1000,
) -> None:
self.file = hdf5_file
self.meta = read_feature_metadata(hdf5_file)
self.meta.halfwidth = halfwidth
self.nworkers = nworkers
self.size = self.meta.image.height * self.meta.image.width
self.worker = _QueryDataProcessor(hdf5_file, self.meta.image, halfwidth)
self.batch_mb = batch_mb
self.batchsize = points_per_batch(self.meta, batch_mb)
def __len__(self) -> int:
"""Total number of points."""
return self.size
@property
def crs(self) -> Dict[str, str]:
"""Get CRS."""
return self.meta.image.crs
def _run(self, index_list: List[np.ndarray]) -> Iterator[XData]:
"""Slice array at indices defined by `tasks`."""
da_it = task_list(index_list, IdReader(), self.worker, self.nworkers)
xdata_it = (dataarrays_to_xdata(d, self.meta) for d in da_it)
return xdata_it
def read(
self,
npoints: Optional[int] = None,
shuffle: bool = False,
random_seed: int = 220,
) -> Iterator[XData]:
"""Read N points of (optionally random) query data (in batches)."""
npoints = min(npoints or self.size, self.size)
if shuffle:
it, _ = random_indices(
self.meta.image, npoints, self.batchsize, random_seed=random_seed
)
else:
it_all, _ = indices_strip(self.meta.image, 1, 1, self.batchsize)
it = _islice_batched(it_all, npoints)
return self._run(list(it))
def read_ix(self, indexes: np.ndarray) -> Iterator[XData]:
"""Read array at supplied indexes."""
index_list = np.array_split(
indexes, range(self.batchsize, indexes.shape[0], self.batchsize)
)
return self._run(index_list)
def read_coords(self, coords: np.ndarray) -> Iterator[XData]:
"""Read array at the supplied coordinates."""
indexes = np.vstack(
[
world_to_image(coords[:, 0], self.meta.image.x_coordinates),
world_to_image(coords[:, 1], self.meta.image.y_coordinates),
]
).T
return self.read_ix(indexes)
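# Minimal usage sketch (hypothetical file name and sizes; not part of this module):
#   reader = HDF5FeatureReader("features.hdf5", halfwidth=1, nworkers=2)
#   for xdata in reader.read(npoints=10000, shuffle=True):
#       pass  # each batch is an XData holding continuous/categorical patch dicts
#             # plus the image indices and world coordinates of the points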
| 32.693431 | 87 | 0.668006 |
7949654b40e5a4dee405c1182816a5f4363f0f87
| 38,812 |
py
|
Python
|
src/black/linegen.py
|
thomasfulton/black
|
448c06e51080ea5326447667b05136de6e79fb69
|
[
"MIT"
] | 1 |
2021-09-11T04:35:10.000Z
|
2021-09-11T04:35:10.000Z
|
src/black/linegen.py
|
thomasfulton/black
|
448c06e51080ea5326447667b05136de6e79fb69
|
[
"MIT"
] | 2 |
2021-05-28T13:32:29.000Z
|
2021-05-28T13:32:49.000Z
|
src/black/linegen.py
|
thomasfulton/black
|
448c06e51080ea5326447667b05136de6e79fb69
|
[
"MIT"
] | null | null | null |
"""
Generating lines of code.
"""
from functools import partial, wraps
import sys
from typing import Collection, Iterator, List, Optional, Set, Union
from dataclasses import dataclass, field
from black.nodes import WHITESPACE, STATEMENT, STANDALONE_COMMENT
from black.nodes import ASSIGNMENTS, OPENING_BRACKETS, CLOSING_BRACKETS
from black.nodes import Visitor, syms, first_child_is_arith, ensure_visible
from black.nodes import is_docstring, is_empty_tuple, is_one_tuple, is_one_tuple_between
from black.nodes import is_walrus_assignment, is_yield, is_vararg, is_multiline_string
from black.nodes import is_stub_suite, is_stub_body, is_atom_with_invisible_parens
from black.nodes import wrap_in_parentheses
from black.brackets import max_delimiter_priority_in_atom
from black.brackets import DOT_PRIORITY, COMMA_PRIORITY
from black.lines import Line, line_to_string, is_line_short_enough
from black.lines import can_omit_invisible_parens, can_be_split, append_leaves
from black.comments import generate_comments, list_comments, FMT_OFF
from black.numerics import normalize_numeric_literal
from black.strings import get_string_prefix, fix_docstring
from black.strings import normalize_string_prefix, normalize_string_quotes
from black.trans import Transformer, CannotTransform, StringMerger
from black.trans import StringSplitter, StringParenWrapper, StringParenStripper
from black.mode import Mode
from black.mode import Feature
from blib2to3.pytree import Node, Leaf
from blib2to3.pgen2 import token
# types
LeafID = int
LN = Union[Leaf, Node]
class CannotSplit(CannotTransform):
"""A readable split that fits the allotted line length is impossible."""
@dataclass
class LineGenerator(Visitor[Line]):
"""Generates reformatted Line objects. Empty lines are not emitted.
Note: destroys the tree it's visiting by mutating prefixes of its leaves
in ways that will no longer stringify to valid Python code on the tree.
"""
mode: Mode
remove_u_prefix: bool = False
current_line: Line = field(init=False)
def line(self, indent: int = 0) -> Iterator[Line]:
"""Generate a line.
If the line is empty, only emit if it makes sense.
If the line is too long, split it first and then generate.
If any lines were generated, set up a new current_line.
"""
if not self.current_line:
self.current_line.depth += indent
return # Line is empty, don't emit. Creating a new one unnecessary.
complete_line = self.current_line
self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
yield complete_line
def visit_default(self, node: LN) -> Iterator[Line]:
"""Default `visit_*()` implementation. Recurses to children of `node`."""
if isinstance(node, Leaf):
any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
for comment in generate_comments(node):
if any_open_brackets:
# any comment within brackets is subject to splitting
self.current_line.append(comment)
elif comment.type == token.COMMENT:
# regular trailing comment
self.current_line.append(comment)
yield from self.line()
else:
# regular standalone comment
yield from self.line()
self.current_line.append(comment)
yield from self.line()
normalize_prefix(node, inside_brackets=any_open_brackets)
if self.mode.string_normalization and node.type == token.STRING:
node.value = normalize_string_prefix(
node.value, remove_u_prefix=self.remove_u_prefix
)
node.value = normalize_string_quotes(node.value)
if node.type == token.NUMBER:
normalize_numeric_literal(node)
if node.type not in WHITESPACE:
self.current_line.append(node)
yield from super().visit_default(node)
def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
"""Increase indentation level, maybe yield a line."""
# In blib2to3 INDENT never holds comments.
yield from self.line(+1)
yield from self.visit_default(node)
def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
"""Decrease indentation level, maybe yield a line."""
# The current line might still wait for trailing comments. At DEDENT time
# there won't be any (they would be prefixes on the preceding NEWLINE).
# Emit the line then.
yield from self.line()
# While DEDENT has no value, its prefix may contain standalone comments
# that belong to the current indentation level. Get 'em.
yield from self.visit_default(node)
# Finally, emit the dedent.
yield from self.line(-1)
def visit_stmt(
self, node: Node, keywords: Set[str], parens: Set[str]
) -> Iterator[Line]:
"""Visit a statement.
This implementation is shared for `if`, `while`, `for`, `try`, `except`,
`def`, `with`, `class`, `assert` and assignments.
The relevant Python language `keywords` for a given statement will be
NAME leaves within it. This method puts those on a separate line.
`parens` holds a set of string leaf values immediately after which
invisible parens should be put.
"""
normalize_invisible_parens(node, parens_after=parens)
for child in node.children:
if child.type == token.NAME and child.value in keywords: # type: ignore
yield from self.line()
yield from self.visit(child)
def visit_suite(self, node: Node) -> Iterator[Line]:
"""Visit a suite."""
if self.mode.is_pyi and is_stub_suite(node):
yield from self.visit(node.children[2])
else:
yield from self.visit_default(node)
def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
"""Visit a statement without nested statements."""
if first_child_is_arith(node):
wrap_in_parentheses(node, node.children[0], visible=False)
is_suite_like = node.parent and node.parent.type in STATEMENT
if is_suite_like:
if self.mode.is_pyi and is_stub_body(node):
yield from self.visit_default(node)
else:
yield from self.line(+1)
yield from self.visit_default(node)
yield from self.line(-1)
else:
if (
not self.mode.is_pyi
or not node.parent
or not is_stub_suite(node.parent)
):
yield from self.line()
yield from self.visit_default(node)
def visit_async_stmt(self, node: Node) -> Iterator[Line]:
"""Visit `async def`, `async for`, `async with`."""
yield from self.line()
children = iter(node.children)
for child in children:
yield from self.visit(child)
if child.type == token.ASYNC:
break
internal_stmt = next(children)
for child in internal_stmt.children:
yield from self.visit(child)
def visit_decorators(self, node: Node) -> Iterator[Line]:
"""Visit decorators."""
for child in node.children:
yield from self.line()
yield from self.visit(child)
def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
"""Remove a semicolon and put the other statement on a separate line."""
yield from self.line()
def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
"""End of file. Process outstanding comments and end with a newline."""
yield from self.visit_default(leaf)
yield from self.line()
def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
if not self.current_line.bracket_tracker.any_open_brackets():
yield from self.line()
yield from self.visit_default(leaf)
def visit_factor(self, node: Node) -> Iterator[Line]:
"""Force parentheses between a unary op and a binary power:
-2 ** 8 -> -(2 ** 8)
"""
_operator, operand = node.children
if (
operand.type == syms.power
and len(operand.children) == 3
and operand.children[1].type == token.DOUBLESTAR
):
lpar = Leaf(token.LPAR, "(")
rpar = Leaf(token.RPAR, ")")
index = operand.remove() or 0
node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
yield from self.visit_default(node)
def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
if is_docstring(leaf) and "\\\n" not in leaf.value:
# We're ignoring docstrings with backslash newline escapes because changing
# indentation of those changes the AST representation of the code.
prefix = get_string_prefix(leaf.value)
docstring = leaf.value[len(prefix) :] # Remove the prefix
quote_char = docstring[0]
# A natural way to remove the outer quotes is to do:
# docstring = docstring.strip(quote_char)
# but that breaks on """""x""" (which is '""x').
# So we actually need to remove the first character and the next two
# characters but only if they are the same as the first.
quote_len = 1 if docstring[1] != quote_char else 3
docstring = docstring[quote_len:-quote_len]
docstring_started_empty = not docstring
if is_multiline_string(leaf):
indent = " " * 4 * self.current_line.depth
docstring = fix_docstring(docstring, indent)
else:
docstring = docstring.strip()
if docstring:
# Add some padding if the docstring starts / ends with a quote mark.
if docstring[0] == quote_char:
docstring = " " + docstring
if docstring[-1] == quote_char:
docstring += " "
if docstring[-1] == "\\":
backslash_count = len(docstring) - len(docstring.rstrip("\\"))
if backslash_count % 2:
# Odd number of trailing backslashes, add some padding to
# avoid escaping the closing string quote.
docstring += " "
elif not docstring_started_empty:
docstring = " "
# We could enforce triple quotes at this point.
quote = quote_char * quote_len
leaf.value = prefix + quote + docstring + quote
yield from self.visit_default(leaf)
def __post_init__(self) -> None:
"""You are in a twisty little maze of passages."""
self.current_line = Line(mode=self.mode)
v = self.visit_stmt
Ø: Set[str] = set()
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
self.visit_if_stmt = partial(
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
)
self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
self.visit_try_stmt = partial(
v, keywords={"try", "except", "else", "finally"}, parens=Ø
)
self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
self.visit_async_funcdef = self.visit_async_stmt
self.visit_decorated = self.visit_decorators
def transform_line(
line: Line, mode: Mode, features: Collection[Feature] = ()
) -> Iterator[Line]:
"""Transform a `line`, potentially splitting it into many lines.
They should fit in the allotted `line_length` but might not be able to.
`features` are syntactical features that may be used in the output.
"""
if line.is_comment:
yield line
return
line_str = line_to_string(line)
ll = mode.line_length
sn = mode.string_normalization
string_merge = StringMerger(ll, sn)
string_paren_strip = StringParenStripper(ll, sn)
string_split = StringSplitter(ll, sn)
string_paren_wrap = StringParenWrapper(ll, sn)
transformers: List[Transformer]
if (
not line.contains_uncollapsable_type_comments()
and not line.should_split_rhs
and not line.magic_trailing_comma
and (
is_line_short_enough(line, line_length=mode.line_length, line_str=line_str)
or line.contains_unsplittable_type_ignore()
)
and not (line.inside_brackets and line.contains_standalone_comments())
):
# Only apply basic string preprocessing, since lines shouldn't be split here.
if mode.experimental_string_processing:
transformers = [string_merge, string_paren_strip]
else:
transformers = []
elif line.is_def:
transformers = [left_hand_split]
else:
def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
"""Wraps calls to `right_hand_split`.
The calls increasingly `omit` right-hand trailers (bracket pairs with
content), meaning the trailers get glued together to split on another
bracket pair instead.
"""
for omit in generate_trailers_to_omit(line, mode.line_length):
lines = list(
right_hand_split(line, mode.line_length, features, omit=omit)
)
# Note: this check is only able to figure out if the first line of the
# *current* transformation fits in the line length. This is true only
# for simple cases. All others require running more transforms via
# `transform_line()`. This check doesn't know if those would succeed.
if is_line_short_enough(lines[0], line_length=mode.line_length):
yield from lines
return
# All splits failed, best effort split with no omits.
# This mostly happens to multiline strings that are by definition
# reported as not fitting a single line, as well as lines that contain
# trailing commas (those have to be exploded).
yield from right_hand_split(
line, line_length=mode.line_length, features=features
)
if mode.experimental_string_processing:
if line.inside_brackets:
transformers = [
string_merge,
string_paren_strip,
string_split,
delimiter_split,
standalone_comment_split,
string_paren_wrap,
rhs,
]
else:
transformers = [
string_merge,
string_paren_strip,
string_split,
string_paren_wrap,
rhs,
]
else:
if line.inside_brackets:
transformers = [delimiter_split, standalone_comment_split, rhs]
else:
transformers = [rhs]
for transform in transformers:
# We are accumulating lines in `result` because we might want to abort
# mission and return the original line in the end, or attempt a different
# split altogether.
try:
result = run_transformer(line, transform, mode, features, line_str=line_str)
except CannotTransform:
continue
else:
yield from result
break
else:
yield line
def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]:
"""Split line into many lines, starting with the first matching bracket pair.
Note: this usually looks weird, only use this for function definitions.
Prefer RHS otherwise. This is why this function is not symmetrical with
:func:`right_hand_split` which also handles optional parentheses.
"""
tail_leaves: List[Leaf] = []
body_leaves: List[Leaf] = []
head_leaves: List[Leaf] = []
current_leaves = head_leaves
matching_bracket: Optional[Leaf] = None
for leaf in line.leaves:
if (
current_leaves is body_leaves
and leaf.type in CLOSING_BRACKETS
and leaf.opening_bracket is matching_bracket
):
current_leaves = tail_leaves if body_leaves else head_leaves
current_leaves.append(leaf)
if current_leaves is head_leaves:
if leaf.type in OPENING_BRACKETS:
matching_bracket = leaf
current_leaves = body_leaves
if not matching_bracket:
raise CannotSplit("No brackets found")
head = bracket_split_build_line(head_leaves, line, matching_bracket)
body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
bracket_split_succeeded_or_raise(head, body, tail)
for result in (head, body, tail):
if result:
yield result
def right_hand_split(
line: Line,
line_length: int,
features: Collection[Feature] = (),
omit: Collection[LeafID] = (),
) -> Iterator[Line]:
"""Split line into many lines, starting with the last matching bracket pair.
If the split was by optional parentheses, attempt splitting without them, too.
`omit` is a collection of closing bracket IDs that shouldn't be considered for
this split.
Note: running this function modifies `bracket_depth` on the leaves of `line`.
"""
tail_leaves: List[Leaf] = []
body_leaves: List[Leaf] = []
head_leaves: List[Leaf] = []
current_leaves = tail_leaves
opening_bracket: Optional[Leaf] = None
closing_bracket: Optional[Leaf] = None
for leaf in reversed(line.leaves):
if current_leaves is body_leaves:
if leaf is opening_bracket:
current_leaves = head_leaves if body_leaves else tail_leaves
current_leaves.append(leaf)
if current_leaves is tail_leaves:
if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
opening_bracket = leaf.opening_bracket
closing_bracket = leaf
current_leaves = body_leaves
if not (opening_bracket and closing_bracket and head_leaves):
# If there is no opening or closing_bracket that means the split failed and
# all content is in the tail. Otherwise, if `head_leaves` are empty, it means
# the matching `opening_bracket` wasn't available on `line` anymore.
raise CannotSplit("No brackets found")
tail_leaves.reverse()
body_leaves.reverse()
head_leaves.reverse()
head = bracket_split_build_line(head_leaves, line, opening_bracket)
body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
bracket_split_succeeded_or_raise(head, body, tail)
if (
Feature.FORCE_OPTIONAL_PARENTHESES not in features
# the opening bracket is an optional paren
and opening_bracket.type == token.LPAR
and not opening_bracket.value
# the closing bracket is an optional paren
and closing_bracket.type == token.RPAR
and not closing_bracket.value
# it's not an import (optional parens are the only thing we can split on
# in this case; attempting a split without them is a waste of time)
and not line.is_import
# there are no standalone comments in the body
and not body.contains_standalone_comments(0)
# and we can actually remove the parens
and can_omit_invisible_parens(body, line_length, omit_on_explode=omit)
):
omit = {id(closing_bracket), *omit}
try:
yield from right_hand_split(line, line_length, features=features, omit=omit)
return
except CannotSplit:
if not (
can_be_split(body)
or is_line_short_enough(body, line_length=line_length)
):
raise CannotSplit(
"Splitting failed, body is still too long and can't be split."
)
elif head.contains_multiline_strings() or tail.contains_multiline_strings():
raise CannotSplit(
"The current optional pair of parentheses is bound to fail to"
" satisfy the splitting algorithm because the head or the tail"
" contains multiline strings which by definition never fit one"
" line."
)
ensure_visible(opening_bracket)
ensure_visible(closing_bracket)
for result in (head, body, tail):
if result:
yield result
def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
"""Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
Do nothing otherwise.
A left- or right-hand split is based on a pair of brackets. Content before
(and including) the opening bracket is left on one line, content inside the
brackets is put on a separate line, and finally content starting with and
following the closing bracket is put on a separate line.
Those are called `head`, `body`, and `tail`, respectively. If the split
produced the same line (all content in `head`) or ended up with an empty `body`
and the `tail` is just the closing bracket, then it's considered failed.
"""
tail_len = len(str(tail).strip())
if not body:
if tail_len == 0:
raise CannotSplit("Splitting brackets produced the same line")
elif tail_len < 3:
raise CannotSplit(
f"Splitting brackets on an empty body to save {tail_len} characters is"
" not worth it"
)
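# Illustrative head/body/tail split around a bracket pair (hypothetical source line):
#   original: print("hello", "world")
#   head: print(   body: "hello", "world"   tail: )
# An empty body with only the closing bracket left in the tail is the failure case
# this check raises on.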
def bracket_split_build_line(
leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
) -> Line:
"""Return a new line with given `leaves` and respective comments from `original`.
If `is_body` is True, the result line is one-indented inside brackets and as such
has its first leaf's prefix normalized and a trailing comma added when expected.
"""
result = Line(mode=original.mode, depth=original.depth)
if is_body:
result.inside_brackets = True
result.depth += 1
if leaves:
# Since body is a new indent level, remove spurious leading whitespace.
normalize_prefix(leaves[0], inside_brackets=True)
# Ensure a trailing comma for imports and standalone function arguments, but
# be careful not to add one after any comments or within type annotations.
no_commas = (
original.is_def
and opening_bracket.value == "("
and not any(leaf.type == token.COMMA for leaf in leaves)
)
if original.is_import or no_commas:
for i in range(len(leaves) - 1, -1, -1):
if leaves[i].type == STANDALONE_COMMENT:
continue
if leaves[i].type != token.COMMA:
new_comma = Leaf(token.COMMA, ",")
leaves.insert(i + 1, new_comma)
break
# Populate the line
for leaf in leaves:
result.append(leaf, preformatted=True)
for comment_after in original.comments_after(leaf):
result.append(comment_after, preformatted=True)
if is_body and should_split_line(result, opening_bracket):
result.should_split_rhs = True
return result
def dont_increase_indentation(split_func: Transformer) -> Transformer:
"""Normalize prefix of the first leaf in every line returned by `split_func`.
This is a decorator over relevant split functions.
"""
@wraps(split_func)
def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
for line in split_func(line, features):
normalize_prefix(line.leaves[0], inside_brackets=True)
yield line
return split_wrapper
@dont_increase_indentation
def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
"""Split according to delimiters of the highest priority.
If the appropriate Features are given, the split will add trailing commas
also in function signatures and calls that contain `*` and `**`.
"""
try:
last_leaf = line.leaves[-1]
except IndexError:
raise CannotSplit("Line empty")
bt = line.bracket_tracker
try:
delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
except ValueError:
raise CannotSplit("No delimiters found")
if delimiter_priority == DOT_PRIORITY:
if bt.delimiter_count_with_priority(delimiter_priority) == 1:
raise CannotSplit("Splitting a single attribute from its owner looks wrong")
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
lowest_depth = sys.maxsize
trailing_comma_safe = True
def append_to_line(leaf: Leaf) -> Iterator[Line]:
"""Append `leaf` to current line or to new line if appending impossible."""
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
except ValueError:
yield current_line
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
current_line.append(leaf)
for leaf in line.leaves:
yield from append_to_line(leaf)
for comment_after in line.comments_after(leaf):
yield from append_to_line(comment_after)
lowest_depth = min(lowest_depth, leaf.bracket_depth)
if leaf.bracket_depth == lowest_depth:
if is_vararg(leaf, within={syms.typedargslist}):
trailing_comma_safe = (
trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
)
elif is_vararg(leaf, within={syms.arglist, syms.argument}):
trailing_comma_safe = (
trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
)
leaf_priority = bt.delimiters.get(id(leaf))
if leaf_priority == delimiter_priority:
yield current_line
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
if current_line:
if (
trailing_comma_safe
and delimiter_priority == COMMA_PRIORITY
and current_line.leaves[-1].type != token.COMMA
and current_line.leaves[-1].type != STANDALONE_COMMENT
):
new_comma = Leaf(token.COMMA, ",")
current_line.append(new_comma)
yield current_line
@dont_increase_indentation
def standalone_comment_split(
line: Line, features: Collection[Feature] = ()
) -> Iterator[Line]:
"""Split standalone comments from the rest of the line."""
if not line.contains_standalone_comments(0):
raise CannotSplit("Line does not have any standalone comments")
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
def append_to_line(leaf: Leaf) -> Iterator[Line]:
"""Append `leaf` to current line or to new line if appending impossible."""
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
except ValueError:
yield current_line
current_line = Line(
line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
current_line.append(leaf)
for leaf in line.leaves:
yield from append_to_line(leaf)
for comment_after in line.comments_after(leaf):
yield from append_to_line(comment_after)
if current_line:
yield current_line
def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
"""Leave existing extra newlines if not `inside_brackets`. Remove everything
else.
Note: don't use backslashes for formatting or you'll lose your voting rights.
"""
if not inside_brackets:
spl = leaf.prefix.split("#")
if "\\" not in spl[0]:
nl_count = spl[-1].count("\n")
if len(spl) > 1:
nl_count -= 1
leaf.prefix = "\n" * nl_count
return
leaf.prefix = ""
def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
"""Make existing optional parentheses invisible or create new ones.
`parens_after` is a set of string leaf values immediately after which parens
should be put.
Standardizes on visible parentheses for single-element tuples, and keeps
existing visible parentheses for other tuples and generator expressions.
"""
for pc in list_comments(node.prefix, is_endmarker=False):
if pc.value in FMT_OFF:
# This `node` has a prefix with `# fmt: off`, don't mess with parens.
return
check_lpar = False
for index, child in enumerate(list(node.children)):
# Fixes a bug where invisible parens are not properly stripped from
# assignment statements that contain type annotations.
if isinstance(child, Node) and child.type == syms.annassign:
normalize_invisible_parens(child, parens_after=parens_after)
# Add parentheses around long tuple unpacking in assignments.
if (
index == 0
and isinstance(child, Node)
and child.type == syms.testlist_star_expr
):
check_lpar = True
if check_lpar:
if child.type == syms.atom:
if maybe_make_parens_invisible_in_atom(child, parent=node):
wrap_in_parentheses(node, child, visible=False)
elif is_one_tuple(child):
wrap_in_parentheses(node, child, visible=True)
elif node.type == syms.import_from:
# "import from" nodes store parentheses directly as part of
# the statement
if child.type == token.LPAR:
# make parentheses invisible
child.value = "" # type: ignore
node.children[-1].value = "" # type: ignore
elif child.type != token.STAR:
# insert invisible parentheses
node.insert_child(index, Leaf(token.LPAR, ""))
node.append_child(Leaf(token.RPAR, ""))
break
elif not (isinstance(child, Leaf) and is_multiline_string(child)):
wrap_in_parentheses(node, child, visible=False)
check_lpar = isinstance(child, Leaf) and child.value in parens_after
def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
"""If it's safe, make the parens in the atom `node` invisible, recursively.
Additionally, remove repeated, adjacent invisible parens from the atom `node`
as they are redundant.
Returns whether the node should itself be wrapped in invisible parentheses.
"""
if (
node.type != syms.atom
or is_empty_tuple(node)
or is_one_tuple(node)
or (is_yield(node) and parent.type != syms.expr_stmt)
or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
):
return False
if is_walrus_assignment(node):
if parent.type in [
syms.annassign,
syms.expr_stmt,
syms.assert_stmt,
syms.return_stmt,
# these ones aren't useful to end users, but they do please fuzzers
syms.for_stmt,
syms.del_stmt,
]:
return False
first = node.children[0]
last = node.children[-1]
if first.type == token.LPAR and last.type == token.RPAR:
middle = node.children[1]
# make parentheses invisible
first.value = "" # type: ignore
last.value = "" # type: ignore
maybe_make_parens_invisible_in_atom(middle, parent=parent)
if is_atom_with_invisible_parens(middle):
# Strip the invisible parens from `middle` by replacing
# it with the child in-between the invisible parens
middle.replace(middle.children[1])
return False
return True
def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
"""Should `line` be immediately split with `delimiter_split()` after RHS?"""
if not (opening_bracket.parent and opening_bracket.value in "[{("):
return False
# We're essentially checking if the body is delimited by commas and there's more
# than one of them (we're excluding the trailing comma and if the delimiter priority
# is still commas, that means there's more).
exclude = set()
trailing_comma = False
try:
last_leaf = line.leaves[-1]
if last_leaf.type == token.COMMA:
trailing_comma = True
exclude.add(id(last_leaf))
max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
except (IndexError, ValueError):
return False
return max_priority == COMMA_PRIORITY and (
(line.mode.magic_trailing_comma and trailing_comma)
# always explode imports
or opening_bracket.parent.type in {syms.atom, syms.import_from}
)
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
"""Generate sets of closing bracket IDs that should be omitted in a RHS.
Brackets can be omitted if the entire trailer up to and including
a preceding closing bracket fits in one line.
Yielded sets are cumulative (contain results of previous yields, too). First
set is empty, unless the line should explode, in which case bracket pairs until
the one that needs to explode are omitted.
"""
omit: Set[LeafID] = set()
if not line.magic_trailing_comma:
yield omit
length = 4 * line.depth
opening_bracket: Optional[Leaf] = None
closing_bracket: Optional[Leaf] = None
inner_brackets: Set[LeafID] = set()
for index, leaf, leaf_length in line.enumerate_with_length(reversed=True):
length += leaf_length
if length > line_length:
break
has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
if leaf.type == STANDALONE_COMMENT or has_inline_comment:
break
if opening_bracket:
if leaf is opening_bracket:
opening_bracket = None
elif leaf.type in CLOSING_BRACKETS:
prev = line.leaves[index - 1] if index > 0 else None
if (
prev
and prev.type == token.COMMA
and not is_one_tuple_between(
leaf.opening_bracket, leaf, line.leaves
)
):
# Never omit bracket pairs with trailing commas.
# We need to explode on those.
break
inner_brackets.add(id(leaf))
elif leaf.type in CLOSING_BRACKETS:
prev = line.leaves[index - 1] if index > 0 else None
if prev and prev.type in OPENING_BRACKETS:
# Empty brackets would fail a split so treat them as "inner"
# brackets (e.g. only add them to the `omit` set if another
# pair of brackets was good enough).
inner_brackets.add(id(leaf))
continue
if closing_bracket:
omit.add(id(closing_bracket))
omit.update(inner_brackets)
inner_brackets.clear()
yield omit
if (
prev
and prev.type == token.COMMA
and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves)
):
# Never omit bracket pairs with trailing commas.
# We need to explode on those.
break
if leaf.value:
opening_bracket = leaf.opening_bracket
closing_bracket = leaf
def run_transformer(
line: Line,
transform: Transformer,
mode: Mode,
features: Collection[Feature],
*,
line_str: str = "",
) -> List[Line]:
if not line_str:
line_str = line_to_string(line)
result: List[Line] = []
for transformed_line in transform(line, features):
if str(transformed_line).strip("\n") == line_str:
raise CannotTransform("Line transformer returned an unchanged result")
result.extend(transform_line(transformed_line, mode=mode, features=features))
if not (
transform.__name__ == "rhs"
and line.bracket_tracker.invisible
and not any(bracket.value for bracket in line.bracket_tracker.invisible)
and not line.contains_multiline_strings()
and not result[0].contains_uncollapsable_type_comments()
and not result[0].contains_unsplittable_type_ignore()
and not is_line_short_enough(result[0], line_length=mode.line_length)
):
return result
line_copy = line.clone()
append_leaves(line_copy, line, line.leaves)
features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES}
second_opinion = run_transformer(
line_copy, transform, mode, features_fop, line_str=line_str
)
if all(
is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion
):
result = second_opinion
return result
| 39.403046 | 88 | 0.627976 |
794965584027a65acbbc1b499c6545ac4463891c
| 333 |
py
|
Python
|
survey/migrations/0002_survey_template.py
|
TheWITProject/MentorApp
|
2f08b87a7cde6d180e16d6f37d0b8019b8361638
|
[
"MIT"
] | null | null | null |
survey/migrations/0002_survey_template.py
|
TheWITProject/MentorApp
|
2f08b87a7cde6d180e16d6f37d0b8019b8361638
|
[
"MIT"
] | 65 |
2020-02-04T22:31:07.000Z
|
2022-01-13T02:39:19.000Z
|
survey/migrations/0002_survey_template.py
|
TheWITProject/MentorApp
|
2f08b87a7cde6d180e16d6f37d0b8019b8361638
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("survey", "0001_initial")]
operations = [
migrations.AddField(
model_name="survey", name="template", field=models.CharField(max_length=255, null=True, blank=True)
)
]
| 22.2 | 111 | 0.63964 |
794965aaa59fcd8c9cdade9300f0aa42839f5368
| 3,042 |
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/datasync/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119 |
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/datasync/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15 |
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/datasync/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11 |
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def cancel_task_execution(self, TaskExecutionArn: str) -> Dict:
pass
def create_agent(self, ActivationKey: str, AgentName: str = None, Tags: List = None) -> Dict:
pass
def create_location_efs(self, Subdirectory: str, EfsFilesystemArn: str, Ec2Config: Dict, Tags: List = None) -> Dict:
pass
def create_location_nfs(self, Subdirectory: str, ServerHostname: str, OnPremConfig: Dict, Tags: List = None) -> Dict:
pass
def create_location_s3(self, Subdirectory: str, S3BucketArn: str, S3Config: Dict, Tags: List = None) -> Dict:
pass
def create_task(self, SourceLocationArn: str, DestinationLocationArn: str, CloudWatchLogGroupArn: str = None, Name: str = None, Options: Dict = None, Tags: List = None) -> Dict:
pass
def delete_agent(self, AgentArn: str) -> Dict:
pass
def delete_location(self, LocationArn: str) -> Dict:
pass
def delete_task(self, TaskArn: str) -> Dict:
pass
def describe_agent(self, AgentArn: str) -> Dict:
pass
def describe_location_efs(self, LocationArn: str) -> Dict:
pass
def describe_location_nfs(self, LocationArn: str) -> Dict:
pass
def describe_location_s3(self, LocationArn: str) -> Dict:
pass
def describe_task(self, TaskArn: str) -> Dict:
pass
def describe_task_execution(self, TaskExecutionArn: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_agents(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_locations(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_tags_for_resource(self, ResourceArn: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_task_executions(self, TaskArn: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_tasks(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def start_task_execution(self, TaskArn: str, OverrideOptions: Dict = None) -> Dict:
pass
def tag_resource(self, ResourceArn: str, Tags: List) -> Dict:
pass
def untag_resource(self, ResourceArn: str, Keys: List) -> Dict:
pass
def update_agent(self, AgentArn: str, Name: str = None) -> Dict:
pass
def update_task(self, TaskArn: str, Options: Dict = None, Name: str = None) -> Dict:
pass
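# These stubs only provide type annotations; at runtime the client comes from
# boto3 itself (standard boto3 usage shown below, not part of this package):
#   import boto3
#   client = boto3.client('datasync')
#   client.list_agents(MaxResults=10)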
| 31.360825 | 181 | 0.660421 |
794967db1c2a4b6f417afe4a9479944d0141ab7e
| 17,367 |
py
|
Python
|
pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
|
erwincoumans/pigweed
|
d1669a85515eda45e2925158aef6e089af28923c
|
[
"Apache-2.0"
] | 1 |
2020-12-19T19:42:46.000Z
|
2020-12-19T19:42:46.000Z
|
pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
|
erwincoumans/pigweed
|
d1669a85515eda45e2925158aef6e089af28923c
|
[
"Apache-2.0"
] | 3 |
2021-03-11T06:53:56.000Z
|
2022-02-13T21:59:25.000Z
|
pw_presubmit/py/pw_presubmit/pigweed_presubmit.py
|
erwincoumans/pigweed
|
d1669a85515eda45e2925158aef6e089af28923c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Runs the local presubmit checks for the Pigweed repository."""
import argparse
import logging
import os
from pathlib import Path
import re
import sys
from typing import Sequence, IO, Tuple, Optional
try:
import pw_presubmit
except ImportError:
# Append the pw_presubmit package path to the module search path to allow
# running this module without installing the pw_presubmit package.
sys.path.append(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
import pw_presubmit
from pw_presubmit import build, cli, environment, format_code, git_repo
from pw_presubmit import call, filter_paths, plural, PresubmitContext
from pw_presubmit import PresubmitFailure, Programs
from pw_presubmit.install_hook import install_hook
_LOG = logging.getLogger(__name__)
#
# Initialization
#
def init_cipd(ctx: PresubmitContext):
environment.init_cipd(ctx.root, ctx.output_dir)
def init_virtualenv(ctx: PresubmitContext):
environment.init_virtualenv(
ctx.root,
ctx.output_dir,
gn_targets=(
f'{ctx.root}#:python.install',
f'{ctx.root}#:target_support_packages.install',
),
)
# Trigger builds if files with these extensions change.
_BUILD_EXTENSIONS = ('.py', '.rst', '.gn', '.gni',
*format_code.C_FORMAT.extensions)
def _at_all_optimization_levels(target):
for level in ['debug', 'size_optimized', 'speed_optimized']:
yield f'{target}_{level}'
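# For example, _at_all_optimization_levels('host_clang') yields 'host_clang_debug',
# 'host_clang_size_optimized' and 'host_clang_speed_optimized' in that order.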
#
# Build presubmit checks
#
def gn_clang_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('host_clang'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_quick_build_check(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, 'host_clang_size_optimized',
'stm32f429i_size_optimized', 'python.tests', 'python.lint')
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_gcc_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
# Skip optimized host GCC builds for now, since GCC sometimes emits spurious
# warnings.
#
    # -O2: GCC 9.3 emits spurious maybe-uninitialized warnings
    # -Os: GCC 8.1 (Mingw-w64) emits a spurious nonnull warning
#
# TODO(pwbug/255): Enable optimized GCC builds when this is fixed.
build.ninja(ctx.output_dir, 'host_gcc_debug')
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_arm_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('stm32f429i'))
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_nanopb_build(ctx: PresubmitContext):
build.install_package(ctx.package_root, 'nanopb')
build.gn_gen(ctx.root,
ctx.output_dir,
dir_pw_third_party_nanopb='"{}"'.format(ctx.package_root /
'nanopb'))
build.ninja(
ctx.output_dir,
*_at_all_optimization_levels('stm32f429i'),
*_at_all_optimization_levels('host_clang'),
)
@filter_paths(endswith=_BUILD_EXTENSIONS)
def gn_qemu_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, *_at_all_optimization_levels('qemu'))
def gn_docs_build(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(ctx.output_dir, 'docs')
def gn_host_tools(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir, pw_build_HOST_TOOLS=True)
build.ninja(ctx.output_dir)
@filter_paths(endswith=format_code.C_FORMAT.extensions)
def oss_fuzz_build(ctx: PresubmitContext):
build.gn_gen(ctx.root,
ctx.output_dir,
pw_toolchain_OSS_FUZZ_ENABLED=True,
pw_toolchain_SANITIZER="address")
build.ninja(ctx.output_dir, "host_clang")
@filter_paths(endswith='.py')
def python_checks(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir)
build.ninja(
ctx.output_dir,
':python.lint',
':python.tests',
':target_support_packages.lint',
':target_support_packages.tests',
)
@filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.cmake',
'CMakeLists.txt'))
def cmake_tests(ctx: PresubmitContext):
toolchain = ctx.root / 'pw_toolchain' / 'host_clang' / 'toolchain.cmake'
build.cmake(ctx.root,
ctx.output_dir,
f'-DCMAKE_TOOLCHAIN_FILE={toolchain}',
env=build.env_with_clang_vars())
build.ninja(ctx.output_dir, 'pw_run_tests.modules')
@filter_paths(endswith=(*format_code.C_FORMAT.extensions, '.bzl', 'BUILD'))
def bazel_test(ctx: PresubmitContext):
try:
call('bazel',
'test',
'//...',
'--verbose_failures',
'--verbose_explanations',
'--worker_verbose',
'--symlink_prefix',
ctx.output_dir.joinpath('bazel-'),
cwd=ctx.root,
env=build.env_with_clang_vars())
except:
_LOG.info('If the Bazel build inexplicably fails while the '
'other builds are passing, try deleting the Bazel cache:\n'
' rm -rf ~/.cache/bazel')
raise
#
# General presubmit checks
#
# TODO(pwbug/45) Probably want additional checks.
_CLANG_TIDY_CHECKS = ('modernize-use-override', )
@filter_paths(endswith=format_code.C_FORMAT.extensions)
def clang_tidy(ctx: PresubmitContext):
build.gn_gen(ctx.root, ctx.output_dir, '--export-compile-commands')
build.ninja(ctx.output_dir)
build.ninja(ctx.output_dir, '-t', 'compdb', 'objcxx', 'cxx')
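    # Search the CIPD install directories for the run-clang-tidy.py helper
    # that ships with the clang package.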
run_clang_tidy = None
for var in ('PW_PIGWEED_CIPD_INSTALL_DIR', 'PW_CIPD_INSTALL_DIR'):
if var in os.environ:
possibility = os.path.join(os.environ[var],
'share/clang/run-clang-tidy.py')
if os.path.isfile(possibility):
run_clang_tidy = possibility
break
checks = ','.join(_CLANG_TIDY_CHECKS)
call(
run_clang_tidy,
f'-p={ctx.output_dir}',
f'-checks={checks}',
# TODO(pwbug/45) not sure if this is needed.
# f'-extra-arg-before=-warnings-as-errors={checks}',
*ctx.paths)
# The first line must be regex because of the '20\d\d' date
COPYRIGHT_FIRST_LINE = r'Copyright 20\d\d The Pigweed Authors'
COPYRIGHT_COMMENTS = r'(#|//| \*|REM|::)'
COPYRIGHT_BLOCK_COMMENTS = (
# HTML comments
(r'<!--', r'-->'), )
COPYRIGHT_FIRST_LINE_EXCEPTIONS = (
'#!',
'/*',
'@echo off',
'# -*-',
':',
)
COPYRIGHT_LINES = tuple("""\
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
""".splitlines())
_EXCLUDE_FROM_COPYRIGHT_NOTICE: Sequence[str] = (
# Configuration
r'^(?:.+/)?\..+$',
r'\bPW_PLUGINS$',
# Metadata
r'^docker/tag$',
r'\bAUTHORS$',
r'\bLICENSE$',
r'\bOWNERS$',
r'\brequirements.txt$',
r'\bgo.(mod|sum)$',
r'\bpackage.json$',
r'\byarn.lock$',
# Data files
r'\.elf$',
r'\.gif$',
r'\.jpg$',
r'\.json$',
r'\.png$',
# Documentation
r'\.md$',
r'\.rst$',
# Generated protobuf files
r'\.pb\.h$',
r'\.pb\.c$',
r'\_pb2.pyi?$',
)
def match_block_comment_start(line: str) -> Optional[str]:
"""Matches the start of a block comment and returns the end."""
for block_comment in COPYRIGHT_BLOCK_COMMENTS:
if re.match(block_comment[0], line):
# Return the end of the block comment
return block_comment[1]
return None
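# Example: match_block_comment_start('<!-- Copyright 2020 ...') returns '-->',
# since HTML comments are the only block comment style registered above.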
def copyright_read_first_line(
file: IO) -> Tuple[Optional[str], Optional[str], Optional[str]]:
"""Reads the file until it reads a valid first copyright line.
Returns (comment, block_comment, line). comment and block_comment are
mutually exclusive and refer to the comment character sequence and whether
they form a block comment or a line comment. line is the first line of
the copyright, and is used for error reporting.
"""
line = file.readline()
first_line_matcher = re.compile(COPYRIGHT_COMMENTS + ' ' +
COPYRIGHT_FIRST_LINE)
while line:
end_block_comment = match_block_comment_start(line)
if end_block_comment:
next_line = file.readline()
copyright_line = re.match(COPYRIGHT_FIRST_LINE, next_line)
if not copyright_line:
return (None, None, line)
return (None, end_block_comment, line)
first_line = first_line_matcher.match(line)
if first_line:
return (first_line.group(1), None, line)
if (line.strip()
and not line.startswith(COPYRIGHT_FIRST_LINE_EXCEPTIONS)):
return (None, None, line)
line = file.readline()
return (None, None, None)
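# Example: for a file beginning with '# Copyright 2020 The Pigweed Authors',
# this returns ('#', None, '# Copyright 2020 The Pigweed Authors\n'). For an
# HTML file that opens with '<!--' on its own line followed by the copyright
# line, it returns (None, '-->', '<!--\n') so the caller knows which closing
# marker to expect after the license text.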
@filter_paths(exclude=_EXCLUDE_FROM_COPYRIGHT_NOTICE)
def copyright_notice(ctx: PresubmitContext):
"""Checks that the Pigweed copyright notice is present."""
errors = []
for path in ctx.paths:
if path.stat().st_size == 0:
continue # Skip empty files
with path.open() as file:
(comment, end_block_comment,
line) = copyright_read_first_line(file)
if not line:
_LOG.warning('%s: invalid first line', path)
errors.append(path)
continue
if not (comment or end_block_comment):
_LOG.warning('%s: invalid first line %r', path, line)
errors.append(path)
continue
if end_block_comment:
expected_lines = COPYRIGHT_LINES + (end_block_comment, )
else:
expected_lines = COPYRIGHT_LINES
for expected, actual in zip(expected_lines, file):
if end_block_comment:
expected_line = expected + '\n'
elif comment:
expected_line = (comment + ' ' + expected).rstrip() + '\n'
if expected_line != actual:
_LOG.warning(' bad line: %r', actual)
_LOG.warning(' expected: %r', expected_line)
errors.append(path)
break
if errors:
_LOG.warning('%s with a missing or incorrect copyright notice:\n%s',
plural(errors, 'file'), '\n'.join(str(e) for e in errors))
raise PresubmitFailure
_BAZEL_SOURCES_IN_BUILD = tuple(format_code.C_FORMAT.extensions)
_GN_SOURCES_IN_BUILD = '.rst', '.py', *_BAZEL_SOURCES_IN_BUILD
@filter_paths(endswith=(*_GN_SOURCES_IN_BUILD, 'BUILD', '.bzl', '.gn', '.gni'))
def source_is_in_build_files(ctx: PresubmitContext):
"""Checks that source files are in the GN and Bazel builds."""
missing = build.check_builds_for_files(
_BAZEL_SOURCES_IN_BUILD,
_GN_SOURCES_IN_BUILD,
ctx.paths,
bazel_dirs=[ctx.root],
gn_build_files=git_repo.list_files(
pathspecs=['BUILD.gn', '*BUILD.gn']))
if missing:
_LOG.warning(
'All source files must appear in BUILD and BUILD.gn files')
raise PresubmitFailure
def build_env_setup(ctx: PresubmitContext):
if 'PW_CARGO_SETUP' not in os.environ:
_LOG.warning(
'Skipping build_env_setup since PW_CARGO_SETUP is not set')
return
tmpl = ctx.root.joinpath('pw_env_setup', 'py', 'pyoxidizer.bzl.tmpl')
out = ctx.output_dir.joinpath('pyoxidizer.bzl')
with open(tmpl, 'r') as ins:
cfg = ins.read().replace('${PW_ROOT}', str(ctx.root))
with open(out, 'w') as outs:
outs.write(cfg)
call('pyoxidizer', 'build', cwd=ctx.output_dir)
def commit_message_format(_: PresubmitContext):
"""Checks that the top commit's message is correctly formatted."""
lines = git_repo.commit_message().splitlines()
# Show limits and current commit message in log.
_LOG.debug('%-25s%+25s%+22s', 'Line limits', '72|', '72|')
for line in lines:
_LOG.debug(line)
if not lines:
_LOG.error('The commit message is too short!')
raise PresubmitFailure
errors = 0
if len(lines[0]) > 72:
_LOG.warning("The commit message's first line must be no longer than "
'72 characters.')
_LOG.warning('The first line is %d characters:\n %s', len(lines[0]),
lines[0])
errors += 1
if lines[0].endswith('.'):
_LOG.warning(
"The commit message's first line must not end with a period:\n %s",
lines[0])
errors += 1
if len(lines) > 1 and lines[1]:
_LOG.warning("The commit message's second line must be blank.")
_LOG.warning('The second line has %d characters:\n %s', len(lines[1]),
lines[1])
errors += 1
# Check that the lines are 72 characters or less, but skip any lines that
# might possibly have a URL, path, or metadata in them. Also skip any lines
# with non-ASCII characters.
for i, line in enumerate(lines[2:], 3):
if ':' in line or '/' in line or not line.isascii():
continue
if len(line) > 72:
_LOG.warning(
'Commit message lines must be no longer than 72 characters.')
_LOG.warning('Line %d has %d characters:\n %s', i, len(line),
line)
errors += 1
if errors:
_LOG.error('Found %s in the commit message', plural(errors, 'error'))
raise PresubmitFailure
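# A commit message that passes these checks looks like (illustrative example):
#
#   pw_presubmit: Add commit message formatting check
#
#   Body lines are wrapped at 72 columns. Lines containing ':' or '/' (for
#   example URLs or paths) and non-ASCII lines are exempt from the limit.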
#
# Presubmit check programs
#
BROKEN = (
# TODO(pwbug/45): Remove clang-tidy from BROKEN when it passes.
clang_tidy,
# QEMU build. Currently doesn't have test runners.
gn_qemu_build,
# Build that attempts to duplicate the build OSS-Fuzz does. Currently
# failing.
oss_fuzz_build,
bazel_test,
cmake_tests,
gn_nanopb_build,
)
QUICK = (
commit_message_format,
init_cipd,
init_virtualenv,
source_is_in_build_files,
copyright_notice,
format_code.presubmit_checks(),
pw_presubmit.pragma_once,
gn_quick_build_check,
    # TODO(pwbug/141): Re-enable CMake and Bazel for Mac after we have fixed
    # the clang issues. The problem is that all clang++ invocations need the
# two extra flags: "-nostdc++" and "${clang_prefix}/../lib/libc++.a".
cmake_tests if sys.platform != 'darwin' else (),
)
FULL = (
commit_message_format,
init_cipd,
init_virtualenv,
copyright_notice,
format_code.presubmit_checks(),
pw_presubmit.pragma_once,
gn_clang_build,
gn_arm_build,
gn_docs_build,
gn_host_tools,
# On Mac OS, system 'gcc' is a symlink to 'clang' by default, so skip GCC
# host builds on Mac for now.
gn_gcc_build if sys.platform != 'darwin' else (),
source_is_in_build_files,
python_checks,
build_env_setup,
)
PROGRAMS = Programs(broken=BROKEN, quick=QUICK, full=FULL)
def parse_args() -> argparse.Namespace:
"""Creates an argument parser and parses arguments."""
parser = argparse.ArgumentParser(description=__doc__)
cli.add_arguments(parser, PROGRAMS, 'quick')
parser.add_argument(
'--install',
action='store_true',
help='Install the presubmit as a Git pre-push hook and exit.')
return parser.parse_args()
def run(install: bool, **presubmit_args) -> int:
"""Entry point for presubmit."""
if install:
install_hook(__file__, 'pre-push',
['--base', 'origin/master..HEAD', '--program', 'quick'],
Path.cwd())
return 0
return cli.run(**presubmit_args)
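# Typical invocations (flag names come from pw_presubmit.cli.add_arguments and
# may vary between versions; treat these as illustrative):
#   python pigweed_presubmit.py --program quick
#   python pigweed_presubmit.py --install   # install the Git pre-push hook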
def main() -> int:
"""Run the presubmit for the Pigweed repository."""
return run(**vars(parse_args()))
if __name__ == '__main__':
try:
# If pw_cli is available, use it to initialize logs.
from pw_cli import log
log.install(logging.INFO)
except ImportError:
# If pw_cli isn't available, display log messages like a simple print.
logging.basicConfig(format='%(message)s', level=logging.INFO)
sys.exit(main())
| 31.235612 | 80 | 0.640237 |